git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/commitdiff
Merge tag 'char-misc-4.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
author    Linus Torvalds <torvalds@linux-foundation.org>
          Wed, 22 Feb 2017 19:38:22 +0000 (11:38 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Wed, 22 Feb 2017 19:38:22 +0000 (11:38 -0800)
Pull char/misc driver updates from Greg KH:
 "Here is the big char/misc driver patchset for 4.11-rc1.

  Lots of different driver subsystems updated here: rework for the
  hyperv subsystem to handle new platforms better, mei and w1 and extcon
  driver updates, as well as a number of other "minor" driver updates.

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'char-misc-4.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (169 commits)
  goldfish: Sanitize the broken interrupt handler
  x86/platform/goldfish: Prevent unconditional loading
  vmbus: replace modulus operation with subtraction
  vmbus: constify parameters where possible
  vmbus: expose hv_begin/end_read
  vmbus: remove conditional locking of vmbus_write
  vmbus: add direct isr callback mode
  vmbus: change to per channel tasklet
  vmbus: put related per-cpu variable together
  vmbus: callback is in softirq not workqueue
  binder: Add support for file-descriptor arrays
  binder: Add support for scatter-gather
  binder: Add extra size to allocator
  binder: Refactor binder_transact()
  binder: Support multiple /dev instances
  binder: Deal with contexts in debugfs
  binder: Support multiple context managers
  binder: Split flat_binder_object
  auxdisplay: ht16k33: remove private workqueue
  auxdisplay: ht16k33: rework input device initialization
  ...

134 files changed:
Documentation/DocBook/Makefile
Documentation/DocBook/uio-howto.tmpl [deleted file]
Documentation/admin-guide/kernel-parameters.txt
Documentation/devicetree/bindings/misc/idt_89hpesx.txt [new file with mode: 0644]
Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
Documentation/devicetree/bindings/sram/sram.txt
Documentation/driver-api/index.rst
Documentation/driver-api/uio-howto.rst [new file with mode: 0644]
Documentation/extcon/intel-int3496.txt [new file with mode: 0644]
Documentation/fpga/fpga-mgr.txt
MAINTAINERS
arch/arm/mach-davinci/da850.c
arch/arm/mach-davinci/da8xx-dt.c
arch/x86/Kbuild
arch/x86/hyperv/Makefile [new file with mode: 0644]
arch/x86/hyperv/hv_init.c [new file with mode: 0644]
arch/x86/include/asm/mshyperv.h
arch/x86/include/uapi/asm/hyperv.h
arch/x86/kernel/cpu/mshyperv.c
arch/x86/platform/goldfish/goldfish.c
drivers/Kconfig
drivers/Makefile
drivers/android/Kconfig
drivers/android/binder.c
drivers/auxdisplay/ht16k33.c
drivers/char/Kconfig
drivers/char/apm-emulation.c
drivers/char/ds1302.c
drivers/char/mmtimer.c
drivers/char/xilinx_hwicap/buffer_icap.c
drivers/extcon/Kconfig
drivers/extcon/Makefile
drivers/extcon/devres.c
drivers/extcon/extcon-adc-jack.c
drivers/extcon/extcon-arizona.c
drivers/extcon/extcon-axp288.c
drivers/extcon/extcon-intel-int3496.c [new file with mode: 0644]
drivers/extcon/extcon-max14577.c
drivers/extcon/extcon-max77693.c
drivers/extcon/extcon-max77843.c
drivers/extcon/extcon-palmas.c
drivers/extcon/extcon-rt8973a.c
drivers/extcon/extcon-sm5502.c
drivers/extcon/extcon-usb-gpio.c
drivers/extcon/extcon.c
drivers/extcon/extcon.h [new file with mode: 0644]
drivers/fpga/fpga-mgr.c
drivers/fpga/zynq-fpga.c
drivers/fsi/Kconfig [new file with mode: 0644]
drivers/fsi/Makefile [new file with mode: 0644]
drivers/fsi/fsi-core.c [new file with mode: 0644]
drivers/hv/channel.c
drivers/hv/channel_mgmt.c
drivers/hv/connection.c
drivers/hv/hv.c
drivers/hv/hv_balloon.c
drivers/hv/hv_fcopy.c
drivers/hv/hv_kvp.c
drivers/hv/hv_snapshot.c
drivers/hv/hv_util.c
drivers/hv/hyperv_vmbus.h
drivers/hv/ring_buffer.c
drivers/hv/vmbus_drv.c
drivers/hwtracing/coresight/coresight-etm-perf.c
drivers/hwtracing/coresight/coresight-etm4x.c
drivers/hwtracing/coresight/coresight-etm4x.h
drivers/hwtracing/coresight/coresight-stm.c
drivers/memory/ti-aemif.c
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/eeprom/Kconfig
drivers/misc/eeprom/Makefile
drivers/misc/eeprom/idt_89hpesx.c [new file with mode: 0644]
drivers/misc/genwqe/card_base.c
drivers/misc/lkdtm_bugs.c
drivers/misc/lkdtm_core.c
drivers/misc/mei/amthif.c
drivers/misc/mei/bus.c
drivers/misc/mei/client.c
drivers/misc/mei/client.h
drivers/misc/mei/hbm.c
drivers/misc/mei/hw-me.c
drivers/misc/mei/hw-txe.c
drivers/misc/mei/hw-txe.h
drivers/misc/mei/init.c
drivers/misc/mei/interrupt.c
drivers/misc/mei/main.c
drivers/misc/mei/mei_dev.h
drivers/misc/mei/pci-me.c
drivers/misc/mei/pci-txe.c
drivers/misc/mic/vop/vop_vringh.c
drivers/misc/panel.c
drivers/misc/sram-exec.c [new file with mode: 0644]
drivers/misc/sram.c
drivers/misc/sram.h [new file with mode: 0644]
drivers/misc/vmw_vmci/vmci_guest.c
drivers/net/hyperv/netvsc.c
drivers/nvmem/core.c
drivers/nvmem/imx-ocotp.c
drivers/platform/goldfish/pdev_bus.c
drivers/uio/uio_hv_generic.c
drivers/vme/vme.c
drivers/w1/masters/ds2490.c
drivers/w1/masters/omap_hdq.c
drivers/w1/slaves/Kconfig
drivers/w1/slaves/Makefile
drivers/w1/slaves/w1_ds2405.c [new file with mode: 0644]
drivers/w1/w1.c
drivers/w1/w1.h
drivers/w1/w1_family.c
drivers/w1/w1_family.h
drivers/w1/w1_int.c
drivers/w1/w1_int.h
drivers/w1/w1_io.c
drivers/w1/w1_log.h
drivers/w1/w1_netlink.c
drivers/w1/w1_netlink.h
include/linux/extcon.h
include/linux/extcon/extcon-adc-jack.h
include/linux/fpga/fpga-mgr.h
include/linux/fsi.h [new file with mode: 0644]
include/linux/hyperv.h
include/linux/miscdevice.h
include/linux/platform_data/ti-aemif.h [new file with mode: 0644]
include/linux/sram.h [new file with mode: 0644]
include/linux/vme.h
include/linux/vmw_vmci_defs.h
include/uapi/linux/android/binder.h
init/Kconfig
lib/test_firmware.c
scripts/checkkconfigsymbols.py
tools/testing/selftests/firmware/Makefile
tools/testing/selftests/firmware/fw_fallback.sh [new file with mode: 0755]
tools/testing/selftests/firmware/fw_userhelper.sh [deleted file]

index a6eb7dcd4dd5c010fe76ac285769d9e0c5157adc..5fd8f5effd0c59b06d0dccec9bade44c8d6395a2 100644 (file)
@@ -11,7 +11,7 @@ DOCBOOKS := z8530book.xml  \
            writing_usb_driver.xml networking.xml \
            kernel-api.xml filesystems.xml lsm.xml kgdb.xml \
            gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
-           genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
+           genericirq.xml s390-drivers.xml scsi.xml \
            sh.xml regulator.xml w1.xml \
            writing_musb_glue_layer.xml iio.xml
 
diff --git a/Documentation/DocBook/uio-howto.tmpl b/Documentation/DocBook/uio-howto.tmpl
deleted file mode 100644 (file)
index 5210f8a..0000000
+++ /dev/null
@@ -1,1112 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
-"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" []>
-
-<book id="index">
-<bookinfo>
-<title>The Userspace I/O HOWTO</title>
-
-<author>
-      <firstname>Hans-Jürgen</firstname>
-      <surname>Koch</surname>
-      <authorblurb><para>Linux developer, Linutronix</para></authorblurb>
-       <affiliation>
-       <orgname>
-               <ulink url="http://www.linutronix.de">Linutronix</ulink>
-       </orgname>
-
-       <address>
-          <email>hjk@hansjkoch.de</email>
-       </address>
-    </affiliation>
-</author>
-
-<copyright>
-       <year>2006-2008</year>
-       <holder>Hans-Jürgen Koch.</holder>
-</copyright>
-<copyright>
-       <year>2009</year>
-       <holder>Red Hat Inc, Michael S. Tsirkin (mst@redhat.com)</holder>
-</copyright>
-
-<legalnotice>
-<para>
-This documentation is Free Software licensed under the terms of the
-GPL version 2.
-</para>
-</legalnotice>
-
-<pubdate>2006-12-11</pubdate>
-
-<abstract>
-       <para>This HOWTO describes the concept and usage of the Linux kernel's
-               Userspace I/O system.</para>
-</abstract>
-
-<revhistory>
-       <revision>
-       <revnumber>0.10</revnumber>
-       <date>2016-10-17</date>
-       <authorinitials>sch</authorinitials>
-       <revremark>Added generic hyperv driver
-               </revremark>
-       </revision>
-       <revision>
-       <revnumber>0.9</revnumber>
-       <date>2009-07-16</date>
-       <authorinitials>mst</authorinitials>
-       <revremark>Added generic pci driver
-               </revremark>
-       </revision>
-       <revision>
-       <revnumber>0.8</revnumber>
-       <date>2008-12-24</date>
-       <authorinitials>hjk</authorinitials>
-       <revremark>Added name attributes in mem and portio sysfs directories.
-               </revremark>
-       </revision>
-       <revision>
-       <revnumber>0.7</revnumber>
-       <date>2008-12-23</date>
-       <authorinitials>hjk</authorinitials>
-       <revremark>Added generic platform drivers and offset attribute.</revremark>
-       </revision>
-       <revision>
-       <revnumber>0.6</revnumber>
-       <date>2008-12-05</date>
-       <authorinitials>hjk</authorinitials>
-       <revremark>Added description of portio sysfs attributes.</revremark>
-       </revision>
-       <revision>
-       <revnumber>0.5</revnumber>
-       <date>2008-05-22</date>
-       <authorinitials>hjk</authorinitials>
-       <revremark>Added description of write() function.</revremark>
-       </revision>
-       <revision>
-       <revnumber>0.4</revnumber>
-       <date>2007-11-26</date>
-       <authorinitials>hjk</authorinitials>
-       <revremark>Removed section about uio_dummy.</revremark>
-       </revision>
-       <revision>
-       <revnumber>0.3</revnumber>
-       <date>2007-04-29</date>
-       <authorinitials>hjk</authorinitials>
-       <revremark>Added section about userspace drivers.</revremark>
-       </revision>
-       <revision>
-       <revnumber>0.2</revnumber>
-       <date>2007-02-13</date>
-       <authorinitials>hjk</authorinitials>
-       <revremark>Update after multiple mappings were added.</revremark>
-       </revision>
-       <revision>
-       <revnumber>0.1</revnumber>
-       <date>2006-12-11</date>
-       <authorinitials>hjk</authorinitials>
-       <revremark>First draft.</revremark>
-       </revision>
-</revhistory>
-</bookinfo>
-
-<chapter id="aboutthisdoc">
-<?dbhtml filename="aboutthis.html"?>
-<title>About this document</title>
-
-<sect1 id="translations">
-<?dbhtml filename="translations.html"?>
-<title>Translations</title>
-
-<para>If you know of any translations for this document, or you are
-interested in translating it, please email me
-<email>hjk@hansjkoch.de</email>.
-</para>
-</sect1>
-
-<sect1 id="preface">
-<title>Preface</title>
-       <para>
-       For many types of devices, creating a Linux kernel driver is
-       overkill.  All that is really needed is some way to handle an
-       interrupt and provide access to the memory space of the
-       device.  The logic of controlling the device does not
-       necessarily have to be within the kernel, as the device does
-       not need to take advantage of any of the other resources that the
-       kernel provides.  One common class of such devices is
-       industrial I/O cards.
-       </para>
-       <para>
-       To address this situation, the userspace I/O system (UIO) was
-       designed.  For typical industrial I/O cards, only a very small
-       kernel module is needed. The main part of the driver will run in
-       user space. This simplifies development and reduces the risk of
-       serious bugs within a kernel module.
-       </para>
-       <para>
-       Please note that UIO is not a universal driver interface. Devices
-       that are already handled well by other kernel subsystems (like
-       networking, serial or USB) are not candidates for a UIO driver.
-       Hardware that is ideally suited for a UIO driver fulfills all of
-       the following:
-       </para>
-<itemizedlist>
-<listitem>
-       <para>The device has memory that can be mapped. The device can be
-       controlled completely by writing to this memory.</para>
-</listitem>
-<listitem>
-       <para>The device usually generates interrupts.</para>
-</listitem>
-<listitem>
-       <para>The device does not fit into one of the standard kernel
-       subsystems.</para>
-</listitem>
-</itemizedlist>
-</sect1>
-
-<sect1 id="thanks">
-<title>Acknowledgments</title>
-       <para>I'd like to thank Thomas Gleixner and Benedikt Spranger of
-       Linutronix, who have not only written most of the UIO code, but also
-       helped greatly writing this HOWTO by giving me all kinds of background
-       information.</para>
-</sect1>
-
-<sect1 id="feedback">
-<title>Feedback</title>
-       <para>Find something wrong with this document? (Or perhaps something
-       right?) I would love to hear from you. Please email me at
-       <email>hjk@hansjkoch.de</email>.</para>
-</sect1>
-</chapter>
-
-<chapter id="about">
-<?dbhtml filename="about.html"?>
-<title>About UIO</title>
-
-<para>If you use UIO for your card's driver, here's what you get:</para>
-
-<itemizedlist>
-<listitem>
-       <para>only one small kernel module to write and maintain.</para>
-</listitem>
-<listitem>
-       <para>develop the main part of your driver in user space,
-       with all the tools and libraries you're used to.</para>
-</listitem>
-<listitem>
-       <para>bugs in your driver won't crash the kernel.</para>
-</listitem>
-<listitem>
-       <para>updates of your driver can take place without recompiling
-       the kernel.</para>
-</listitem>
-</itemizedlist>
-
-<sect1 id="how_uio_works">
-<title>How UIO works</title>
-       <para>
-       Each UIO device is accessed through a device file and several
-       sysfs attribute files. The device file will be called
-       <filename>/dev/uio0</filename> for the first device, and
-       <filename>/dev/uio1</filename>, <filename>/dev/uio2</filename>
-       and so on for subsequent devices.
-       </para>
-
-       <para><filename>/dev/uioX</filename> is used to access the
-       address space of the card. Just use
-       <function>mmap()</function> to access registers or RAM
-       locations of your card.
-       </para>
-
-       <para>
-       Interrupts are handled by reading from
-       <filename>/dev/uioX</filename>. A blocking
-       <function>read()</function> from
-       <filename>/dev/uioX</filename> will return as soon as an
-       interrupt occurs. You can also use
-       <function>select()</function> on
-       <filename>/dev/uioX</filename> to wait for an interrupt. The
-       integer value read from <filename>/dev/uioX</filename>
-       represents the total interrupt count. You can use this number
-       to figure out if you missed some interrupts.
-       </para>
-       <para>
-       For some hardware that has more than one interrupt source internally,
-       but not separate IRQ mask and status registers, there might be
-       situations where userspace cannot determine what the interrupt source
-       was if the kernel handler disables them by writing to the chip's IRQ
-       register. In such a case, the kernel has to disable the IRQ completely
-       to leave the chip's register untouched. Now the userspace part can
-       determine the cause of the interrupt, but it cannot re-enable
-       interrupts. Another cornercase is chips where re-enabling interrupts
-       is a read-modify-write operation to a combined IRQ status/acknowledge
-       register. This would be racy if a new interrupt occurred
-       simultaneously.
-       </para>
-       <para>
-       To address these problems, UIO also implements a write() function. It
-       is normally not used and can be ignored for hardware that has only a
-       single interrupt source or has separate IRQ mask and status registers.
-       If you need it, however, a write to <filename>/dev/uioX</filename>
-       will call the <function>irqcontrol()</function> function implemented
-       by the driver. You have to write a 32-bit value that is usually either
-       0 or 1 to disable or enable interrupts. If a driver does not implement
-       <function>irqcontrol()</function>, <function>write()</function> will
-       return with <varname>-ENOSYS</varname>.
-       </para>
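A minimal userspace sketch of this write() interface, assuming a hypothetical /dev/uio0 whose driver implements irqcontrol():

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int32_t irq_on = 1;	/* 1 enables interrupts, 0 disables them */
	int fd = open("/dev/uio0", O_RDWR);	/* hypothetical UIO device */

	if (fd < 0) {
		perror("open /dev/uio0");
		return 1;
	}
	/* the 4-byte write is routed to the driver's irqcontrol() */
	if (write(fd, &irq_on, sizeof(irq_on)) != sizeof(irq_on))
		perror("irqcontrol write");
	close(fd);
	return 0;
}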
-
-       <para>
-       To handle interrupts properly, your custom kernel module can
-       provide its own interrupt handler. It will automatically be
-       called by the built-in handler.
-       </para>
-
-       <para>
-       For cards that don't generate interrupts but need to be
-       polled, there is the possibility to set up a timer that
-       triggers the interrupt handler at configurable time intervals.
-       This interrupt simulation is done by calling
-       <function>uio_event_notify()</function>
-       from the timer's event handler.
-       </para>
-
-       <para>
-       Each driver provides attributes that are used to read or write
-       variables. These attributes are accessible through sysfs
-       files.  A custom kernel driver module can add its own
-       files.  A custom kernel driver module can add its own
-       attributes to the device owned by the uio driver, but they are not
-       added to the UIO device itself at this time.  This might change in the
-       </para>
-
-       <para>
-       The following standard attributes are provided by the UIO
-       framework:
-       </para>
-<itemizedlist>
-<listitem>
-       <para>
-       <filename>name</filename>: The name of your device. It is
-       recommended to use the name of your kernel module for this.
-       </para>
-</listitem>
-<listitem>
-       <para>
-       <filename>version</filename>: A version string defined by your
-       driver. This allows the user space part of your driver to deal
-       with different versions of the kernel module.
-       </para>
-</listitem>
-<listitem>
-       <para>
-       <filename>event</filename>: The total number of interrupts
-       handled by the driver since the last time the device node was
-       read.
-       </para>
-</listitem>
-</itemizedlist>
-<para>
-       These attributes appear under the
-       <filename>/sys/class/uio/uioX</filename> directory.  Please
-       note that this directory might be a symlink, and not a real
-       directory.  Any userspace code that accesses it must be able
-       to handle this.
-</para>
-<para>
-       Each UIO device can make one or more memory regions available for
-       memory mapping. This is necessary because some industrial I/O cards
-       require access to more than one PCI memory region in a driver.
-</para>
-<para>
-       Each mapping has its own directory in sysfs, the first mapping
-       appears as <filename>/sys/class/uio/uioX/maps/map0/</filename>.
-       Subsequent mappings create directories <filename>map1/</filename>,
-       <filename>map2/</filename>, and so on. These directories will only
-       appear if the size of the mapping is not 0.
-</para>
-<para>
-       Each <filename>mapX/</filename> directory contains four read-only files
-       that show attributes of the memory:
-</para>
-<itemizedlist>
-<listitem>
-       <para>
-       <filename>name</filename>: A string identifier for this mapping. This
-       is optional, the string can be empty. Drivers can set this to make it
-       easier for userspace to find the correct mapping.
-       </para>
-</listitem>
-<listitem>
-       <para>
-       <filename>addr</filename>: The address of memory that can be mapped.
-       </para>
-</listitem>
-<listitem>
-       <para>
-       <filename>size</filename>: The size, in bytes, of the memory
-       pointed to by addr.
-       </para>
-</listitem>
-<listitem>
-       <para>
-       <filename>offset</filename>: The offset, in bytes, that has to be
-       added to the pointer returned by <function>mmap()</function> to get
-       to the actual device memory. This is important if the device's memory
-       is not page aligned. Remember that pointers returned by
-       <function>mmap()</function> are always page aligned, so it is good
-       style to always add this offset.
-       </para>
-</listitem>
-</itemizedlist>
-
-<para>
-       From userspace, the different mappings are distinguished by adjusting
-       the <varname>offset</varname> parameter of the
-       <function>mmap()</function> call. To map the memory of mapping N, you
-       have to use N times the page size as your offset:
-</para>
-<programlisting format="linespecific">
-offset = N * getpagesize();
-</programlisting>
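As an illustration, a sketch of selecting mapping N this way, assuming a hypothetical /dev/uio0 and a mapping of at least one page (in real code the size would be read from the maps/map1/size sysfs file):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int n = 1;		/* map the second region, i.e. map1/ */
	size_t len = 4096;	/* assumed size; read maps/map1/size in real code */
	int fd = open("/dev/uio0", O_RDWR);	/* hypothetical UIO device */
	void *regs;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* the offset N * page size selects mapping N */
	regs = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, n * getpagesize());
	if (regs == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* ... access the card's registers or RAM through regs ... */
	munmap(regs, len);
	close(fd);
	return 0;
}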
-
-<para>
-       Sometimes there is hardware with memory-like regions that can not be
-       mapped with the technique described here, but there are still ways to
-       access them from userspace. The most common example are x86 ioports.
-       On x86 systems, userspace can access these ioports using
-       <function>ioperm()</function>, <function>iopl()</function>,
-       <function>inb()</function>, <function>outb()</function>, and similar
-       functions.
-</para>
-<para>
-       Since these ioport regions can not be mapped, they will not appear under
-       <filename>/sys/class/uio/uioX/maps/</filename> like the normal memory
-       described above. Without information about the port regions a hardware
-       described above. Without information about the port regions the hardware
-       driver to find out which ports belong to which UIO device.
-</para>
-<para>
-       To address this situation, the new directory
-       <filename>/sys/class/uio/uioX/portio/</filename> was added. It only
-       exists if the driver wants to pass information about one or more port
-       regions to userspace. If that is the case, subdirectories named
-       <filename>port0</filename>, <filename>port1</filename>, and so on,
-       will appear underneath
-       <filename>/sys/class/uio/uioX/portio/</filename>.
-</para>
-<para>
-       Each <filename>portX/</filename> directory contains four read-only
-       files that show name, start, size, and type of the port region:
-</para>
-<itemizedlist>
-<listitem>
-       <para>
-       <filename>name</filename>: A string identifier for this port region.
-       The string is optional and can be empty. Drivers can set it to make it
-       easier for userspace to find a certain port region.
-       </para>
-</listitem>
-<listitem>
-       <para>
-       <filename>start</filename>: The first port of this region.
-       </para>
-</listitem>
-<listitem>
-       <para>
-       <filename>size</filename>: The number of ports in this region.
-       </para>
-</listitem>
-<listitem>
-       <para>
-       <filename>porttype</filename>: A string describing the type of port.
-       </para>
-</listitem>
-</itemizedlist>
-
-
-</sect1>
-</chapter>
-
-<chapter id="custom_kernel_module" xreflabel="Writing your own kernel module">
-<?dbhtml filename="custom_kernel_module.html"?>
-<title>Writing your own kernel module</title>
-       <para>
-       Please have a look at <filename>uio_cif.c</filename> as an
-       example. The following paragraphs explain the different
-       sections of this file.
-       </para>
-
-<sect1 id="uio_info">
-<title>struct uio_info</title>
-       <para>
-       This structure tells the framework the details of your driver.
-       Some of the members are required, others are optional.
-       </para>
-
-<itemizedlist>
-<listitem><para>
-<varname>const char *name</varname>: Required. The name of your driver as
-it will appear in sysfs. I recommend using the name of your module for this.
-</para></listitem>
-
-<listitem><para>
-<varname>const char *version</varname>: Required. This string appears in
-<filename>/sys/class/uio/uioX/version</filename>.
-</para></listitem>
-
-<listitem><para>
-<varname>struct uio_mem mem[ MAX_UIO_MAPS ]</varname>: Required if you
-have memory that can be mapped with <function>mmap()</function>. For each
-mapping you need to fill one of the <varname>uio_mem</varname> structures.
-See the description below for details.
-</para></listitem>
-
-<listitem><para>
-<varname>struct uio_port port[ MAX_UIO_PORTS_REGIONS ]</varname>: Required
-if you want to pass information about ioports to userspace. For each port
-region you need to fill one of the <varname>uio_port</varname> structures.
-See the description below for details.
-</para></listitem>
-
-<listitem><para>
-<varname>long irq</varname>: Required. If your hardware generates an
-interrupt, it is your module's task to determine the irq number during
-initialization. If you don't have a hardware generated interrupt but
-want to trigger the interrupt handler in some other way, set
-<varname>irq</varname> to <varname>UIO_IRQ_CUSTOM</varname>.
-If you had no interrupt at all, you could set
-<varname>irq</varname> to <varname>UIO_IRQ_NONE</varname>, though this
-rarely makes sense.
-</para></listitem>
-
-<listitem><para>
-<varname>unsigned long irq_flags</varname>: Required if you've set
-<varname>irq</varname> to a hardware interrupt number. The flags given
-here will be used in the call to <function>request_irq()</function>.
-</para></listitem>
-
-<listitem><para>
-<varname>int (*mmap)(struct uio_info *info, struct vm_area_struct
-*vma)</varname>: Optional. If you need a special
-<function>mmap()</function> function, you can set it here. If this
-pointer is not NULL, your <function>mmap()</function> will be called
-instead of the built-in one.
-</para></listitem>
-
-<listitem><para>
-<varname>int (*open)(struct uio_info *info, struct inode *inode)
-</varname>: Optional. You might want to have your own
-<function>open()</function>, e.g. to enable interrupts only when your
-device is actually used.
-</para></listitem>
-
-<listitem><para>
-<varname>int (*release)(struct uio_info *info, struct inode *inode)
-</varname>: Optional. If you define your own
-<function>open()</function>, you will probably also want a custom
-<function>release()</function> function.
-</para></listitem>
-
-<listitem><para>
-<varname>int (*irqcontrol)(struct uio_info *info, s32 irq_on)
-</varname>: Optional. If you need to be able to enable or disable
-interrupts from userspace by writing to <filename>/dev/uioX</filename>,
-you can implement this function. The parameter <varname>irq_on</varname>
-will be 0 to disable interrupts and 1 to enable them.
-</para></listitem>
-</itemizedlist>
-
-<para>
-Usually, your device will have one or more memory regions that can be mapped
-to user space. For each region, you have to set up a
-<varname>struct uio_mem</varname> in the <varname>mem[]</varname> array.
-Here's a description of the fields of <varname>struct uio_mem</varname>:
-</para>
-
-<itemizedlist>
-<listitem><para>
-<varname>const char *name</varname>: Optional. Set this to help identify
-the memory region, it will show up in the corresponding sysfs node.
-</para></listitem>
-
-<listitem><para>
-<varname>int memtype</varname>: Required if the mapping is used. Set this to
-<varname>UIO_MEM_PHYS</varname> if you have physical memory on your
-card to be mapped. Use <varname>UIO_MEM_LOGICAL</varname> for logical
-memory (e.g. allocated with <function>kmalloc()</function>). There's also
-<varname>UIO_MEM_VIRTUAL</varname> for virtual memory.
-</para></listitem>
-
-<listitem><para>
-<varname>phys_addr_t addr</varname>: Required if the mapping is used.
-Fill in the address of your memory block. This address is the one that
-appears in sysfs.
-</para></listitem>
-
-<listitem><para>
-<varname>resource_size_t size</varname>: Fill in the size of the
-memory block that <varname>addr</varname> points to. If <varname>size</varname>
-is zero, the mapping is considered unused. Note that you
-<emphasis>must</emphasis> initialize <varname>size</varname> with zero for
-all unused mappings.
-</para></listitem>
-
-<listitem><para>
-<varname>void *internal_addr</varname>: If you have to access this memory
-region from within your kernel module, you will want to map it internally by
-using something like <function>ioremap()</function>. Addresses
-returned by this function cannot be mapped to user space, so you must not
-store them in <varname>addr</varname>. Use <varname>internal_addr</varname>
-instead to remember such an address.
-</para></listitem>
-</itemizedlist>
-
-<para>
-Please do not touch the <varname>map</varname> element of
-<varname>struct uio_mem</varname>! It is used by the UIO framework
-to set up sysfs files for this mapping. Simply leave it alone.
-</para>
-
-<para>
-Sometimes, your device can have one or more port regions which can not be
-mapped to userspace. But if there are other possibilities for userspace to
-access these ports, it makes sense to make information about the ports
-available in sysfs. For each region, you have to set up a
-<varname>struct uio_port</varname> in the <varname>port[]</varname> array.
-Here's a description of the fields of <varname>struct uio_port</varname>:
-</para>
-
-<itemizedlist>
-<listitem><para>
-<varname>char *porttype</varname>: Required. Set this to one of the predefined
-constants. Use <varname>UIO_PORT_X86</varname> for the ioports found in x86
-architectures.
-</para></listitem>
-
-<listitem><para>
-<varname>unsigned long start</varname>: Required if the port region is used.
-Fill in the number of the first port of this region.
-</para></listitem>
-
-<listitem><para>
-<varname>unsigned long size</varname>: Fill in the number of ports in this
-region. If <varname>size</varname> is zero, the region is considered unused.
-Note that you <emphasis>must</emphasis> initialize <varname>size</varname>
-with zero for all unused regions.
-</para></listitem>
-</itemizedlist>
-
-<para>
-Please do not touch the <varname>portio</varname> element of
-<varname>struct uio_port</varname>! It is used internally by the UIO
-framework to set up sysfs files for this region. Simply leave it alone.
-</para>
-
-</sect1>
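Pulling these fields together, here is a condensed, hypothetical sketch of a probe function that fills struct uio_info with one physical mapping. The names, IRQ number and address are placeholders, error handling is omitted, and the registration call uio_register_device() belongs to the UIO driver API rather than to the structures described above:

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/uio_driver.h>

static irqreturn_t my_handler(int irq, struct uio_info *info)
{
	/* acknowledge or mask the hardware here if it requires it */
	return IRQ_HANDLED;
}

static struct uio_info my_uio_info = {
	.name    = "my_uio_card",	/* appears in /sys/class/uio/uioX/name */
	.version = "0.1",
	.irq     = 42,			/* placeholder hardware IRQ number */
	.handler = my_handler,
};

static int my_probe(struct platform_device *pdev)
{
	/* one physical region; size 0 leaves the remaining slots unused */
	my_uio_info.mem[0].memtype = UIO_MEM_PHYS;
	my_uio_info.mem[0].addr    = 0xd0000000;	/* placeholder */
	my_uio_info.mem[0].size    = 0x1000;

	return uio_register_device(&pdev->dev, &my_uio_info);
}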
-
-<sect1 id="adding_irq_handler">
-<title>Adding an interrupt handler</title>
-       <para>
-       What you need to do in your interrupt handler depends on your
-       hardware and on how you want to handle it. You should try to
-       keep the amount of code in your kernel interrupt handler low.
-       If your hardware requires no action that you
-       <emphasis>have</emphasis> to perform after each interrupt,
-       then your handler can be empty.</para> <para>If, on the other
-       hand, your hardware <emphasis>needs</emphasis> some action to
-       be performed after each interrupt, then you
-       <emphasis>must</emphasis> do it in your kernel module. Note
-       that you cannot rely on the userspace part of your driver. Your
-       userspace program can terminate at any time, possibly leaving
-       your hardware in a state where proper interrupt handling is
-       still required.
-       </para>
-
-       <para>
-       There might also be applications where you want to read data
-       from your hardware at each interrupt and buffer it in a piece
-       of kernel memory you've allocated for that purpose.  With this
-       technique you could avoid loss of data if your userspace
-       program misses an interrupt.
-       </para>
-
-       <para>
-       A note on shared interrupts: Your driver should support
-       interrupt sharing whenever this is possible. It is possible if
-       and only if your driver can detect whether your hardware has
-       triggered the interrupt or not. This is usually done by looking
-       at an interrupt status register. If your driver sees that the
-       IRQ bit is actually set, it will perform its actions, and the
-       handler returns IRQ_HANDLED. If the driver detects that it was
-       not your hardware that caused the interrupt, it will do nothing
-       and return IRQ_NONE, allowing the kernel to call the next
-       possible interrupt handler.
-       </para>
-
-       <para>
-       If you decide not to support shared interrupts, your card
-       won't work in computers with no free interrupts. As this
-       frequently happens on the PC platform, you can save yourself a
-       lot of trouble by supporting interrupt sharing.
-       </para>
-</sect1>
-
-<sect1 id="using_uio_pdrv">
-<title>Using uio_pdrv for platform devices</title>
-       <para>
-       In many cases, UIO drivers for platform devices can be handled in a
-       generic way. In the same place where you define your
-       <varname>struct platform_device</varname>, you simply also implement
-       your interrupt handler and fill your
-       <varname>struct uio_info</varname>. A pointer to this
-       <varname>struct uio_info</varname> is then used as
-       <varname>platform_data</varname> for your platform device.
-       </para>
-       <para>
-       You also need to set up an array of <varname>struct resource</varname>
-       containing addresses and sizes of your memory mappings. This
-       information is passed to the driver using the
-       <varname>.resource</varname> and <varname>.num_resources</varname>
-       elements of <varname>struct platform_device</varname>.
-       </para>
-       <para>
-       You now have to set the <varname>.name</varname> element of
-       <varname>struct platform_device</varname> to
-       <varname>"uio_pdrv"</varname> to use the generic UIO platform device
-       driver. This driver will fill the <varname>mem[]</varname> array
-       according to the resources given, and register the device.
-       </para>
-       <para>
-       The advantage of this approach is that you only have to edit a file
-       you need to edit anyway. You do not have to create an extra driver.
-       </para>
-</sect1>
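A sketch of such a board-file entry, again with placeholder names, addresses and IRQ number. The interrupt handler would be the same kind of function as in the sketch above, and the device would be registered with platform_device_register(), which is not covered by this HOWTO:

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/uio_driver.h>

static struct uio_info my_uio_info = {
	.name    = "my_uio_card",
	.version = "0.1",
	.irq     = 42,			/* placeholder hardware IRQ */
	/* .handler = my_handler,	   your own interrupt handler */
};

static struct resource my_uio_resources[] = {
	{
		.name  = "registers",
		.start = 0xd0000000,	/* placeholder register block */
		.end   = 0xd0000fff,
		.flags = IORESOURCE_MEM,
	},
};

static struct platform_device my_uio_pdev = {
	.name          = "uio_pdrv",	/* selects the generic UIO platform driver */
	.id            = -1,
	.resource      = my_uio_resources,
	.num_resources = ARRAY_SIZE(my_uio_resources),
	.dev = {
		.platform_data = &my_uio_info,	/* pointer to the uio_info */
	},
};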
-
-<sect1 id="using_uio_pdrv_genirq">
-<title>Using uio_pdrv_genirq for platform devices</title>
-       <para>
-       Especially in embedded devices, you frequently find chips where the
-       irq pin is tied to its own dedicated interrupt line. In such cases,
-       where you can be really sure the interrupt is not shared, we can take
-       the concept of <varname>uio_pdrv</varname> one step further and use a
-       generic interrupt handler. That's what
-       <varname>uio_pdrv_genirq</varname> does.
-       </para>
-       <para>
-       The setup for this driver is the same as described above for
-       <varname>uio_pdrv</varname>, except that you do not implement an
-       interrupt handler. The <varname>.handler</varname> element of
-       <varname>struct uio_info</varname> must remain
-       <varname>NULL</varname>. The  <varname>.irq_flags</varname> element
-       must not contain <varname>IRQF_SHARED</varname>.
-       </para>
-       <para>
-       You will set the <varname>.name</varname> element of
-       <varname>struct platform_device</varname> to
-       <varname>"uio_pdrv_genirq"</varname> to use this driver.
-       </para>
-       <para>
-       The generic interrupt handler of <varname>uio_pdrv_genirq</varname>
-       will simply disable the interrupt line using
-       <function>disable_irq_nosync()</function>. After doing its work,
-       userspace can reenable the interrupt by writing 0x00000001 to the UIO
-       device file. The driver already implements an
-       <function>irq_control()</function> to make this possible, you must not
-       implement your own.
-       </para>
-       <para>
-       Using <varname>uio_pdrv_genirq</varname> not only saves a few lines of
-       interrupt handler code. You also do not need to know anything about
-       the chip's internal registers to create the kernel part of the driver.
-       All you need to know is the irq number of the pin the chip is
-       connected to.
-       </para>
-</sect1>
-
-<sect1 id="using-uio_dmem_genirq">
-<title>Using uio_dmem_genirq for platform devices</title>
-       In addition to statically allocated memory ranges, there may also be
-       a desire to use dynamically allocated regions in a user space driver.
-       In particular, being able to access memory made available through the
-       dma-mapping API may be particularly useful.  The
-       dma-mapping API, may be particularly useful.  The
-       <varname>uio_dmem_genirq</varname> driver provides a way to accomplish
-       this.
-       </para>
-       <para>
-       This driver is used in a similar manner to the
-       <varname>"uio_pdrv_genirq"</varname> driver with respect to interrupt
-       configuration and handling.
-       </para>
-       <para>
-       Set the <varname>.name</varname> element of
-       <varname>struct platform_device</varname> to
-       <varname>"uio_dmem_genirq"</varname> to use this driver.
-       </para>
-       <para>
-       When using this driver, fill in the <varname>.platform_data</varname>
-       element of <varname>struct platform_device</varname>, which is of type
-       <varname>struct uio_dmem_genirq_pdata</varname> and which contains the
-       following elements:
-       </para>
-       <itemizedlist>
-       <listitem><para><varname>struct uio_info uioinfo</varname>: The same
-       structure used as the  <varname>uio_pdrv_genirq</varname> platform
-       data</para></listitem>
-       <listitem><para><varname>unsigned int *dynamic_region_sizes</varname>:
-       Pointer to list of sizes of dynamic memory regions to be mapped into
-       user space.
-       </para></listitem>
-       <listitem><para><varname>unsigned int num_dynamic_regions</varname>:
-       Number of elements in <varname>dynamic_region_sizes</varname> array.
-       </para></listitem>
-       </itemizedlist>
-       <para>
-       The dynamic regions defined in the platform data will be appended to
-       the <varname> mem[] </varname> array after the platform device
-       resources, which implies that the total number of static and dynamic
-       memory regions cannot exceed <varname>MAX_UIO_MAPS</varname>.
-       </para>
-       <para>
-       The dynamic memory regions will be allocated when the UIO device file,
-       <varname>/dev/uioX</varname> is opened.
-       Similar to static memory resources, the memory region information for
-       dynamic regions is then visible via sysfs at
-       <varname>/sys/class/uio/uioX/maps/mapY/*</varname>.
-       The dynamic memory regions will be freed when the UIO device file is
-       closed. When no processes are holding the device file open, the address
-       returned to userspace is ~0.
-       </para>
-</sect1>
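A sketch of such platform data, with placeholder names and region sizes; the header location and the surrounding platform device setup, which follows the uio_pdrv_genirq pattern above, are assumptions:

#include <linux/kernel.h>
#include <linux/platform_data/uio_dmem_genirq.h>	/* assumed header location */
#include <linux/uio_driver.h>

/* two dynamically allocated regions; the sizes are placeholders */
static unsigned int my_dmem_sizes[] = { 16 * 1024, 64 * 1024 };

static struct uio_dmem_genirq_pdata my_dmem_pdata = {
	.uioinfo = {
		.name    = "my_dmem_card",	/* placeholder name */
		.version = "0.1",
		.irq     = 42,			/* placeholder IRQ */
	},
	.dynamic_region_sizes = my_dmem_sizes,
	.num_dynamic_regions  = ARRAY_SIZE(my_dmem_sizes),
};

/*
 * The platform device would then set .name = "uio_dmem_genirq" and
 * .platform_data = &my_dmem_pdata, as described above.
 */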
-
-</chapter>
-
-<chapter id="userspace_driver" xreflabel="Writing a driver in user space">
-<?dbhtml filename="userspace_driver.html"?>
-<title>Writing a driver in userspace</title>
-       <para>
-       Once you have a working kernel module for your hardware, you can
-       write the userspace part of your driver. You don't need any special
-       libraries, your driver can be written in any reasonable language,
-       you can use floating point numbers and so on. In short, you can
-       use all the tools and libraries you'd normally use for writing a
-       userspace application.
-       </para>
-
-<sect1 id="getting_uio_information">
-<title>Getting information about your UIO device</title>
-       <para>
-       Information about all UIO devices is available in sysfs. The
-       first thing you should do in your driver is check
-       <varname>name</varname> and <varname>version</varname> to
-       make sure you're talking to the right device and that its kernel
-       driver has the version you expect.
-       </para>
-       <para>
-       You should also make sure that the memory mapping you need
-       exists and has the size you expect.
-       </para>
-       <para>
-       There is a tool called <varname>lsuio</varname> that lists
-       UIO devices and their attributes. It is available here:
-       </para>
-       <para>
-       <ulink url="http://www.osadl.org/projects/downloads/UIO/user/">
-               http://www.osadl.org/projects/downloads/UIO/user/</ulink>
-       </para>
-       <para>
-       With <varname>lsuio</varname> you can quickly check if your
-       kernel module is loaded and which attributes it exports.
-       Have a look at the manpage for details.
-       </para>
-       <para>
-       The source code of <varname>lsuio</varname> can serve as an
-       example for getting information about a UIO device.
-       The file <filename>uio_helper.c</filename> contains a lot of
-       functions you could use in your userspace driver code.
-       </para>
-</sect1>
-
-<sect1 id="mmap_device_memory">
-<title>mmap() device memory</title>
-       <para>
-       After you made sure you've got the right device with the
-       memory mappings you need, all you have to do is to call
-       <function>mmap()</function> to map the device's memory
-       to userspace.
-       </para>
-       <para>
-       The parameter <varname>offset</varname> of the
-       <function>mmap()</function> call has a special meaning
-       for UIO devices: It is used to select which mapping of
-       your device you want to map. To map the memory of
-       mapping N, you have to use N times the page size as
-       your offset:
-       </para>
-<programlisting format="linespecific">
-       offset = N * getpagesize();
-</programlisting>
-       <para>
-       N starts from zero, so if you've got only one memory
-       range to map, set <varname>offset = 0</varname>.
-       A drawback of this technique is that memory is always
-       mapped beginning with its start address.
-       </para>
-</sect1>
-
-<sect1 id="wait_for_interrupts">
-<title>Waiting for interrupts</title>
-       <para>
-       After you successfully mapped your device's memory, you
-       can access it like an ordinary array. Usually, you will
-       perform some initialization. After that, your hardware
-       starts working and will generate an interrupt as soon
-       as it's finished, has some data available, or needs your
-       attention because an error occurred.
-       </para>
-       <para>
-       <filename>/dev/uioX</filename> is a read-only file. A
-       <function>read()</function> will always block until an
-       interrupt occurs. There is only one legal value for the
-       <varname>count</varname> parameter of
-       <function>read()</function>, and that is the size of a
-       signed 32 bit integer (4). Any other value for
-       <varname>count</varname> causes <function>read()</function>
-       to fail. The signed 32 bit integer read is the interrupt
-       count of your device. If the value is one more than the value
-       you read the last time, everything is OK. If the difference
-       is greater than one, you missed interrupts.
-       </para>
-       <para>
-       You can also use <function>select()</function> on
-       <filename>/dev/uioX</filename>.
-       </para>
-</sect1>
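A sketch of the select()-based variant mentioned above, again assuming a hypothetical /dev/uio0:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/uio0", O_RDONLY);	/* hypothetical UIO device */
	fd_set fds;
	int32_t icount;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	for (;;) {
		FD_ZERO(&fds);
		FD_SET(fd, &fds);
		/* wait until an interrupt is pending */
		if (select(fd + 1, &fds, NULL, NULL, NULL) < 0) {
			perror("select");
			break;
		}
		/* read() must be exactly 4 bytes: the total interrupt count */
		if (read(fd, &icount, sizeof(icount)) != sizeof(icount)) {
			perror("read");
			break;
		}
		printf("interrupt count: %d\n", (int)icount);
	}
	close(fd);
	return 0;
}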
-
-</chapter>
-
-<chapter id="uio_pci_generic" xreflabel="Using Generic driver for PCI cards">
-<?dbhtml filename="uio_pci_generic.html"?>
-<title>Generic PCI UIO driver</title>
-       <para>
-       The generic driver is a kernel module named uio_pci_generic.
-       It can work with any device compliant to PCI 2.3 (circa 2002) and
-       any compliant PCI Express device. Using this, you only need to
-        write the userspace driver, removing the need to write
-        a hardware-specific kernel module.
-       </para>
-
-<sect1 id="uio_pci_generic_binding">
-<title>Making the driver recognize the device</title>
-       <para>
-Since the driver does not declare any device ids, it will not get loaded
-automatically and will not automatically bind to any devices; you must load it
-and assign an ID to the driver yourself. For example:
-       <programlisting>
- modprobe uio_pci_generic
- echo &quot;8086 10f5&quot; &gt; /sys/bus/pci/drivers/uio_pci_generic/new_id
-       </programlisting>
-       </para>
-       <para>
-If there already is a hardware-specific kernel driver for your device, the
-generic driver still won't bind to it. In this case, if you want to use the
-generic driver (why would you?), you'll have to manually unbind the
-hardware-specific driver and bind the generic driver, like this:
-       <programlisting>
-    echo -n 0000:00:19.0 &gt; /sys/bus/pci/drivers/e1000e/unbind
-    echo -n 0000:00:19.0 &gt; /sys/bus/pci/drivers/uio_pci_generic/bind
-       </programlisting>
-       </para>
-       <para>
-You can verify that the device has been bound to the driver
-by looking for it in sysfs, for example like the following:
-       <programlisting>
-    ls -l /sys/bus/pci/devices/0000:00:19.0/driver
-       </programlisting>
-which, if successful, should print
-       <programlisting>
-  .../0000:00:19.0/driver -&gt; ../../../bus/pci/drivers/uio_pci_generic
-       </programlisting>
-Note that the generic driver will not bind to old PCI 2.2 devices.
-If binding the device failed, run the following command:
-       <programlisting>
-  dmesg
-       </programlisting>
-and look in the output for failure reasons.
-       </para>
-</sect1>
-
-<sect1 id="uio_pci_generic_internals">
-<title>Things to know about uio_pci_generic</title>
-       <para>
-Interrupts are handled using the Interrupt Disable bit in the PCI command
-register and Interrupt Status bit in the PCI status register.  All devices
-compliant to PCI 2.3 (circa 2002) and all compliant PCI Express devices should
-support these bits.  uio_pci_generic detects this support, and won't bind to
-devices which do not support the Interrupt Disable Bit in the command register.
-       </para>
-       <para>
-On each interrupt, uio_pci_generic sets the Interrupt Disable bit.
-This prevents the device from generating further interrupts
-until the bit is cleared. The userspace driver should clear this
-bit before blocking and waiting for more interrupts.
-       </para>
-</sect1>
-<sect1 id="uio_pci_generic_userspace">
-<title>Writing userspace driver using uio_pci_generic</title>
-       <para>
-A userspace driver can use the PCI sysfs interface, or the
-libpci library that wraps it, to talk to the device and to
-re-enable interrupts by writing to the command register.
-       </para>
-</sect1>
-<sect1 id="uio_pci_generic_example">
-<title>Example code using uio_pci_generic</title>
-       <para>
-Here is some sample userspace driver code using uio_pci_generic:
-<programlisting>
-#include &lt;stdlib.h&gt;
-#include &lt;stdio.h&gt;
-#include &lt;unistd.h&gt;
-#include &lt;sys/types.h&gt;
-#include &lt;sys/stat.h&gt;
-#include &lt;fcntl.h&gt;
-#include &lt;errno.h&gt;
-
-int main()
-{
-       int uiofd;
-       int configfd;
-       int err;
-       int i;
-       unsigned icount;
-       unsigned char command_high;
-
-       uiofd = open(&quot;/dev/uio0&quot;, O_RDONLY);
-       if (uiofd &lt; 0) {
-               perror(&quot;uio open:&quot;);
-               return errno;
-       }
-       configfd = open(&quot;/sys/class/uio/uio0/device/config&quot;, O_RDWR);
-       if (configfd &lt; 0) {
-               perror(&quot;config open:&quot;);
-               return errno;
-       }
-
-       /* Read and cache command value */
-       err = pread(configfd, &amp;command_high, 1, 5);
-       if (err != 1) {
-               perror(&quot;command config read:&quot;);
-               return errno;
-       }
-       command_high &amp;= ~0x4;
-
-       for(i = 0;; ++i) {
-               /* Print out a message, for debugging. */
-               if (i == 0)
-                       fprintf(stderr, &quot;Started uio test driver.\n&quot;);
-               else
-                       fprintf(stderr, &quot;Interrupts: %d\n&quot;, icount);
-
-               /****************************************/
-               /* Here we got an interrupt from the
-                  device. Do something to it. */
-               /****************************************/
-
-               /* Re-enable interrupts. */
-               err = pwrite(configfd, &amp;command_high, 1, 5);
-               if (err != 1) {
-                       perror(&quot;config write:&quot;);
-                       break;
-               }
-
-               /* Wait for next interrupt. */
-               err = read(uiofd, &amp;icount, 4);
-               if (err != 4) {
-                       perror(&quot;uio read:&quot;);
-                       break;
-               }
-
-       }
-       return errno;
-}
-
-</programlisting>
-       </para>
-</sect1>
-
-</chapter>
-
-<chapter id="uio_hv_generic" xreflabel="Using Generic driver for Hyper-V VMBUS">
-<?dbhtml filename="uio_hv_generic.html"?>
-<title>Generic Hyper-V UIO driver</title>
-       <para>
-       The generic driver is a kernel module named uio_hv_generic.
-       It supports devices on the Hyper-V VMBus similar to uio_pci_generic
-       on PCI bus.
-       </para>
-
-<sect1 id="uio_hv_generic_binding">
-<title>Making the driver recognize the device</title>
-       <para>
-Since the driver does not declare any device GUIDs, it will not get loaded
-automatically and will not automatically bind to any devices; you must load it
-and assign an ID to the driver yourself. For example, to use the network device
-GUID:
-       <programlisting>
- modprobe uio_hv_generic
- echo &quot;f8615163-df3e-46c5-913f-f2d2f965ed0e&quot; &gt; /sys/bus/vmbus/drivers/uio_hv_generic/new_id
-       </programlisting>
-       </para>
-       <para>
-If there already is a hardware-specific kernel driver for the device, the
-generic driver still won't bind to it. In this case, if you want to use the
-generic driver (why would you?), you'll have to manually unbind the
-hardware-specific driver and bind the generic driver, like this:
-       <programlisting>
-         echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 &gt; /sys/bus/vmbus/drivers/hv_netvsc/unbind
-         echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 &gt; /sys/bus/vmbus/drivers/uio_hv_generic/bind
-       </programlisting>
-       </para>
-       <para>
-You can verify that the device has been bound to the driver
-by looking for it in sysfs, for example like the following:
-       <programlisting>
-    ls -l /sys/bus/vmbus/devices/vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3/driver
-       </programlisting>
-which, if successful, should print
-       <programlisting>
-  .../vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3/driver -&gt; ../../../bus/vmbus/drivers/uio_hv_generic
-       </programlisting>
-       </para>
-</sect1>
-
-<sect1 id="uio_hv_generic_internals">
-<title>Things to know about uio_hv_generic</title>
-       <para>
-On each interrupt, uio_hv_generic sets the Interrupt Disable bit.
-This prevents the device from generating further interrupts
-until the bit is cleared. The userspace driver should clear this
-bit before blocking and waiting for more interrupts.
-       </para>
-</sect1>
-</chapter>
-
-<appendix id="app1">
-<title>Further information</title>
-<itemizedlist>
-       <listitem><para>
-                       <ulink url="http://www.osadl.org">
-                               OSADL homepage.</ulink>
-               </para></listitem>
-       <listitem><para>
-               <ulink url="http://www.linutronix.de">
-                Linutronix homepage.</ulink>
-               </para></listitem>
-</itemizedlist>
-</appendix>
-
-</book>
index f2e745844d5bac2820772a254eff8d46a0853ef2..608ba95d946153ecd1d8ad3917421b851e9c4c15 100644 (file)
                        When zero, profiling data is discarded and associated
                        debugfs files are removed at module unload time.
 
+       goldfish        [X86] Enable the goldfish android emulator platform.
+                       Don't use this when you are not running on the
+                       android emulator
+
        gpt             [EFI] Forces disk with valid GPT signature but
                        invalid Protective MBR to be treated as GPT. If the
                        primary GPT is corrupted, it enables the backup/alternate
diff --git a/Documentation/devicetree/bindings/misc/idt_89hpesx.txt b/Documentation/devicetree/bindings/misc/idt_89hpesx.txt
new file mode 100644 (file)
index 0000000..b9093b7
--- /dev/null
@@ -0,0 +1,44 @@
+EEPROM / CSR SMBus-slave interface of IDT 89HPESx devices
+
+Required properties:
+  - compatible : should be "<manufacturer>,<type>"
+                Basically there is only one manufacturer: idt, but some
+                compatible devices may be produced in future. Following devices
+                are supported: 89hpes8nt2, 89hpes12nt3, 89hpes24nt6ag2,
+                89hpes32nt8ag2, 89hpes32nt8bg2, 89hpes12nt12g2, 89hpes16nt16g2,
+                89hpes24nt24g2, 89hpes32nt24ag2, 89hpes32nt24bg2;
+                89hpes12n3, 89hpes12n3a, 89hpes24n3, 89hpes24n3a;
+                89hpes32h8, 89hpes32h8g2, 89hpes48h12, 89hpes48h12g2,
+                89hpes48h12ag2, 89hpes16h16, 89hpes22h16, 89hpes22h16g2,
+                89hpes34h16, 89hpes34h16g2, 89hpes64h16, 89hpes64h16g2,
+                89hpes64h16ag2;
+                89hpes12t3g2, 89hpes24t3g2, 89hpes16t4, 89hpes4t4g2,
+                89hpes10t4g2, 89hpes16t4g2, 89hpes16t4ag2, 89hpes5t5,
+                89hpes6t5, 89hpes8t5, 89hpes8t5a, 89hpes24t6, 89hpes6t6g2,
+                89hpes24t6g2, 89hpes16t7, 89hpes32t8, 89hpes32t8g2,
+                89hpes48t12, 89hpes48t12g2.
+  - reg :       I2C address of the IDT 89HPESx device.
+
+Optionally there can be EEPROM-compatible subnode:
+  - compatible:  There are five EEPROM devices supported: 24c32, 24c64, 24c128,
+                24c256 and 24c512, which differ in size.
+  - reg:         Custom address of the EEPROM device (if not specified, the IDT
+    (optional)  89HPESx device will try to communicate with the EEPROM at its
+                default address, 0x50)
+  - read-only :         Parameterless property disables writes to the EEPROM
+    (optional)
+
+Example:
+       idt@60 {
+               compatible = "idt,89hpes32nt8ag2";
+               reg = <0x74>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               eeprom@50 {
+                       compatible = "onsemi,24c64";
+                       reg = <0x50>;
+                       read-only;
+               };
+       };
+
index 383d5889e95a3fe107e48319b77ec23ec11499ac..966a72ecc6bd8586a528b15119dd453b4d88d13b 100644 (file)
@@ -1,13 +1,15 @@
 Freescale i.MX6 On-Chip OTP Controller (OCOTP) device tree bindings
 
 This binding represents the on-chip eFuse OTP controller found on
-i.MX6Q/D, i.MX6DL/S, i.MX6SL, and i.MX6SX SoCs.
+i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX and i.MX6UL SoCs.
 
 Required properties:
 - compatible: should be one of
        "fsl,imx6q-ocotp" (i.MX6Q/D/DL/S),
        "fsl,imx6sl-ocotp" (i.MX6SL), or
-       "fsl,imx6sx-ocotp" (i.MX6SX), followed by "syscon".
+       "fsl,imx6sx-ocotp" (i.MX6SX),
+       "fsl,imx6ul-ocotp" (i.MX6UL),
+       followed by "syscon".
 - reg: Should contain the register base and length.
 - clocks: Should contain a phandle pointing to the gated peripheral clock.
 
index 068c2c03c38f7226dd36a6a38ea12a6d87565892..267da4410aefd0ac8e17beedfc33adf394e014e4 100644 (file)
@@ -42,6 +42,12 @@ Optional properties in the area nodes:
          and in use by another device or devices
 - export : indicates that the reserved SRAM area may be accessed outside
            of the kernel, e.g. by bootloader or userspace
+- protect-exec : Same as 'pool' above but with the additional
+                constraint that code will be run from the region and
+                that the memory is maintained as read-only, executable
+                during code execution. NOTE: This region must be page
+                aligned on start and end in order to properly allow
+                manipulation of the page attributes.
 - label : the name for the reserved partition, if omitted, the label
           is taken from the node name excluding the unit address.
 
index 5475a2807e7accc8241cb7aa8c15f7f09df76ef7..c5a1cd0a4ae72bf8dc23ff43635a64cc59e3b440 100644 (file)
@@ -30,6 +30,7 @@ available subsections can be seen below.
    miscellaneous
    vme
    80211/index
+   uio-howto
 
 .. only::  subproject and html
 
diff --git a/Documentation/driver-api/uio-howto.rst b/Documentation/driver-api/uio-howto.rst
new file mode 100644 (file)
index 0000000..f73d660
--- /dev/null
@@ -0,0 +1,705 @@
+=======================
+The Userspace I/O HOWTO
+=======================
+
+:Author: Hans-Jürgen Koch, Linux developer, Linutronix
+:Date:   2006-12-11
+
+About this document
+===================
+
+Translations
+------------
+
+If you know of any translations for this document, or you are interested
+in translating it, please email me hjk@hansjkoch.de.
+
+Preface
+-------
+
+For many types of devices, creating a Linux kernel driver is overkill.
+All that is really needed is some way to handle an interrupt and provide
+access to the memory space of the device. The logic of controlling the
+device does not necessarily have to be within the kernel, as the device
+does not need to take advantage of any of the other resources that the
+kernel provides. One common class of such devices is industrial I/O
+cards.
+
+To address this situation, the userspace I/O system (UIO) was designed.
+For typical industrial I/O cards, only a very small kernel module is
+needed. The main part of the driver will run in user space. This
+simplifies development and reduces the risk of serious bugs within a
+kernel module.
+
+Please note that UIO is not a universal driver interface. Devices that
+are already handled well by other kernel subsystems (like networking or
+serial or USB) are no candidates for a UIO driver. Hardware that is
+ideally suited for a UIO driver fulfills all of the following:
+
+-  The device has memory that can be mapped. The device can be
+   controlled completely by writing to this memory.
+
+-  The device usually generates interrupts.
+
+-  The device does not fit into one of the standard kernel subsystems.
+
+Acknowledgments
+---------------
+
+I'd like to thank Thomas Gleixner and Benedikt Spranger of Linutronix,
+who have not only written most of the UIO code, but also helped greatly
+writing this HOWTO by giving me all kinds of background information.
+
+Feedback
+--------
+
+Find something wrong with this document? (Or perhaps something right?) I
+would love to hear from you. Please email me at hjk@hansjkoch.de.
+
+About UIO
+=========
+
+If you use UIO for your card's driver, here's what you get:
+
+-  only one small kernel module to write and maintain.
+
+-  develop the main part of your driver in user space, with all the
+   tools and libraries you're used to.
+
+-  bugs in your driver won't crash the kernel.
+
+-  updates of your driver can take place without recompiling the kernel.
+
+How UIO works
+-------------
+
+Each UIO device is accessed through a device file and several sysfs
+attribute files. The device file will be called ``/dev/uio0`` for the
+first device, and ``/dev/uio1``, ``/dev/uio2`` and so on for subsequent
+devices.
+
+``/dev/uioX`` is used to access the address space of the card. Just use
+:c:func:`mmap()` to access registers or RAM locations of your card.
+
+Interrupts are handled by reading from ``/dev/uioX``. A blocking
+:c:func:`read()` from ``/dev/uioX`` will return as soon as an
+interrupt occurs. You can also use :c:func:`select()` on
+``/dev/uioX`` to wait for an interrupt. The integer value read from
+``/dev/uioX`` represents the total interrupt count. You can use this
+number to figure out if you missed some interrupts.
+
+For some hardware that has more than one interrupt source internally,
+but not separate IRQ mask and status registers, there might be
+situations where userspace cannot determine what the interrupt source
+was if the kernel handler disables them by writing to the chip's IRQ
+register. In such a case, the kernel has to disable the IRQ completely
+to leave the chip's register untouched. Now the userspace part can
+determine the cause of the interrupt, but it cannot re-enable
+interrupts. Another corner case is chips where re-enabling interrupts is
+a read-modify-write operation to a combined IRQ status/acknowledge
+register. This would be racy if a new interrupt occurred simultaneously.
+
+To address these problems, UIO also implements a write() function. It is
+normally not used and can be ignored for hardware that has only a single
+interrupt source or has separate IRQ mask and status registers. If you
+need it, however, a write to ``/dev/uioX`` will call the
+:c:func:`irqcontrol()` function implemented by the driver. You have
+to write a 32-bit value that is usually either 0 or 1 to disable or
+enable interrupts. If a driver does not implement
+:c:func:`irqcontrol()`, :c:func:`write()` will return with
+``-ENOSYS``.
+
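+As a short illustration (assuming the device node is ``/dev/uio0`` and
+that its kernel driver implements :c:func:`irqcontrol()`), interrupts
+could be re-enabled from userspace like this::
+
+    #include <fcntl.h>
+    #include <stdint.h>
+    #include <stdio.h>
+    #include <unistd.h>
+
+    int main(void)
+    {
+        int fd = open("/dev/uio0", O_RDWR);
+        int32_t irq_on = 1;    /* write 0 instead to disable interrupts */
+
+        if (fd < 0) {
+            perror("uio open:");
+            return 1;
+        }
+        if (write(fd, &irq_on, sizeof(irq_on)) != sizeof(irq_on))
+            perror("uio write:");   /* ENOSYS if irqcontrol() is missing */
+        return 0;
+    }
+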
+To handle interrupts properly, your custom kernel module can provide its
+own interrupt handler. It will automatically be called by the built-in
+handler.
+
+For cards that don't generate interrupts but need to be polled, there is
+the possibility to set up a timer that triggers the interrupt handler at
+configurable time intervals. This interrupt simulation is done by
+calling :c:func:`uio_event_notify()` from the timer's event
+handler.
+
+Each driver provides attributes that are used to read or write
+variables. These attributes are accessible through sysfs files. A custom
+kernel driver module can add its own attributes to the device owned by
+the uio driver, but these are not added to the UIO device itself at this
+time. This might change in the future if it is found to be useful.
+
+The following standard attributes are provided by the UIO framework:
+
+-  ``name``: The name of your device. It is recommended to use the name
+   of your kernel module for this.
+
+-  ``version``: A version string defined by your driver. This allows the
+   user space part of your driver to deal with different versions of the
+   kernel module.
+
+-  ``event``: The total number of interrupts handled by the driver since
+   the last time the device node was read.
+
+These attributes appear under the ``/sys/class/uio/uioX`` directory.
+Please note that this directory might be a symlink, and not a real
+directory. Any userspace code that accesses it must be able to handle
+this.
+
+Each UIO device can make one or more memory regions available for memory
+mapping. This is necessary because some industrial I/O cards require
+access to more than one PCI memory region in a driver.
+
+Each mapping has its own directory in sysfs; the first mapping appears
+as ``/sys/class/uio/uioX/maps/map0/``. Subsequent mappings create
+directories ``map1/``, ``map2/``, and so on. These directories will only
+appear if the size of the mapping is not 0.
+
+Each ``mapX/`` directory contains four read-only files that show
+attributes of the memory:
+
+-  ``name``: A string identifier for this mapping. This is optional, the
+   string can be empty. Drivers can set this to make it easier for
+   userspace to find the correct mapping.
+
+-  ``addr``: The address of memory that can be mapped.
+
+-  ``size``: The size, in bytes, of the memory pointed to by addr.
+
+-  ``offset``: The offset, in bytes, that has to be added to the pointer
+   returned by :c:func:`mmap()` to get to the actual device memory.
+   This is important if the device's memory is not page aligned.
+   Remember that pointers returned by :c:func:`mmap()` are always
+   page aligned, so it is good style to always add this offset.
+
+From userspace, the different mappings are distinguished by adjusting
+the ``offset`` parameter of the :c:func:`mmap()` call. To map the
+memory of mapping N, you have to use N times the page size as your
+offset::
+
+    offset = N * getpagesize();
+
+Sometimes there is hardware with memory-like regions that can not be
+mapped with the technique described here, but there are still ways to
+access them from userspace. The most common example are x86 ioports. On
+x86 systems, userspace can access these ioports using
+:c:func:`ioperm()`, :c:func:`iopl()`, :c:func:`inb()`,
+:c:func:`outb()`, and similar functions.
+
+Since these ioport regions can not be mapped, they will not appear under
+``/sys/class/uio/uioX/maps/`` like the normal memory described above.
+Without information about the port regions the hardware has to offer, it
+becomes difficult for the userspace part of the driver to find out which
+ports belong to which UIO device.
+
+To address this situation, the new directory
+``/sys/class/uio/uioX/portio/`` was added. It only exists if the driver
+wants to pass information about one or more port regions to userspace.
+If that is the case, subdirectories named ``port0``, ``port1``, and so
+on, will appear underneath ``/sys/class/uio/uioX/portio/``.
+
+Each ``portX/`` directory contains four read-only files that show name,
+start, size, and type of the port region:
+
+-  ``name``: A string identifier for this port region. The string is
+   optional and can be empty. Drivers can set it to make it easier for
+   userspace to find a certain port region.
+
+-  ``start``: The first port of this region.
+
+-  ``size``: The number of ports in this region.
+
+-  ``porttype``: A string describing the type of port.
+
+Writing your own kernel module
+==============================
+
+Please have a look at ``uio_cif.c`` as an example. The following
+paragraphs explain the different sections of this file.
+
+struct uio_info
+---------------
+
+This structure tells the framework the details of your driver. Some of
+the members are required, others are optional. A short sketch putting
+these fields together follows the descriptions below.
+
+-  ``const char *name``: Required. The name of your driver as it will
+   appear in sysfs. I recommend using the name of your module for this.
+
+-  ``const char *version``: Required. This string appears in
+   ``/sys/class/uio/uioX/version``.
+
+-  ``struct uio_mem mem[ MAX_UIO_MAPS ]``: Required if you have memory
+   that can be mapped with :c:func:`mmap()`. For each mapping you
+   need to fill one of the ``uio_mem`` structures. See the description
+   below for details.
+
+-  ``struct uio_port port[ MAX_UIO_PORTS_REGIONS ]``: Required if you
+   want to pass information about ioports to userspace. For each port
+   region you need to fill one of the ``uio_port`` structures. See the
+   description below for details.
+
+-  ``long irq``: Required. If your hardware generates an interrupt, it's
+   your module's task to determine the irq number during initialization.
+   If you don't have a hardware generated interrupt but want to trigger
+   the interrupt handler in some other way, set ``irq`` to
+   ``UIO_IRQ_CUSTOM``. If you had no interrupt at all, you could set
+   ``irq`` to ``UIO_IRQ_NONE``, though this rarely makes sense.
+
+-  ``unsigned long irq_flags``: Required if you've set ``irq`` to a
+   hardware interrupt number. The flags given here will be used in the
+   call to :c:func:`request_irq()`.
+
+-  ``int (*mmap)(struct uio_info *info, struct vm_area_struct *vma)``:
+   Optional. If you need a special :c:func:`mmap()`
+   function, you can set it here. If this pointer is not NULL, your
+   :c:func:`mmap()` will be called instead of the built-in one.
+
+-  ``int (*open)(struct uio_info *info, struct inode *inode)``:
+   Optional. You might want to have your own :c:func:`open()`,
+   e.g. to enable interrupts only when your device is actually used.
+
+-  ``int (*release)(struct uio_info *info, struct inode *inode)``:
+   Optional. If you define your own :c:func:`open()`, you will
+   probably also want a custom :c:func:`release()` function.
+
+-  ``int (*irqcontrol)(struct uio_info *info, s32 irq_on)``:
+   Optional. If you need to be able to enable or disable interrupts
+   from userspace by writing to ``/dev/uioX``, you can implement this
+   function. The parameter ``irq_on`` will be 0 to disable interrupts
+   and 1 to enable them.
+
+Usually, your device will have one or more memory regions that can be
+mapped to user space. For each region, you have to set up a
+``struct uio_mem`` in the ``mem[]`` array. Here's a description of the
+fields of ``struct uio_mem``:
+
+-  ``const char *name``: Optional. Set this to help identify the memory
+   region, it will show up in the corresponding sysfs node.
+
+-  ``int memtype``: Required if the mapping is used. Set this to
+   ``UIO_MEM_PHYS`` if you have physical memory on your card to be
+   mapped. Use ``UIO_MEM_LOGICAL`` for logical memory (e.g. allocated
+   with :c:func:`kmalloc()`). There's also ``UIO_MEM_VIRTUAL`` for
+   virtual memory.
+
+-  ``phys_addr_t addr``: Required if the mapping is used. Fill in the
+   address of your memory block. This address is the one that appears in
+   sysfs.
+
+-  ``resource_size_t size``: Fill in the size of the memory block that
+   ``addr`` points to. If ``size`` is zero, the mapping is considered
+   unused. Note that you *must* initialize ``size`` with zero for all
+   unused mappings.
+
+-  ``void *internal_addr``: If you have to access this memory region
+   from within your kernel module, you will want to map it internally by
+   using something like :c:func:`ioremap()`. Addresses returned by
+   this function cannot be mapped to user space, so you must not store
+   them in ``addr``. Use ``internal_addr`` instead to remember such an
+   address.
+
+Please do not touch the ``map`` element of ``struct uio_mem``! It is
+used by the UIO framework to set up sysfs files for this mapping. Simply
+leave it alone.
+
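+For illustration only, here is a minimal sketch that ties the
+``struct uio_info`` and ``struct uio_mem`` fields described above
+together. The device name, address, size and IRQ number are
+assumptions, not taken from any real driver::
+
+    #include <linux/interrupt.h>
+    #include <linux/io.h>
+    #include <linux/module.h>
+    #include <linux/uio_driver.h>
+
+    static struct uio_info mycard_uio_info = {
+            .name      = "mycard",    /* shows up in sysfs */
+            .version   = "0.1",
+            .irq       = 42,          /* assumed IRQ number */
+            .irq_flags = IRQF_SHARED,
+    };
+
+    static int mycard_setup(struct device *parent)
+    {
+            /* one mappable region; unused mem[] slots keep size == 0 */
+            mycard_uio_info.mem[0].memtype       = UIO_MEM_PHYS;
+            mycard_uio_info.mem[0].addr          = 0xd0000000; /* assumed */
+            mycard_uio_info.mem[0].size          = 0x1000;
+            mycard_uio_info.mem[0].internal_addr =
+                    ioremap(0xd0000000, 0x1000);
+
+            return uio_register_device(parent, &mycard_uio_info);
+    }
+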
+Sometimes, your device can have one or more port regions which can not
+be mapped to userspace. But if there are other possibilities for
+userspace to access these ports, it makes sense to make information
+about the ports available in sysfs. For each region, you have to set up
+a ``struct uio_port`` in the ``port[]`` array. Here's a description of
+the fields of ``struct uio_port``:
+
+-  ``char *porttype``: Required. Set this to one of the predefined
+   constants. Use ``UIO_PORT_X86`` for the ioports found in x86
+   architectures.
+
+-  ``unsigned long start``: Required if the port region is used. Fill in
+   the number of the first port of this region.
+
+-  ``unsigned long size``: Fill in the number of ports in this region.
+   If ``size`` is zero, the region is considered unused. Note that you
+   *must* initialize ``size`` with zero for all unused regions.
+
+Please do not touch the ``portio`` element of ``struct uio_port``! It is
+used internally by the UIO framework to set up sysfs files for this
+region. Simply leave it alone.
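+
+As a small, purely illustrative sketch (the port range is made up),
+describing one x86 ioport region in the ``mycard_uio_info`` structure
+from the sketch above could look like this::
+
+    mycard_uio_info.port[0].name     = "mycard-ports";
+    mycard_uio_info.port[0].porttype = UIO_PORT_X86;
+    mycard_uio_info.port[0].start    = 0x378;    /* assumed first port */
+    mycard_uio_info.port[0].size     = 8;        /* assumed port count */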
+
+Adding an interrupt handler
+---------------------------
+
+What you need to do in your interrupt handler depends on your hardware
+and on how you want to handle it. You should try to keep the amount of
+code in your kernel interrupt handler low. If your hardware requires no
+action that you *have* to perform after each interrupt, then your
+handler can be empty.
+
+If, on the other hand, your hardware *needs* some action to be performed
+after each interrupt, then you *must* do it in your kernel module. Note
+that you cannot rely on the userspace part of your driver. Your
+userspace program can terminate at any time, possibly leaving your
+hardware in a state where proper interrupt handling is still required.
+
+There might also be applications where you want to read data from your
+hardware at each interrupt and buffer it in a piece of kernel memory
+you've allocated for that purpose. With this technique you could avoid
+loss of data if your userspace program misses an interrupt.
+
+A note on shared interrupts: Your driver should support interrupt
+sharing whenever this is possible. It is possible if and only if your
+driver can detect whether your hardware has triggered the interrupt or
+not. This is usually done by looking at an interrupt status register. If
+your driver sees that the IRQ bit is actually set, it will perform its
+actions, and the handler returns IRQ_HANDLED. If the driver detects
+that it was not your hardware that caused the interrupt, it will do
+nothing and return IRQ_NONE, allowing the kernel to call the next
+possible interrupt handler.
+
+If you decide not to support shared interrupts, your card won't work in
+computers with no free interrupts. As this frequently happens on the PC
+platform, you can save yourself a lot of trouble by supporting interrupt
+sharing.
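+
+For illustration only (the register offset and bit mask are
+assumptions), such a handler for a card with an interrupt status
+register at offset 0x04 could look like this::
+
+    #include <linux/interrupt.h>
+    #include <linux/io.h>
+    #include <linux/uio_driver.h>
+
+    static irqreturn_t mycard_handler(int irq, struct uio_info *info)
+    {
+            void __iomem *base = info->mem[0].internal_addr;
+            u32 status = ioread32(base + 0x04);    /* assumed register */
+
+            if (!(status & 0x1))
+                    return IRQ_NONE;      /* not our card */
+
+            iowrite32(0x1, base + 0x04);  /* acknowledge the interrupt */
+            return IRQ_HANDLED;
+    }
+
+Such a handler would be assigned to the ``.handler`` element of
+``struct uio_info``.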
+
+Using uio_pdrv for platform devices
+-----------------------------------
+
+In many cases, UIO drivers for platform devices can be handled in a
+generic way. In the same place where you define your
+``struct platform_device``, you simply also implement your interrupt
+handler and fill your ``struct uio_info``. A pointer to this
+``struct uio_info`` is then used as ``platform_data`` for your platform
+device.
+
+You also need to set up an array of ``struct resource`` containing
+addresses and sizes of your memory mappings. This information is passed
+to the driver using the ``.resource`` and ``.num_resources`` elements of
+``struct platform_device``.
+
+You now have to set the ``.name`` element of ``struct platform_device``
+to ``"uio_pdrv"`` to use the generic UIO platform device driver. This
+driver will fill the ``mem[]`` array according to the resources given,
+and register the device.
+
+The advantage of this approach is that you only have to edit a file you
+need to edit anyway. You do not have to create an extra driver.
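+
+A hedged sketch (the device name, addresses and resource layout are
+assumptions, not a real board) of such a board file snippet::
+
+    #include <linux/ioport.h>
+    #include <linux/kernel.h>
+    #include <linux/platform_device.h>
+    #include <linux/uio_driver.h>
+
+    static struct resource mycard_resources[] = {
+            {
+                    .start = 0xd0000000,    /* assumed register block */
+                    .end   = 0xd0000fff,
+                    .flags = IORESOURCE_MEM,
+                    .name  = "registers",
+            },
+    };
+
+    static struct uio_info mycard_uio_info = {
+            .name    = "mycard",
+            .version = "0.1",
+            /* .irq, .irq_flags and .handler filled in as shown above */
+    };
+
+    static struct platform_device mycard_uio_pdev = {
+            .name          = "uio_pdrv",
+            .id            = -1,
+            .resource      = mycard_resources,
+            .num_resources = ARRAY_SIZE(mycard_resources),
+            .dev = {
+                    .platform_data = &mycard_uio_info,
+            },
+    };
+
+The device would then be registered from the board init code with
+:c:func:`platform_device_register()`.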
+
+Using uio_pdrv_genirq for platform devices
+------------------------------------------
+
+Especially in embedded devices, you frequently find chips where the irq
+pin is tied to its own dedicated interrupt line. In such cases, where
+you can be really sure the interrupt is not shared, we can take the
+concept of ``uio_pdrv`` one step further and use a generic interrupt
+handler. That's what ``uio_pdrv_genirq`` does.
+
+The setup for this driver is the same as described above for
+``uio_pdrv``, except that you do not implement an interrupt handler. The
+``.handler`` element of ``struct uio_info`` must remain ``NULL``. The
+``.irq_flags`` element must not contain ``IRQF_SHARED``.
+
+You will set the ``.name`` element of ``struct platform_device`` to
+``"uio_pdrv_genirq"`` to use this driver.
+
+The generic interrupt handler of ``uio_pdrv_genirq`` will simply disable
+the interrupt line using :c:func:`disable_irq_nosync()`. After
+doing its work, userspace can reenable the interrupt by writing
+0x00000001 to the UIO device file. The driver already implements an
+:c:func:`irqcontrol()` function to make this possible, so you must not
+implement your own.
+
+Using ``uio_pdrv_genirq`` not only saves a few lines of interrupt
+handler code. You also do not need to know anything about the chip's
+internal registers to create the kernel part of the driver. All you need
+to know is the irq number of the pin the chip is connected to.
+
+Using uio_dmem_genirq for platform devices
+------------------------------------------
+
+In addition to statically allocated memory ranges, there may also be a
+desire to use dynamically allocated regions in a user space driver. In
+particular, being able to access memory made available through the
+dma-mapping API may be particularly useful. The ``uio_dmem_genirq``
+driver provides a way to accomplish this.
+
+This driver is used in a similar manner to the ``"uio_pdrv_genirq"``
+driver with respect to interrupt configuration and handling.
+
+Set the ``.name`` element of ``struct platform_device`` to
+``"uio_dmem_genirq"`` to use this driver.
+
+When using this driver, fill in the ``.platform_data`` element of
+``struct platform_device``, which is of type
+``struct uio_dmem_genirq_pdata`` and which contains the following
+elements:
+
+-  ``struct uio_info uioinfo``: The same structure used as the
+   ``uio_pdrv_genirq`` platform data
+
+-  ``unsigned int *dynamic_region_sizes``: Pointer to list of sizes of
+   dynamic memory regions to be mapped into user space.
+
+-  ``unsigned int num_dynamic_regions``: Number of elements in
+   ``dynamic_region_sizes`` array.
+
+The dynamic regions defined in the platform data will be appended to the
+``mem[]`` array after the platform device resources, which implies
+that the total number of static and dynamic memory regions cannot exceed
+``MAX_UIO_MAPS``.
+
+The dynamic memory regions will be allocated when the UIO device file,
+``/dev/uioX`` is opened. Similar to static memory resources, the memory
+region information for dynamic regions is then visible via sysfs at
+``/sys/class/uio/uioX/maps/mapY/*``. The dynamic memory regions will be
+freed when the UIO device file is closed. When no processes are holding
+the device file open, the address returned to userspace is ~0.
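+
+For illustration (region sizes and names are assumptions, and the
+platform data header is assumed to be
+``<linux/platform_data/uio_dmem_genirq.h>``), the platform data could
+be set up like this::
+
+    #include <linux/kernel.h>
+    #include <linux/platform_data/uio_dmem_genirq.h>
+
+    static unsigned int mycard_region_sizes[] = {
+            16 * 1024,    /* one 16 KiB DMA buffer */
+            64 * 1024,    /* one 64 KiB DMA buffer */
+    };
+
+    static struct uio_dmem_genirq_pdata mycard_dmem_pdata = {
+            .uioinfo = {
+                    .name    = "mycard",
+                    .version = "0.1",
+                    /* .irq filled in as for uio_pdrv_genirq */
+            },
+            .dynamic_region_sizes = mycard_region_sizes,
+            .num_dynamic_regions  = ARRAY_SIZE(mycard_region_sizes),
+    };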
+
+Writing a driver in userspace
+=============================
+
+Once you have a working kernel module for your hardware, you can write
+the userspace part of your driver. You don't need any special libraries,
+your driver can be written in any reasonable language, you can use
+floating point numbers and so on. In short, you can use all the tools
+and libraries you'd normally use for writing a userspace application.
+
+Getting information about your UIO device
+-----------------------------------------
+
+Information about all UIO devices is available in sysfs. The first thing
+you should do in your driver is check ``name`` and ``version`` to make
+sure you're talking to the right device and that its kernel driver has the
+version you expect.
+
+You should also make sure that the memory mapping you need exists and
+has the size you expect.
+
+There is a tool called ``lsuio`` that lists UIO devices and their
+attributes. It is available here:
+
+http://www.osadl.org/projects/downloads/UIO/user/
+
+With ``lsuio`` you can quickly check if your kernel module is loaded and
+which attributes it exports. Have a look at the manpage for details.
+
+The source code of ``lsuio`` can serve as an example for getting
+information about a UIO device. The file ``uio_helper.c`` contains a
+lot of functions you could use in your userspace driver code.
+
+mmap() device memory
+--------------------
+
+After you have made sure you've got the right device with the memory mappings
+you need, all you have to do is to call :c:func:`mmap()` to map the
+device's memory to userspace.
+
+The parameter ``offset`` of the :c:func:`mmap()` call has a special
+meaning for UIO devices: It is used to select which mapping of your
+device you want to map. To map the memory of mapping N, you have to use
+N times the page size as your offset::
+
+        offset = N * getpagesize();
+
+N starts from zero, so if you've got only one memory range to map, set
+``offset = 0``. A drawback of this technique is that memory is always
+mapped beginning with its start address.
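+
+A minimal userspace sketch (assuming ``/dev/uio0`` and using one page
+as a placeholder length; real code would read the length from the
+``size`` attribute in sysfs) that maps the first two mappings::
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <sys/mman.h>
+    #include <unistd.h>
+
+    int main(void)
+    {
+        int fd = open("/dev/uio0", O_RDWR);
+        void *map0, *map1;
+
+        if (fd < 0) {
+            perror("uio open:");
+            return 1;
+        }
+        /* mapping N is selected by offset = N * page size */
+        map0 = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
+                    MAP_SHARED, fd, 0 * getpagesize());
+        map1 = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
+                    MAP_SHARED, fd, 1 * getpagesize());
+        if (map0 == MAP_FAILED || map1 == MAP_FAILED)
+            perror("uio mmap:");
+        return 0;
+    }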
+
+Waiting for interrupts
+----------------------
+
+After you have successfully mapped your device's memory, you can access it
+like an ordinary array. Usually, you will perform some initialization.
+After that, your hardware starts working and will generate an interrupt
+as soon as it's finished, has some data available, or needs your
+attention because an error occurred.
+
+``/dev/uioX`` is a read-only file. A :c:func:`read()` will always
+block until an interrupt occurs. There is only one legal value for the
+``count`` parameter of :c:func:`read()`, and that is the size of a
+signed 32 bit integer (4). Any other value for ``count`` causes
+:c:func:`read()` to fail. The signed 32 bit integer read is the
+interrupt count of your device. If the value is one more than the value
+you read the last time, everything is OK. If the difference is greater
+than one, you missed interrupts.
+
+You can also use :c:func:`select()` on ``/dev/uioX``.
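+
+For example, a short sketch (assuming ``/dev/uio0``) that waits up to
+one second for an interrupt using :c:func:`select()`::
+
+    #include <fcntl.h>
+    #include <stdint.h>
+    #include <stdio.h>
+    #include <sys/select.h>
+    #include <unistd.h>
+
+    int main(void)
+    {
+        int fd = open("/dev/uio0", O_RDONLY);
+        struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
+        int32_t icount;
+        fd_set fds;
+
+        if (fd < 0) {
+            perror("uio open:");
+            return 1;
+        }
+        FD_ZERO(&fds);
+        FD_SET(fd, &fds);
+        if (select(fd + 1, &fds, NULL, NULL, &tv) > 0 &&
+            read(fd, &icount, 4) == 4)
+            printf("interrupt count: %d\n", icount);
+        else
+            printf("no interrupt within one second\n");
+        return 0;
+    }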
+
+Generic PCI UIO driver
+======================
+
+The generic driver is a kernel module named uio_pci_generic. It can
+work with any device compliant to PCI 2.3 (circa 2002) and any compliant
+PCI Express device. Using this, you only need to write the userspace
+driver, removing the need to write a hardware-specific kernel module.
+
+Making the driver recognize the device
+--------------------------------------
+
+Since the driver does not declare any device ids, it will not get loaded
+automatically and will not automatically bind to any devices. You must
+load it and add an id to the driver yourself. For example::
+
+     modprobe uio_pci_generic
+     echo "8086 10f5" > /sys/bus/pci/drivers/uio_pci_generic/new_id
+
+If there already is a hardware-specific kernel driver for your device,
+the generic driver still won't bind to it. In this case, if you want to
+use the generic driver (why would you?), you'll have to manually unbind
+the hardware-specific driver and bind the generic driver, like this::
+
+        echo -n 0000:00:19.0 > /sys/bus/pci/drivers/e1000e/unbind
+        echo -n 0000:00:19.0 > /sys/bus/pci/drivers/uio_pci_generic/bind
+
+You can verify that the device has been bound to the driver by looking
+for it in sysfs, for example::
+
+        ls -l /sys/bus/pci/devices/0000:00:19.0/driver
+
+If successful, this should print::
+
+      .../0000:00:19.0/driver -> ../../../bus/pci/drivers/uio_pci_generic
+
+Note that the generic driver will not bind to old PCI 2.2 devices. If
+binding the device failed, run the following command::
+
+      dmesg
+
+and look in the output for failure reasons.
+
+Things to know about uio_pci_generic
+------------------------------------
+
+Interrupts are handled using the Interrupt Disable bit in the PCI
+command register and Interrupt Status bit in the PCI status register.
+All devices compliant to PCI 2.3 (circa 2002) and all compliant PCI
+Express devices should support these bits. uio_pci_generic detects
+this support, and won't bind to devices which do not support the
+Interrupt Disable Bit in the command register.
+
+On each interrupt, uio_pci_generic sets the Interrupt Disable bit.
+This prevents the device from generating further interrupts until the
+bit is cleared. The userspace driver should clear this bit before
+blocking and waiting for more interrupts.
+
+Writing userspace driver using uio_pci_generic
+------------------------------------------------
+
+A userspace driver can use the PCI sysfs interface, or the libpci library that
+wraps it, to talk to the device and to re-enable interrupts by writing
+to the command register.
+
+Example code using uio_pci_generic
+----------------------------------
+
+Here is some sample userspace driver code using uio_pci_generic::
+
+    #include <stdlib.h>
+    #include <stdio.h>
+    #include <unistd.h>
+    #include <sys/types.h>
+    #include <sys/stat.h>
+    #include <fcntl.h>
+    #include <errno.h>
+
+    int main()
+    {
+        int uiofd;
+        int configfd;
+        int err;
+        int i;
+        unsigned icount;
+        unsigned char command_high;
+
+        uiofd = open("/dev/uio0", O_RDONLY);
+        if (uiofd < 0) {
+            perror("uio open:");
+            return errno;
+        }
+        configfd = open("/sys/class/uio/uio0/device/config", O_RDWR);
+        if (configfd < 0) {
+            perror("config open:");
+            return errno;
+        }
+
+        /* Read and cache command value */
+        err = pread(configfd, &command_high, 1, 5);
+        if (err != 1) {
+            perror("command config read:");
+            return errno;
+        }
+        command_high &= ~0x4;
+
+        for(i = 0;; ++i) {
+            /* Print out a message, for debugging. */
+            if (i == 0)
+                fprintf(stderr, "Started uio test driver.\n");
+            else
+                fprintf(stderr, "Interrupts: %d\n", icount);
+
+            /****************************************/
+            /* Here we got an interrupt from the
+               device. Do something to it. */
+            /****************************************/
+
+            /* Re-enable interrupts. */
+            err = pwrite(configfd, &command_high, 1, 5);
+            if (err != 1) {
+                perror("config write:");
+                break;
+            }
+
+            /* Wait for next interrupt. */
+            err = read(uiofd, &icount, 4);
+            if (err != 4) {
+                perror("uio read:");
+                break;
+            }
+
+        }
+        return errno;
+    }
+
+Generic Hyper-V UIO driver
+==========================
+
+The generic driver is a kernel module named uio_hv_generic. It
+supports devices on the Hyper-V VMBus similar to uio_pci_generic on
+PCI bus.
+
+Making the driver recognize the device
+--------------------------------------
+
+Since the driver does not declare any device GUIDs, it will not get
+loaded automatically and will not automatically bind to any devices. You
+must load it and add an id to the driver yourself. For example, to use
+the network device GUID::
+
+     modprobe uio_hv_generic
+     echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" > /sys/bus/vmbus/drivers/uio_hv_generic/new_id
+
+If there already is a hardware-specific kernel driver for the device,
+the generic driver still won't bind to it. In this case, if you want to
+use the generic driver (why would you?), you'll have to manually unbind
+the hardware-specific driver and bind the generic driver, like this::
+
+          echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 > /sys/bus/vmbus/drivers/hv_netvsc/unbind
+          echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 > /sys/bus/vmbus/drivers/uio_hv_generic/bind
+
+You can verify that the device has been bound to the driver by looking
+for it in sysfs, for example::
+
+        ls -l /sys/bus/vmbus/devices/vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3/driver
+
+If successful, this should print::
+
+      .../vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3/driver -> ../../../bus/vmbus/drivers/uio_hv_generic
+
+Things to know about uio_hv_generic
+-----------------------------------
+
+On each interrupt, uio_hv_generic sets the Interrupt Disable bit. This
+prevents the device from generating further interrupts until the bit is
+cleared. The userspace driver should clear this bit before blocking and
+waiting for more interrupts.
+
+Further information
+===================
+
+-  `OSADL homepage. <http://www.osadl.org>`_
+
+-  `Linutronix homepage. <http://www.linutronix.de>`_
diff --git a/Documentation/extcon/intel-int3496.txt b/Documentation/extcon/intel-int3496.txt
new file mode 100644 (file)
index 0000000..af0b366
--- /dev/null
@@ -0,0 +1,22 @@
+Intel INT3496 ACPI device extcon driver documentation
+-----------------------------------------------------
+
+The Intel INT3496 ACPI device extcon driver is a driver for ACPI
+devices with an acpi-id of INT3496, such as found for example on
+Intel Baytrail and Cherrytrail tablets.
+
+This ACPI device describes how the OS can read the id-pin of the device's
+USB-otg port, as well as how it can optionally enable Vbus output on the
+otg port and how it can optionally control the muxing of the data pins
+between a USB host and a USB peripheral controller.
+
+The ACPI device exposes this functionality by returning an array with up
+to 3 gpio descriptors from its ACPI _CRS (Current Resource Settings) call:
+
+Index 0: The input gpio for the id-pin, this is always present and valid
+Index 1: The output gpio for enabling Vbus output from the device to the otg
+         port, write 1 to enable the Vbus output (this gpio descriptor may
+         be absent or invalid)
+Index 2: The output gpio for muxing of the data pins between the USB host and
+         the USB peripheral controller, write 1 to mux to the peripheral
+         controller
index 86ee5078fd034ff0247828fa6a3e01c8800da50f..78f197fadfd1b64ced51f9b20d8525d49d784593 100644 (file)
@@ -22,7 +22,16 @@ To program the FPGA from a file or from a buffer:
                              struct fpga_image_info *info,
                              const char *buf, size_t count);
 
-Load the FPGA from an image which exists as a buffer in memory.
+Load the FPGA from an image which exists as a contiguous buffer in
+memory. Allocating contiguous kernel memory for the buffer should be
+avoided; users are encouraged to use the _sg interface instead.
+
+        int fpga_mgr_buf_load_sg(struct fpga_manager *mgr,
+                                struct fpga_image_info *info,
+                                struct sg_table *sgt);
+
+Load the FPGA from an image stored in non-contiguous memory. Callers can
+construct an sg_table from memory backed by alloc_page().
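+
+For illustration only (``mgr``, ``info`` and ``NR_PAGES`` are assumed
+to exist in the calling driver), such a table could be built like
+this:
+
+        struct sg_table sgt;
+        struct page *pages[NR_PAGES];
+        int i, ret;
+
+        /* error handling of alloc_page() omitted for brevity */
+        for (i = 0; i < NR_PAGES; i++)
+                pages[i] = alloc_page(GFP_KERNEL);
+
+        ret = sg_alloc_table_from_pages(&sgt, pages, NR_PAGES, 0,
+                                        NR_PAGES * PAGE_SIZE, GFP_KERNEL);
+        if (!ret)
+                ret = fpga_mgr_buf_load_sg(mgr, info, &sgt);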
 
        int fpga_mgr_firmware_load(struct fpga_manager *mgr,
                                   struct fpga_image_info *info,
@@ -166,7 +175,7 @@ success or negative error codes otherwise.
 
 The programming sequence is:
  1. .write_init
- 2. .write (may be called once or multiple times)
+ 2. .write or .write_sg (may be called once or multiple times)
  3. .write_complete
 
 The .write_init function will prepare the FPGA to receive the image data.  The
@@ -176,7 +185,11 @@ buffer up at least this much before starting.
 
 The .write function writes a buffer to the FPGA. The buffer may be contain the
 whole FPGA image or may be a smaller chunk of an FPGA image.  In the latter
-case, this function is called multiple times for successive chunks.
+case, this function is called multiple times for successive chunks. This interface
+is suitable for drivers which use PIO.
+
+The .write_sg version behaves the same as .write except the input is a sg_table
+scatter list. This interface is suitable for drivers which use DMA.
 
 The .write_complete function is called after all the image has been written
 to put the FPGA into operating mode.
index d6e91e96f4e513cd5bd73e538c5e0ee53896eba4..427c97e429bf628f7c09806c125e45e1f18ae2fe 100644 (file)
@@ -5993,6 +5993,7 @@ S:        Maintained
 F:     arch/x86/include/asm/mshyperv.h
 F:     arch/x86/include/uapi/asm/hyperv.h
 F:     arch/x86/kernel/cpu/mshyperv.c
+F:     arch/x86/hyperv
 F:     drivers/hid/hid-hyperv.c
 F:     drivers/hv/
 F:     drivers/input/serio/hyperv-keyboard.c
@@ -13071,7 +13072,7 @@ USERSPACE I/O (UIO)
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
-F:     Documentation/DocBook/uio-howto.tmpl
+F:     Documentation/driver-api/uio-howto.rst
 F:     drivers/uio/
 F:     include/linux/uio*.h
 
index 1d873d15b545c26a97eefdf15d92d437ff2e1701..9780829f8a0572219bae198715036c4d878a5af8 100644 (file)
@@ -557,15 +557,7 @@ static struct clk_lookup da850_clks[] = {
        CLK("da830-mmc.0",      NULL,           &mmcsd0_clk),
        CLK("da830-mmc.1",      NULL,           &mmcsd1_clk),
        CLK("ti-aemif",         NULL,           &aemif_clk),
-       /*
-        * The only user of this clock is davinci_nand and it get's it through
-        * con_id. The nand node itself is created from within the aemif
-        * driver to guarantee that it's probed after the aemif timing
-        * parameters are configured. of_dev_auxdata is not accessible from
-        * the aemif driver and can't be passed to of_platform_populate(). For
-        * that reason we're leaving the dev_id here as NULL.
-        */
-       CLK(NULL,               "aemif",        &aemif_nand_clk),
+       CLK("davinci-nand.0",   "aemif",        &aemif_nand_clk),
        CLK("ohci-da8xx",       "usb11",        &usb11_clk),
        CLK("musb-da8xx",       "usb20",        &usb20_clk),
        CLK("spi_davinci.0",    NULL,           &spi0_clk),
index 9ee44da6eb7b85312b30260e29a295b1162f9112..06205fe4c1202942918d669425538280f8cbbf4d 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/irqdomain.h>
+#include <linux/platform_data/ti-aemif.h>
 
 #include <asm/mach/arch.h>
 
 #include "cp_intc.h"
 #include <mach/da8xx.h>
 
+static struct of_dev_auxdata da850_aemif_auxdata_lookup[] = {
+       OF_DEV_AUXDATA("ti,davinci-nand", 0x62000000, "davinci-nand.0", NULL),
+       {}
+};
+
+static struct aemif_platform_data aemif_data = {
+       .dev_lookup = da850_aemif_auxdata_lookup,
+};
+
 static struct of_dev_auxdata da850_auxdata_lookup[] __initdata = {
        OF_DEV_AUXDATA("ti,davinci-i2c", 0x01c22000, "i2c_davinci.1", NULL),
        OF_DEV_AUXDATA("ti,davinci-i2c", 0x01e28000, "i2c_davinci.2", NULL),
@@ -37,7 +47,7 @@ static struct of_dev_auxdata da850_auxdata_lookup[] __initdata = {
        OF_DEV_AUXDATA("ti,davinci-dm6467-emac", 0x01e20000, "davinci_emac.1",
                       NULL),
        OF_DEV_AUXDATA("ti,da830-mcasp-audio", 0x01d00000, "davinci-mcasp.0", NULL),
-       OF_DEV_AUXDATA("ti,da850-aemif", 0x68000000, "ti-aemif", NULL),
+       OF_DEV_AUXDATA("ti,da850-aemif", 0x68000000, "ti-aemif", &aemif_data),
        OF_DEV_AUXDATA("ti,da850-tilcdc", 0x01e13000, "da8xx_lcdc.0", NULL),
        OF_DEV_AUXDATA("ti,da830-ohci", 0x01e25000, "ohci-da8xx", NULL),
        OF_DEV_AUXDATA("ti,da830-musb", 0x01e00000, "musb-da8xx", NULL),
index eb3abf8ac44eb33f333ed29727dfd49ba6bfbf38..586b786b3edf9a6930bfdf05c47b4c8e0c57e718 100644 (file)
@@ -7,6 +7,9 @@ obj-$(CONFIG_KVM) += kvm/
 # Xen paravirtualization support
 obj-$(CONFIG_XEN) += xen/
 
+# Hyper-V paravirtualization support
+obj-$(CONFIG_HYPERVISOR_GUEST) += hyperv/
+
 # lguest paravirtualization support
 obj-$(CONFIG_LGUEST_GUEST) += lguest/
 
diff --git a/arch/x86/hyperv/Makefile b/arch/x86/hyperv/Makefile
new file mode 100644 (file)
index 0000000..171ae09
--- /dev/null
@@ -0,0 +1 @@
+obj-y          := hv_init.o
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
new file mode 100644 (file)
index 0000000..db64baf
--- /dev/null
@@ -0,0 +1,277 @@
+/*
+ * X86 specific Hyper-V initialization code.
+ *
+ * Copyright (C) 2016, Microsoft, Inc.
+ *
+ * Author : K. Y. Srinivasan <kys@microsoft.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ */
+
+#include <linux/types.h>
+#include <asm/hypervisor.h>
+#include <asm/hyperv.h>
+#include <asm/mshyperv.h>
+#include <linux/version.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/clockchips.h>
+
+
+#ifdef CONFIG_X86_64
+
+static struct ms_hyperv_tsc_page *tsc_pg;
+
+static u64 read_hv_clock_tsc(struct clocksource *arg)
+{
+       u64 current_tick;
+
+       if (tsc_pg->tsc_sequence != 0) {
+               /*
+                * Use the tsc page to compute the value.
+                */
+
+               while (1) {
+                       u64 tmp;
+                       u32 sequence = tsc_pg->tsc_sequence;
+                       u64 cur_tsc;
+                       u64 scale = tsc_pg->tsc_scale;
+                       s64 offset = tsc_pg->tsc_offset;
+
+                       rdtscll(cur_tsc);
+                       /* current_tick = ((cur_tsc *scale) >> 64) + offset */
+                       asm("mulq %3"
+                               : "=d" (current_tick), "=a" (tmp)
+                               : "a" (cur_tsc), "r" (scale));
+
+                       current_tick += offset;
+                       if (tsc_pg->tsc_sequence == sequence)
+                               return current_tick;
+
+                       if (tsc_pg->tsc_sequence != 0)
+                               continue;
+                       /*
+                        * Fallback using MSR method.
+                        */
+                       break;
+               }
+       }
+       rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
+       return current_tick;
+}
+
+static struct clocksource hyperv_cs_tsc = {
+               .name           = "hyperv_clocksource_tsc_page",
+               .rating         = 400,
+               .read           = read_hv_clock_tsc,
+               .mask           = CLOCKSOURCE_MASK(64),
+               .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+#endif
+
+static u64 read_hv_clock_msr(struct clocksource *arg)
+{
+       u64 current_tick;
+       /*
+        * Read the partition counter to get the current tick count. This count
+        * is set to 0 when the partition is created and is incremented in
+        * 100 nanosecond units.
+        */
+       rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
+       return current_tick;
+}
+
+static struct clocksource hyperv_cs_msr = {
+       .name           = "hyperv_clocksource_msr",
+       .rating         = 400,
+       .read           = read_hv_clock_msr,
+       .mask           = CLOCKSOURCE_MASK(64),
+       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void *hypercall_pg;
+struct clocksource *hyperv_cs;
+EXPORT_SYMBOL_GPL(hyperv_cs);
+
+/*
+ * This function is to be invoked early in the boot sequence after the
+ * hypervisor has been detected.
+ *
+ * 1. Setup the hypercall page.
+ * 2. Register Hyper-V specific clocksource.
+ */
+void hyperv_init(void)
+{
+       u64 guest_id;
+       union hv_x64_msr_hypercall_contents hypercall_msr;
+
+       if (x86_hyper != &x86_hyper_ms_hyperv)
+               return;
+
+       /*
+        * Setup the hypercall page and enable hypercalls.
+        * 1. Register the guest ID
+        * 2. Enable the hypercall and register the hypercall page
+        */
+       guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);
+       wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
+
+       hypercall_pg  = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
+       if (hypercall_pg == NULL) {
+               wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
+               return;
+       }
+
+       rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+       hypercall_msr.enable = 1;
+       hypercall_msr.guest_physical_address = vmalloc_to_pfn(hypercall_pg);
+       wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+
+       /*
+        * Register Hyper-V specific clocksource.
+        */
+#ifdef CONFIG_X86_64
+       if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
+               union hv_x64_msr_hypercall_contents tsc_msr;
+
+               tsc_pg = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
+               if (!tsc_pg)
+                       goto register_msr_cs;
+
+               hyperv_cs = &hyperv_cs_tsc;
+
+               rdmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
+
+               tsc_msr.enable = 1;
+               tsc_msr.guest_physical_address = vmalloc_to_pfn(tsc_pg);
+
+               wrmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
+               clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
+               return;
+       }
+#endif
+       /*
+        * For 32 bit guests just use the MSR based mechanism for reading
+        * the partition counter.
+        */
+
+register_msr_cs:
+       hyperv_cs = &hyperv_cs_msr;
+       if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
+               clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
+}
+
+/*
+ * This routine is called before kexec/kdump, it does the required cleanup.
+ */
+void hyperv_cleanup(void)
+{
+       union hv_x64_msr_hypercall_contents hypercall_msr;
+
+       /* Reset our OS id */
+       wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
+
+       /* Reset the hypercall page */
+       hypercall_msr.as_uint64 = 0;
+       wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+
+       /* Reset the TSC page */
+       hypercall_msr.as_uint64 = 0;
+       wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
+}
+EXPORT_SYMBOL_GPL(hyperv_cleanup);
+
+/*
+ * hv_do_hypercall- Invoke the specified hypercall
+ */
+u64 hv_do_hypercall(u64 control, void *input, void *output)
+{
+       u64 input_address = (input) ? virt_to_phys(input) : 0;
+       u64 output_address = (output) ? virt_to_phys(output) : 0;
+#ifdef CONFIG_X86_64
+       u64 hv_status = 0;
+
+       if (!hypercall_pg)
+               return (u64)ULLONG_MAX;
+
+       __asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
+       __asm__ __volatile__("call *%3" : "=a" (hv_status) :
+                            "c" (control), "d" (input_address),
+                            "m" (hypercall_pg));
+
+       return hv_status;
+
+#else
+
+       u32 control_hi = control >> 32;
+       u32 control_lo = control & 0xFFFFFFFF;
+       u32 hv_status_hi = 1;
+       u32 hv_status_lo = 1;
+       u32 input_address_hi = input_address >> 32;
+       u32 input_address_lo = input_address & 0xFFFFFFFF;
+       u32 output_address_hi = output_address >> 32;
+       u32 output_address_lo = output_address & 0xFFFFFFFF;
+
+       if (!hypercall_pg)
+               return (u64)ULLONG_MAX;
+
+       __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
+                             "=a"(hv_status_lo) : "d" (control_hi),
+                             "a" (control_lo), "b" (input_address_hi),
+                             "c" (input_address_lo), "D"(output_address_hi),
+                             "S"(output_address_lo), "m" (hypercall_pg));
+
+       return hv_status_lo | ((u64)hv_status_hi << 32);
+#endif /* !x86_64 */
+}
+EXPORT_SYMBOL_GPL(hv_do_hypercall);
+
+void hyperv_report_panic(struct pt_regs *regs)
+{
+       static bool panic_reported;
+
+       /*
+        * We prefer to report panic on 'die' chain as we have proper
+        * registers to report, but if we miss it (e.g. on BUG()) we need
+        * to report it on 'panic'.
+        */
+       if (panic_reported)
+               return;
+       panic_reported = true;
+
+       wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
+       wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
+       wrmsrl(HV_X64_MSR_CRASH_P2, regs->bx);
+       wrmsrl(HV_X64_MSR_CRASH_P3, regs->cx);
+       wrmsrl(HV_X64_MSR_CRASH_P4, regs->dx);
+
+       /*
+        * Let Hyper-V know there is crash data available
+        */
+       wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
+}
+EXPORT_SYMBOL_GPL(hyperv_report_panic);
+
+bool hv_is_hypercall_page_setup(void)
+{
+       union hv_x64_msr_hypercall_contents hypercall_msr;
+
+       /* Check if the hypercall page is setup */
+       hypercall_msr.as_uint64 = 0;
+       rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+
+       if (!hypercall_msr.enable)
+               return false;
+
+       return true;
+}
+EXPORT_SYMBOL_GPL(hv_is_hypercall_page_setup);
index aaf59b7da98a2c7898390512c77601cd38b79eb1..7c9c895432a9ff4a6fdcc7b409ee775a69ceb0ec 100644 (file)
@@ -3,8 +3,28 @@
 
 #include <linux/types.h>
 #include <linux/interrupt.h>
+#include <linux/clocksource.h>
 #include <asm/hyperv.h>
 
+/*
+ * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
+ * is set by CPUID(HVCPUID_VERSION_FEATURES).
+ */
+enum hv_cpuid_function {
+       HVCPUID_VERSION_FEATURES                = 0x00000001,
+       HVCPUID_VENDOR_MAXFUNCTION              = 0x40000000,
+       HVCPUID_INTERFACE                       = 0x40000001,
+
+       /*
+        * The remaining functions depend on the value of
+        * HVCPUID_INTERFACE
+        */
+       HVCPUID_VERSION                         = 0x40000002,
+       HVCPUID_FEATURES                        = 0x40000003,
+       HVCPUID_ENLIGHTENMENT_INFO              = 0x40000004,
+       HVCPUID_IMPLEMENTATION_LIMITS           = 0x40000005,
+};
+
 struct ms_hyperv_info {
        u32 features;
        u32 misc_features;
@@ -13,6 +33,128 @@ struct ms_hyperv_info {
 
 extern struct ms_hyperv_info ms_hyperv;
 
+/*
+ * Declare the MSR used to setup pages used to communicate with the hypervisor.
+ */
+union hv_x64_msr_hypercall_contents {
+       u64 as_uint64;
+       struct {
+               u64 enable:1;
+               u64 reserved:11;
+               u64 guest_physical_address:52;
+       };
+};
+
+/*
+ * TSC page layout.
+ */
+
+struct ms_hyperv_tsc_page {
+       volatile u32 tsc_sequence;
+       u32 reserved1;
+       volatile u64 tsc_scale;
+       volatile s64 tsc_offset;
+       u64 reserved2[509];
+};
+
+/*
+ * The guest OS needs to register the guest ID with the hypervisor.
+ * The guest ID is a 64 bit entity and the structure of this ID is
+ * specified in the Hyper-V specification:
+ *
+ * msdn.microsoft.com/en-us/library/windows/hardware/ff542653%28v=vs.85%29.aspx
+ *
+ * While the current guideline does not specify how Linux guest ID(s)
+ * need to be generated, our plan is to publish the guidelines for
+ * Linux and other guest operating systems that currently are hosted
+ * on Hyper-V. The implementation here conforms to these as yet
+ * unpublished guidelines.
+ *
+ *
+ * Bit(s)
+ * 63 - Indicates if the OS is Open Source or not; 1 is Open Source
+ * 62:56 - Os Type; Linux is 0x100
+ * 55:48 - Distro specific identification
+ * 47:16 - Linux kernel version number
+ * 15:0  - Distro specific identification
+ *
+ *
+ */
+
+#define HV_LINUX_VENDOR_ID              0x8100
+
+/*
+ * Generate the guest ID based on the guideline described above.
+ */
+
+static inline  __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
+                                      __u64 d_info2)
+{
+       __u64 guest_id = 0;
+
+       guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
+       guest_id |= (d_info1 << 48);
+       guest_id |= (kernel_version << 16);
+       guest_id |= d_info2;
+
+       return guest_id;
+}
+
+
+/* Free the message slot and signal end-of-message if required */
+static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
+{
+       /*
+        * On crash we're reading some other CPU's message page and we need
+        * to be careful: this other CPU may already have cleared the header
+        * and the host may already have delivered some other message there.
+        * In case we blindly write msg->header.message_type we're going
+        * to lose it. We can still lose a message of the same type but
+        * we count on the fact that there can only be one
+        * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
+        * on crash.
+        */
+       if (cmpxchg(&msg->header.message_type, old_msg_type,
+                   HVMSG_NONE) != old_msg_type)
+               return;
+
+       /*
+        * Make sure the write to MessageType (ie set to
+        * HVMSG_NONE) happens before we read the
+        * MessagePending and EOMing. Otherwise, the EOMing
+        * will not deliver any more messages since there is
+        * no empty slot
+        */
+       mb();
+
+       if (msg->header.message_flags.msg_pending) {
+               /*
+                * This will cause message queue rescan to
+                * possibly deliver another msg from the
+                * hypervisor
+                */
+               wrmsrl(HV_X64_MSR_EOM, 0);
+       }
+}
+
+#define hv_get_current_tick(tick) rdmsrl(HV_X64_MSR_TIME_REF_COUNT, tick)
+#define hv_init_timer(timer, tick) wrmsrl(timer, tick)
+#define hv_init_timer_config(config, val) wrmsrl(config, val)
+
+#define hv_get_simp(val) rdmsrl(HV_X64_MSR_SIMP, val)
+#define hv_set_simp(val) wrmsrl(HV_X64_MSR_SIMP, val)
+
+#define hv_get_siefp(val) rdmsrl(HV_X64_MSR_SIEFP, val)
+#define hv_set_siefp(val) wrmsrl(HV_X64_MSR_SIEFP, val)
+
+#define hv_get_synic_state(val) rdmsrl(HV_X64_MSR_SCONTROL, val)
+#define hv_set_synic_state(val) wrmsrl(HV_X64_MSR_SCONTROL, val)
+
+#define hv_get_vp_index(index) rdmsrl(HV_X64_MSR_VP_INDEX, index)
+
+#define hv_get_synint_state(int_num, val) rdmsrl(int_num, val)
+#define hv_set_synint_state(int_num, val) wrmsrl(int_num, val)
+
 void hyperv_callback_vector(void);
 #ifdef CONFIG_TRACING
 #define trace_hyperv_callback_vector hyperv_callback_vector
@@ -25,4 +167,13 @@ void hv_setup_kexec_handler(void (*handler)(void));
 void hv_remove_kexec_handler(void);
 void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
 void hv_remove_crash_handler(void);
+
+#if IS_ENABLED(CONFIG_HYPERV)
+extern struct clocksource *hyperv_cs;
+
+void hyperv_init(void);
+void hyperv_report_panic(struct pt_regs *regs);
+bool hv_is_hypercall_page_setup(void);
+void hyperv_cleanup(void);
+#endif
 #endif
index 9b1a91834ac8062ce19f3bab02449ced5978fc99..3a20ccf787b886ab7023c693c355af041e8c93a8 100644 (file)
@@ -73,6 +73,9 @@
   */
 #define HV_X64_MSR_STAT_PAGES_AVAILABLE                (1 << 8)
 
+/* Crash MSR available */
+#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE (1 << 10)
+
 /*
  * Feature identification: EBX indicates which flags were specified at
  * partition creation. The format is the same as the partition creation
  */
 #define HV_X64_RELAXED_TIMING_RECOMMENDED      (1 << 5)
 
+/*
+ * Crash notification flag.
+ */
+#define HV_CRASH_CTL_CRASH_NOTIFY (1ULL << 63)
+
 /* MSR used to identify the guest OS. */
 #define HV_X64_MSR_GUEST_OS_ID                 0x40000000
 
index 65e20c97e04b1e2591a356eda4e922ed92ec0310..b5375b9497b3aed6fa2f54f85f9667b243c18f46 100644 (file)
@@ -133,26 +133,6 @@ static uint32_t  __init ms_hyperv_platform(void)
        return 0;
 }
 
-static u64 read_hv_clock(struct clocksource *arg)
-{
-       u64 current_tick;
-       /*
-        * Read the partition counter to get the current tick count. This count
-        * is set to 0 when the partition is created and is incremented in
-        * 100 nanosecond units.
-        */
-       rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
-       return current_tick;
-}
-
-static struct clocksource hyperv_cs = {
-       .name           = "hyperv_clocksource",
-       .rating         = 400, /* use this when running on Hyperv*/
-       .read           = read_hv_clock,
-       .mask           = CLOCKSOURCE_MASK(64),
-       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
 static unsigned char hv_get_nmi_reason(void)
 {
        return 0;
@@ -180,6 +160,11 @@ static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
 
 static void __init ms_hyperv_init_platform(void)
 {
+       int hv_host_info_eax;
+       int hv_host_info_ebx;
+       int hv_host_info_ecx;
+       int hv_host_info_edx;
+
        /*
         * Extract the features and hints
         */
@@ -190,6 +175,21 @@ static void __init ms_hyperv_init_platform(void)
        pr_info("HyperV: features 0x%x, hints 0x%x\n",
                ms_hyperv.features, ms_hyperv.hints);
 
+       /*
+        * Extract host information.
+        */
+       if (cpuid_eax(HVCPUID_VENDOR_MAXFUNCTION) >= HVCPUID_VERSION) {
+               hv_host_info_eax = cpuid_eax(HVCPUID_VERSION);
+               hv_host_info_ebx = cpuid_ebx(HVCPUID_VERSION);
+               hv_host_info_ecx = cpuid_ecx(HVCPUID_VERSION);
+               hv_host_info_edx = cpuid_edx(HVCPUID_VERSION);
+
+               pr_info("Hyper-V Host Build:%d-%d.%d-%d-%d.%d\n",
+                       hv_host_info_eax, hv_host_info_ebx >> 16,
+                       hv_host_info_ebx & 0xFFFF, hv_host_info_ecx,
+                       hv_host_info_edx >> 24, hv_host_info_edx & 0xFFFFFF);
+       }
+
 #ifdef CONFIG_X86_LOCAL_APIC
        if (ms_hyperv.features & HV_X64_MSR_APIC_FREQUENCY_AVAILABLE) {
                /*
@@ -208,9 +208,6 @@ static void __init ms_hyperv_init_platform(void)
                             "hv_nmi_unknown");
 #endif
 
-       if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
-               clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
-
 #ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
 #endif
@@ -227,6 +224,13 @@ static void __init ms_hyperv_init_platform(void)
         */
        if (efi_enabled(EFI_BOOT))
                x86_platform.get_nmi_reason = hv_get_nmi_reason;
+
+#if IS_ENABLED(CONFIG_HYPERV)
+       /*
+        * Setup the hook to get control post apic initialization.
+        */
+       x86_platform.apic_post_init = hyperv_init;
+#endif
 }
 
 const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
index 1693107a518e7560f774df657bcc464296e6f45f..0d17c0aafeb142310152b802a18b969c52bd89f9 100644 (file)
@@ -42,10 +42,22 @@ static struct resource goldfish_pdev_bus_resources[] = {
        }
 };
 
+static bool goldfish_enable __initdata;
+
+static int __init goldfish_setup(char *str)
+{
+       goldfish_enable = true;
+       return 0;
+}
+__setup("goldfish", goldfish_setup);
+
 static int __init goldfish_init(void)
 {
+       if (!goldfish_enable)
+               return -ENODEV;
+
        platform_device_register_simple("goldfish_pdev_bus", -1,
-                                               goldfish_pdev_bus_resources, 2);
+                                       goldfish_pdev_bus_resources, 2);
        return 0;
 }
 device_initcall(goldfish_init);
index e1e2066cecdb602e44f4a7103aa35827da9bbdd5..117ca14ccf8595813841585719790c7ac89d7e84 100644 (file)
@@ -202,4 +202,6 @@ source "drivers/hwtracing/intel_th/Kconfig"
 
 source "drivers/fpga/Kconfig"
 
+source "drivers/fsi/Kconfig"
+
 endmenu
index 060026a02f59246362aa9be2adb95b1304efe710..67ce51d62015835e56a08930f6e5e276e6d7e32e 100644 (file)
@@ -173,3 +173,4 @@ obj-$(CONFIG_STM)           += hwtracing/stm/
 obj-$(CONFIG_ANDROID)          += android/
 obj-$(CONFIG_NVMEM)            += nvmem/
 obj-$(CONFIG_FPGA)             += fpga/
+obj-$(CONFIG_FSI)              += fsi/
index bdfc6c6f4f5a7c8d9363055be25954c66eb55d3f..a82fc022d34ba88ba7adeb35b6b4ee0fafdbdb72 100644 (file)
@@ -19,6 +19,18 @@ config ANDROID_BINDER_IPC
          Android process, using Binder to identify, invoke and pass arguments
          between said processes.
 
+config ANDROID_BINDER_DEVICES
+       string "Android Binder devices"
+       depends on ANDROID_BINDER_IPC
+       default "binder"
+       ---help---
+         Default value for the binder.devices parameter.
+
+         The binder.devices parameter is a comma-separated list of strings
+         that specifies the names of the binder device nodes that will be
+         created. Each binder device has its own context manager, and is
+         therefore logically separated from the other devices.
+
 config ANDROID_BINDER_IPC_32BIT
        bool
        depends on !64BIT && ANDROID_BINDER_IPC
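The ANDROID_BINDER_DEVICES default above feeds the binder.devices module parameter introduced in binder.c below: a comma-separated list of device-node names, each device getting its own context manager. The code that walks this list and registers one device per name sits later in the patch and is not part of this excerpt; purely as a rough sketch using the standard kernel string helpers (split_binder_devices() and create_one_device() are invented names, not driver functions), the parsing could look like:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>		/* kstrdup(), kfree(), GFP_KERNEL */
#include <linux/string.h>	/* strsep() */

extern int create_one_device(const char *name);	/* hypothetical placeholder */

static int __init split_binder_devices(const char *param)
{
	char *names, *cur, *name;
	int ret = 0;

	names = kstrdup(param, GFP_KERNEL);	/* work on a writable copy */
	if (!names)
		return -ENOMEM;

	cur = names;
	while ((name = strsep(&cur, ","))) {
		if (!*name)
			continue;		/* tolerate empty entries */
		ret = create_one_device(name);	/* e.g. "binder", "vndbinder", ... */
		if (ret)
			break;
	}

	kfree(names);
	return ret;
}

On the kernel command line or via modprobe this would be set as, e.g., binder.devices=binder,vndbinder; the second name is purely illustrative.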
index 3c71b982bf2a35ae1a406e35fac2683577edb2b8..9451b762fa1c290c8f74dd59c6848faaea40a30e 100644 (file)
@@ -50,14 +50,13 @@ static DEFINE_MUTEX(binder_main_lock);
 static DEFINE_MUTEX(binder_deferred_lock);
 static DEFINE_MUTEX(binder_mmap_lock);
 
+static HLIST_HEAD(binder_devices);
 static HLIST_HEAD(binder_procs);
 static HLIST_HEAD(binder_deferred_list);
 static HLIST_HEAD(binder_dead_nodes);
 
 static struct dentry *binder_debugfs_dir_entry_root;
 static struct dentry *binder_debugfs_dir_entry_proc;
-static struct binder_node *binder_context_mgr_node;
-static kuid_t binder_context_mgr_uid = INVALID_UID;
 static int binder_last_id;
 
 #define BINDER_DEBUG_ENTRY(name) \
@@ -115,6 +114,9 @@ module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
 static bool binder_debug_no_lock;
 module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
 
+static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
+module_param_named(devices, binder_devices_param, charp, 0444);
+
 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
 static int binder_stop_on_user_error;
 
@@ -145,6 +147,17 @@ module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
                        binder_stop_on_user_error = 2; \
        } while (0)
 
+#define to_flat_binder_object(hdr) \
+       container_of(hdr, struct flat_binder_object, hdr)
+
+#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
+
+#define to_binder_buffer_object(hdr) \
+       container_of(hdr, struct binder_buffer_object, hdr)
+
+#define to_binder_fd_array_object(hdr) \
+       container_of(hdr, struct binder_fd_array_object, hdr)
+
 enum binder_stat_types {
        BINDER_STAT_PROC,
        BINDER_STAT_THREAD,
@@ -158,7 +171,7 @@ enum binder_stat_types {
 
 struct binder_stats {
        int br[_IOC_NR(BR_FAILED_REPLY) + 1];
-       int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
+       int bc[_IOC_NR(BC_REPLY_SG) + 1];
        int obj_created[BINDER_STAT_COUNT];
        int obj_deleted[BINDER_STAT_COUNT];
 };
@@ -186,6 +199,7 @@ struct binder_transaction_log_entry {
        int to_node;
        int data_size;
        int offsets_size;
+       const char *context_name;
 };
 struct binder_transaction_log {
        int next;
@@ -210,6 +224,18 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
        return e;
 }
 
+struct binder_context {
+       struct binder_node *binder_context_mgr_node;
+       kuid_t binder_context_mgr_uid;
+       const char *name;
+};
+
+struct binder_device {
+       struct hlist_node hlist;
+       struct miscdevice miscdev;
+       struct binder_context context;
+};
+
 struct binder_work {
        struct list_head entry;
        enum {
@@ -282,6 +308,7 @@ struct binder_buffer {
        struct binder_node *target_node;
        size_t data_size;
        size_t offsets_size;
+       size_t extra_buffers_size;
        uint8_t data[0];
 };
 
@@ -325,6 +352,7 @@ struct binder_proc {
        int ready_threads;
        long default_priority;
        struct dentry *debugfs_entry;
+       struct binder_context *context;
 };
 
 enum {
@@ -648,7 +676,9 @@ err_no_vma:
 
 static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
                                              size_t data_size,
-                                             size_t offsets_size, int is_async)
+                                             size_t offsets_size,
+                                             size_t extra_buffers_size,
+                                             int is_async)
 {
        struct rb_node *n = proc->free_buffers.rb_node;
        struct binder_buffer *buffer;
@@ -656,7 +686,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
        struct rb_node *best_fit = NULL;
        void *has_page_addr;
        void *end_page_addr;
-       size_t size;
+       size_t size, data_offsets_size;
 
        if (proc->vma == NULL) {
                pr_err("%d: binder_alloc_buf, no vma\n",
@@ -664,15 +694,20 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
                return NULL;
        }
 
-       size = ALIGN(data_size, sizeof(void *)) +
+       data_offsets_size = ALIGN(data_size, sizeof(void *)) +
                ALIGN(offsets_size, sizeof(void *));
 
-       if (size < data_size || size < offsets_size) {
+       if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
                binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
                                proc->pid, data_size, offsets_size);
                return NULL;
        }
-
+       size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
+       if (size < data_offsets_size || size < extra_buffers_size) {
+               binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n",
+                                 proc->pid, extra_buffers_size);
+               return NULL;
+       }
        if (is_async &&
            proc->free_async_space < size + sizeof(struct binder_buffer)) {
                binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
@@ -741,6 +776,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
                      proc->pid, size, buffer);
        buffer->data_size = data_size;
        buffer->offsets_size = offsets_size;
+       buffer->extra_buffers_size = extra_buffers_size;
        buffer->async_transaction = is_async;
        if (is_async) {
                proc->free_async_space -= size + sizeof(struct binder_buffer);
@@ -815,7 +851,8 @@ static void binder_free_buf(struct binder_proc *proc,
        buffer_size = binder_buffer_size(proc, buffer);
 
        size = ALIGN(buffer->data_size, sizeof(void *)) +
-               ALIGN(buffer->offsets_size, sizeof(void *));
+               ALIGN(buffer->offsets_size, sizeof(void *)) +
+               ALIGN(buffer->extra_buffers_size, sizeof(void *));
 
        binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: binder_free_buf %p size %zd buffer_size %zd\n",
@@ -929,8 +966,9 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal,
                if (internal) {
                        if (target_list == NULL &&
                            node->internal_strong_refs == 0 &&
-                           !(node == binder_context_mgr_node &&
-                           node->has_strong_ref)) {
+                           !(node->proc &&
+                             node == node->proc->context->binder_context_mgr_node &&
+                             node->has_strong_ref)) {
                                pr_err("invalid inc strong node for %d\n",
                                        node->debug_id);
                                return -EINVAL;
@@ -1031,6 +1069,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
        struct rb_node **p = &proc->refs_by_node.rb_node;
        struct rb_node *parent = NULL;
        struct binder_ref *ref, *new_ref;
+       struct binder_context *context = proc->context;
 
        while (*p) {
                parent = *p;
@@ -1053,7 +1092,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
        rb_link_node(&new_ref->rb_node_node, parent, p);
        rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
 
-       new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
+       new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
        for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
                ref = rb_entry(n, struct binder_ref, rb_node_desc);
                if (ref->desc > new_ref->desc)
@@ -1240,11 +1279,158 @@ static void binder_send_failed_reply(struct binder_transaction *t,
        }
 }
 
+/**
+ * binder_validate_object() - checks for a valid metadata object in a buffer.
+ * @buffer:    binder_buffer that we're parsing.
+ * @offset:    offset in the buffer at which to validate an object.
+ *
+ * Return:     If there's a valid metadata object at @offset in @buffer, the
+ *             size of that object. Otherwise, it returns zero.
+ */
+static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
+{
+       /* Check if we can read a header first */
+       struct binder_object_header *hdr;
+       size_t object_size = 0;
+
+       if (offset > buffer->data_size - sizeof(*hdr) ||
+           buffer->data_size < sizeof(*hdr) ||
+           !IS_ALIGNED(offset, sizeof(u32)))
+               return 0;
+
+       /* Ok, now see if we can read a complete object. */
+       hdr = (struct binder_object_header *)(buffer->data + offset);
+       switch (hdr->type) {
+       case BINDER_TYPE_BINDER:
+       case BINDER_TYPE_WEAK_BINDER:
+       case BINDER_TYPE_HANDLE:
+       case BINDER_TYPE_WEAK_HANDLE:
+               object_size = sizeof(struct flat_binder_object);
+               break;
+       case BINDER_TYPE_FD:
+               object_size = sizeof(struct binder_fd_object);
+               break;
+       case BINDER_TYPE_PTR:
+               object_size = sizeof(struct binder_buffer_object);
+               break;
+       case BINDER_TYPE_FDA:
+               object_size = sizeof(struct binder_fd_array_object);
+               break;
+       default:
+               return 0;
+       }
+       if (offset <= buffer->data_size - object_size &&
+           buffer->data_size >= object_size)
+               return object_size;
+       else
+               return 0;
+}
+
+/**
+ * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
+ * @b:         binder_buffer containing the object
+ * @index:     index in offset array at which the binder_buffer_object is
+ *             located
+ * @start:     points to the start of the offset array
+ * @num_valid: the number of valid offsets in the offset array
+ *
+ * Return:     If @index is within the valid range of the offset array
+ *             described by @start and @num_valid, and if there's a valid
+ *             binder_buffer_object at the offset found in index @index
+ *             of the offset array, that object is returned. Otherwise,
+ *             %NULL is returned.
+ *             Note that the offset found in index @index itself is not
+ *             verified; this function assumes that @num_valid elements
+ *             from @start were previously verified to have valid offsets.
+ */
+static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
+                                                       binder_size_t index,
+                                                       binder_size_t *start,
+                                                       binder_size_t num_valid)
+{
+       struct binder_buffer_object *buffer_obj;
+       binder_size_t *offp;
+
+       if (index >= num_valid)
+               return NULL;
+
+       offp = start + index;
+       buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
+       if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
+               return NULL;
+
+       return buffer_obj;
+}
+
+/**
+ * binder_validate_fixup() - validates pointer/fd fixups happen in order.
+ * @b:                 transaction buffer
+ * @objects_start:     start of objects buffer
+ * @buffer:            binder_buffer_object in which to fix up
+ * @offset:            start offset in @buffer to fix up
+ * @last_obj:          last binder_buffer_object that we fixed up in
+ * @last_min_offset:   minimum fixup offset in @last_obj
+ *
+ * Return:             %true if a fixup in buffer @buffer at offset @offset is
+ *                     allowed.
+ *
+ * For safety reasons, we only allow fixups inside a buffer to happen
+ * at increasing offsets; additionally, we only allow fixup on the last
+ * buffer object that was verified, or one of its parents.
+ *
+ * Example of what is allowed:
+ *
+ * A
+ *   B (parent = A, offset = 0)
+ *   C (parent = A, offset = 16)
+ *     D (parent = C, offset = 0)
+ *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
+ *
+ * Examples of what is not allowed:
+ *
+ * Decreasing offsets within the same parent:
+ * A
+ *   C (parent = A, offset = 16)
+ *   B (parent = A, offset = 0) // decreasing offset within A
+ *
+ * Referring to a parent that wasn't the last object or any of its parents:
+ * A
+ *   B (parent = A, offset = 0)
+ *   C (parent = A, offset = 0)
+ *   C (parent = A, offset = 16)
+ *     D (parent = B, offset = 0) // B is not A or any of A's parents
+ */
+static bool binder_validate_fixup(struct binder_buffer *b,
+                                 binder_size_t *objects_start,
+                                 struct binder_buffer_object *buffer,
+                                 binder_size_t fixup_offset,
+                                 struct binder_buffer_object *last_obj,
+                                 binder_size_t last_min_offset)
+{
+       if (!last_obj) {
+               /* No previous object, so there is nothing to fix up in */
+               return false;
+       }
+
+       while (last_obj != buffer) {
+               /*
+                * Safe to retrieve the parent of last_obj, since it
+                * was already previously verified by the driver.
+                */
+               if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
+                       return false;
+               last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
+               last_obj = (struct binder_buffer_object *)
+                       (b->data + *(objects_start + last_obj->parent));
+       }
+       return (fixup_offset >= last_min_offset);
+}
+
 static void binder_transaction_buffer_release(struct binder_proc *proc,
                                              struct binder_buffer *buffer,
                                              binder_size_t *failed_at)
 {
-       binder_size_t *offp, *off_end;
+       binder_size_t *offp, *off_start, *off_end;
        int debug_id = buffer->debug_id;
 
        binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -1255,28 +1441,30 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
        if (buffer->target_node)
                binder_dec_node(buffer->target_node, 1, 0);
 
-       offp = (binder_size_t *)(buffer->data +
-                                ALIGN(buffer->data_size, sizeof(void *)));
+       off_start = (binder_size_t *)(buffer->data +
+                                     ALIGN(buffer->data_size, sizeof(void *)));
        if (failed_at)
                off_end = failed_at;
        else
-               off_end = (void *)offp + buffer->offsets_size;
-       for (; offp < off_end; offp++) {
-               struct flat_binder_object *fp;
+               off_end = (void *)off_start + buffer->offsets_size;
+       for (offp = off_start; offp < off_end; offp++) {
+               struct binder_object_header *hdr;
+               size_t object_size = binder_validate_object(buffer, *offp);
 
-               if (*offp > buffer->data_size - sizeof(*fp) ||
-                   buffer->data_size < sizeof(*fp) ||
-                   !IS_ALIGNED(*offp, sizeof(u32))) {
-                       pr_err("transaction release %d bad offset %lld, size %zd\n",
+               if (object_size == 0) {
+                       pr_err("transaction release %d bad object at offset %lld, size %zd\n",
                               debug_id, (u64)*offp, buffer->data_size);
                        continue;
                }
-               fp = (struct flat_binder_object *)(buffer->data + *offp);
-               switch (fp->type) {
+               hdr = (struct binder_object_header *)(buffer->data + *offp);
+               switch (hdr->type) {
                case BINDER_TYPE_BINDER:
                case BINDER_TYPE_WEAK_BINDER: {
-                       struct binder_node *node = binder_get_node(proc, fp->binder);
+                       struct flat_binder_object *fp;
+                       struct binder_node *node;
 
+                       fp = to_flat_binder_object(hdr);
+                       node = binder_get_node(proc, fp->binder);
                        if (node == NULL) {
                                pr_err("transaction release %d bad node %016llx\n",
                                       debug_id, (u64)fp->binder);
@@ -1285,15 +1473,17 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
                        binder_debug(BINDER_DEBUG_TRANSACTION,
                                     "        node %d u%016llx\n",
                                     node->debug_id, (u64)node->ptr);
-                       binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
+                       binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
+                                       0);
                } break;
                case BINDER_TYPE_HANDLE:
                case BINDER_TYPE_WEAK_HANDLE: {
+                       struct flat_binder_object *fp;
                        struct binder_ref *ref;
 
+                       fp = to_flat_binder_object(hdr);
                        ref = binder_get_ref(proc, fp->handle,
-                                            fp->type == BINDER_TYPE_HANDLE);
-
+                                            hdr->type == BINDER_TYPE_HANDLE);
                        if (ref == NULL) {
                                pr_err("transaction release %d bad handle %d\n",
                                 debug_id, fp->handle);
@@ -1302,32 +1492,348 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
                        binder_debug(BINDER_DEBUG_TRANSACTION,
                                     "        ref %d desc %d (node %d)\n",
                                     ref->debug_id, ref->desc, ref->node->debug_id);
-                       binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
+                       binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
                } break;
 
-               case BINDER_TYPE_FD:
+               case BINDER_TYPE_FD: {
+                       struct binder_fd_object *fp = to_binder_fd_object(hdr);
+
                        binder_debug(BINDER_DEBUG_TRANSACTION,
-                                    "        fd %d\n", fp->handle);
+                                    "        fd %d\n", fp->fd);
                        if (failed_at)
-                               task_close_fd(proc, fp->handle);
+                               task_close_fd(proc, fp->fd);
+               } break;
+               case BINDER_TYPE_PTR:
+                       /*
+                        * Nothing to do here, this will get cleaned up when the
+                        * transaction buffer gets freed
+                        */
                        break;
-
+               case BINDER_TYPE_FDA: {
+                       struct binder_fd_array_object *fda;
+                       struct binder_buffer_object *parent;
+                       uintptr_t parent_buffer;
+                       u32 *fd_array;
+                       size_t fd_index;
+                       binder_size_t fd_buf_size;
+
+                       fda = to_binder_fd_array_object(hdr);
+                       parent = binder_validate_ptr(buffer, fda->parent,
+                                                    off_start,
+                                                    offp - off_start);
+                       if (!parent) {
+                               pr_err("transaction release %d bad parent offset\n",
+                                      debug_id);
+                               continue;
+                       }
+                       /*
+                        * Since the parent was already fixed up, convert it
+                        * back to kernel address space to access it
+                        */
+                       parent_buffer = parent->buffer -
+                               proc->user_buffer_offset;
+
+                       fd_buf_size = sizeof(u32) * fda->num_fds;
+                       if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
+                               pr_err("transaction release %d invalid number of fds (%lld)\n",
+                                      debug_id, (u64)fda->num_fds);
+                               continue;
+                       }
+                       if (fd_buf_size > parent->length ||
+                           fda->parent_offset > parent->length - fd_buf_size) {
+                               /* No space for all file descriptors here. */
+                               pr_err("transaction release %d not enough space for %lld fds in buffer\n",
+                                      debug_id, (u64)fda->num_fds);
+                               continue;
+                       }
+                       fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+                       for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
+                               task_close_fd(proc, fd_array[fd_index]);
+               } break;
                default:
                        pr_err("transaction release %d bad object type %x\n",
-                               debug_id, fp->type);
+                               debug_id, hdr->type);
                        break;
                }
        }
 }
 
+static int binder_translate_binder(struct flat_binder_object *fp,
+                                  struct binder_transaction *t,
+                                  struct binder_thread *thread)
+{
+       struct binder_node *node;
+       struct binder_ref *ref;
+       struct binder_proc *proc = thread->proc;
+       struct binder_proc *target_proc = t->to_proc;
+
+       node = binder_get_node(proc, fp->binder);
+       if (!node) {
+               node = binder_new_node(proc, fp->binder, fp->cookie);
+               if (!node)
+                       return -ENOMEM;
+
+               node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+               node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+       }
+       if (fp->cookie != node->cookie) {
+               binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
+                                 proc->pid, thread->pid, (u64)fp->binder,
+                                 node->debug_id, (u64)fp->cookie,
+                                 (u64)node->cookie);
+               return -EINVAL;
+       }
+       if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
+               return -EPERM;
+
+       ref = binder_get_ref_for_node(target_proc, node);
+       if (!ref)
+               return -EINVAL;
+
+       if (fp->hdr.type == BINDER_TYPE_BINDER)
+               fp->hdr.type = BINDER_TYPE_HANDLE;
+       else
+               fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
+       fp->binder = 0;
+       fp->handle = ref->desc;
+       fp->cookie = 0;
+       binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);
+
+       trace_binder_transaction_node_to_ref(t, node, ref);
+       binder_debug(BINDER_DEBUG_TRANSACTION,
+                    "        node %d u%016llx -> ref %d desc %d\n",
+                    node->debug_id, (u64)node->ptr,
+                    ref->debug_id, ref->desc);
+
+       return 0;
+}
+
+static int binder_translate_handle(struct flat_binder_object *fp,
+                                  struct binder_transaction *t,
+                                  struct binder_thread *thread)
+{
+       struct binder_ref *ref;
+       struct binder_proc *proc = thread->proc;
+       struct binder_proc *target_proc = t->to_proc;
+
+       ref = binder_get_ref(proc, fp->handle,
+                            fp->hdr.type == BINDER_TYPE_HANDLE);
+       if (!ref) {
+               binder_user_error("%d:%d got transaction with invalid handle, %d\n",
+                                 proc->pid, thread->pid, fp->handle);
+               return -EINVAL;
+       }
+       if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
+               return -EPERM;
+
+       if (ref->node->proc == target_proc) {
+               if (fp->hdr.type == BINDER_TYPE_HANDLE)
+                       fp->hdr.type = BINDER_TYPE_BINDER;
+               else
+                       fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
+               fp->binder = ref->node->ptr;
+               fp->cookie = ref->node->cookie;
+               binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
+                               0, NULL);
+               trace_binder_transaction_ref_to_node(t, ref);
+               binder_debug(BINDER_DEBUG_TRANSACTION,
+                            "        ref %d desc %d -> node %d u%016llx\n",
+                            ref->debug_id, ref->desc, ref->node->debug_id,
+                            (u64)ref->node->ptr);
+       } else {
+               struct binder_ref *new_ref;
+
+               new_ref = binder_get_ref_for_node(target_proc, ref->node);
+               if (!new_ref)
+                       return -EINVAL;
+
+               fp->binder = 0;
+               fp->handle = new_ref->desc;
+               fp->cookie = 0;
+               binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
+                              NULL);
+               trace_binder_transaction_ref_to_ref(t, ref, new_ref);
+               binder_debug(BINDER_DEBUG_TRANSACTION,
+                            "        ref %d desc %d -> ref %d desc %d (node %d)\n",
+                            ref->debug_id, ref->desc, new_ref->debug_id,
+                            new_ref->desc, ref->node->debug_id);
+       }
+       return 0;
+}
+
+static int binder_translate_fd(int fd,
+                              struct binder_transaction *t,
+                              struct binder_thread *thread,
+                              struct binder_transaction *in_reply_to)
+{
+       struct binder_proc *proc = thread->proc;
+       struct binder_proc *target_proc = t->to_proc;
+       int target_fd;
+       struct file *file;
+       int ret;
+       bool target_allows_fd;
+
+       if (in_reply_to)
+               target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
+       else
+               target_allows_fd = t->buffer->target_node->accept_fds;
+       if (!target_allows_fd) {
+               binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
+                                 proc->pid, thread->pid,
+                                 in_reply_to ? "reply" : "transaction",
+                                 fd);
+               ret = -EPERM;
+               goto err_fd_not_accepted;
+       }
+
+       file = fget(fd);
+       if (!file) {
+               binder_user_error("%d:%d got transaction with invalid fd, %d\n",
+                                 proc->pid, thread->pid, fd);
+               ret = -EBADF;
+               goto err_fget;
+       }
+       ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
+       if (ret < 0) {
+               ret = -EPERM;
+               goto err_security;
+       }
+
+       target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
+       if (target_fd < 0) {
+               ret = -ENOMEM;
+               goto err_get_unused_fd;
+       }
+       task_fd_install(target_proc, target_fd, file);
+       trace_binder_transaction_fd(t, fd, target_fd);
+       binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
+                    fd, target_fd);
+
+       return target_fd;
+
+err_get_unused_fd:
+err_security:
+       fput(file);
+err_fget:
+err_fd_not_accepted:
+       return ret;
+}
+
+static int binder_translate_fd_array(struct binder_fd_array_object *fda,
+                                    struct binder_buffer_object *parent,
+                                    struct binder_transaction *t,
+                                    struct binder_thread *thread,
+                                    struct binder_transaction *in_reply_to)
+{
+       binder_size_t fdi, fd_buf_size, num_installed_fds;
+       int target_fd;
+       uintptr_t parent_buffer;
+       u32 *fd_array;
+       struct binder_proc *proc = thread->proc;
+       struct binder_proc *target_proc = t->to_proc;
+
+       fd_buf_size = sizeof(u32) * fda->num_fds;
+       if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
+               binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
+                                 proc->pid, thread->pid, (u64)fda->num_fds);
+               return -EINVAL;
+       }
+       if (fd_buf_size > parent->length ||
+           fda->parent_offset > parent->length - fd_buf_size) {
+               /* No space for all file descriptors here. */
+               binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
+                                 proc->pid, thread->pid, (u64)fda->num_fds);
+               return -EINVAL;
+       }
+       /*
+        * Since the parent was already fixed up, convert it
+        * back to the kernel address space to access it
+        */
+       parent_buffer = parent->buffer - target_proc->user_buffer_offset;
+       fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+       if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
+               binder_user_error("%d:%d parent offset not aligned correctly.\n",
+                                 proc->pid, thread->pid);
+               return -EINVAL;
+       }
+       for (fdi = 0; fdi < fda->num_fds; fdi++) {
+               target_fd = binder_translate_fd(fd_array[fdi], t, thread,
+                                               in_reply_to);
+               if (target_fd < 0)
+                       goto err_translate_fd_failed;
+               fd_array[fdi] = target_fd;
+       }
+       return 0;
+
+err_translate_fd_failed:
+       /*
+        * Failed to allocate fd or security error, free fds
+        * installed so far.
+        */
+       num_installed_fds = fdi;
+       for (fdi = 0; fdi < num_installed_fds; fdi++)
+               task_close_fd(target_proc, fd_array[fdi]);
+       return target_fd;
+}
+
+static int binder_fixup_parent(struct binder_transaction *t,
+                              struct binder_thread *thread,
+                              struct binder_buffer_object *bp,
+                              binder_size_t *off_start,
+                              binder_size_t num_valid,
+                              struct binder_buffer_object *last_fixup_obj,
+                              binder_size_t last_fixup_min_off)
+{
+       struct binder_buffer_object *parent;
+       u8 *parent_buffer;
+       struct binder_buffer *b = t->buffer;
+       struct binder_proc *proc = thread->proc;
+       struct binder_proc *target_proc = t->to_proc;
+
+       if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
+               return 0;
+
+       parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
+       if (!parent) {
+               binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
+                                 proc->pid, thread->pid);
+               return -EINVAL;
+       }
+
+       if (!binder_validate_fixup(b, off_start,
+                                  parent, bp->parent_offset,
+                                  last_fixup_obj,
+                                  last_fixup_min_off)) {
+               binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
+                                 proc->pid, thread->pid);
+               return -EINVAL;
+       }
+
+       if (parent->length < sizeof(binder_uintptr_t) ||
+           bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
+               /* No space for a pointer here! */
+               binder_user_error("%d:%d got transaction with invalid parent offset\n",
+                                 proc->pid, thread->pid);
+               return -EINVAL;
+       }
+       parent_buffer = (u8 *)(parent->buffer -
+                              target_proc->user_buffer_offset);
+       *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
+
+       return 0;
+}
+
 static void binder_transaction(struct binder_proc *proc,
                               struct binder_thread *thread,
-                              struct binder_transaction_data *tr, int reply)
+                              struct binder_transaction_data *tr, int reply,
+                              binder_size_t extra_buffers_size)
 {
+       int ret;
        struct binder_transaction *t;
        struct binder_work *tcomplete;
-       binder_size_t *offp, *off_end;
+       binder_size_t *offp, *off_end, *off_start;
        binder_size_t off_min;
+       u8 *sg_bufp, *sg_buf_end;
        struct binder_proc *target_proc;
        struct binder_thread *target_thread = NULL;
        struct binder_node *target_node = NULL;
@@ -1336,6 +1842,9 @@ static void binder_transaction(struct binder_proc *proc,
        struct binder_transaction *in_reply_to = NULL;
        struct binder_transaction_log_entry *e;
        uint32_t return_error;
+       struct binder_buffer_object *last_fixup_obj = NULL;
+       binder_size_t last_fixup_min_off = 0;
+       struct binder_context *context = proc->context;
 
        e = binder_transaction_log_add(&binder_transaction_log);
        e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
@@ -1344,6 +1853,7 @@ static void binder_transaction(struct binder_proc *proc,
        e->target_handle = tr->target.handle;
        e->data_size = tr->data_size;
        e->offsets_size = tr->offsets_size;
+       e->context_name = proc->context->name;
 
        if (reply) {
                in_reply_to = thread->transaction_stack;
@@ -1396,7 +1906,7 @@ static void binder_transaction(struct binder_proc *proc,
                        }
                        target_node = ref->node;
                } else {
-                       target_node = binder_context_mgr_node;
+                       target_node = context->binder_context_mgr_node;
                        if (target_node == NULL) {
                                return_error = BR_DEAD_REPLY;
                                goto err_no_context_mgr_node;
@@ -1463,20 +1973,22 @@ static void binder_transaction(struct binder_proc *proc,
 
        if (reply)
                binder_debug(BINDER_DEBUG_TRANSACTION,
-                            "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
+                            "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
                             proc->pid, thread->pid, t->debug_id,
                             target_proc->pid, target_thread->pid,
                             (u64)tr->data.ptr.buffer,
                             (u64)tr->data.ptr.offsets,
-                            (u64)tr->data_size, (u64)tr->offsets_size);
+                            (u64)tr->data_size, (u64)tr->offsets_size,
+                            (u64)extra_buffers_size);
        else
                binder_debug(BINDER_DEBUG_TRANSACTION,
-                            "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
+                            "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
                             proc->pid, thread->pid, t->debug_id,
                             target_proc->pid, target_node->debug_id,
                             (u64)tr->data.ptr.buffer,
                             (u64)tr->data.ptr.offsets,
-                            (u64)tr->data_size, (u64)tr->offsets_size);
+                            (u64)tr->data_size, (u64)tr->offsets_size,
+                            (u64)extra_buffers_size);
 
        if (!reply && !(tr->flags & TF_ONE_WAY))
                t->from = thread;
@@ -1492,7 +2004,8 @@ static void binder_transaction(struct binder_proc *proc,
        trace_binder_transaction(reply, t, target_node);
 
        t->buffer = binder_alloc_buf(target_proc, tr->data_size,
-               tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
+               tr->offsets_size, extra_buffers_size,
+               !reply && (t->flags & TF_ONE_WAY));
        if (t->buffer == NULL) {
                return_error = BR_FAILED_REPLY;
                goto err_binder_alloc_buf_failed;
@@ -1505,8 +2018,9 @@ static void binder_transaction(struct binder_proc *proc,
        if (target_node)
                binder_inc_node(target_node, 1, 0, NULL);
 
-       offp = (binder_size_t *)(t->buffer->data +
-                                ALIGN(tr->data_size, sizeof(void *)));
+       off_start = (binder_size_t *)(t->buffer->data +
+                                     ALIGN(tr->data_size, sizeof(void *)));
+       offp = off_start;
 
        if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
                           tr->data.ptr.buffer, tr->data_size)) {
@@ -1528,177 +2042,138 @@ static void binder_transaction(struct binder_proc *proc,
                return_error = BR_FAILED_REPLY;
                goto err_bad_offset;
        }
-       off_end = (void *)offp + tr->offsets_size;
+       if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
+               binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
+                                 proc->pid, thread->pid,
+                                 (u64)extra_buffers_size);
+               return_error = BR_FAILED_REPLY;
+               goto err_bad_offset;
+       }
+       off_end = (void *)off_start + tr->offsets_size;
+       sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
+       sg_buf_end = sg_bufp + extra_buffers_size;
        off_min = 0;
        for (; offp < off_end; offp++) {
-               struct flat_binder_object *fp;
+               struct binder_object_header *hdr;
+               size_t object_size = binder_validate_object(t->buffer, *offp);
 
-               if (*offp > t->buffer->data_size - sizeof(*fp) ||
-                   *offp < off_min ||
-                   t->buffer->data_size < sizeof(*fp) ||
-                   !IS_ALIGNED(*offp, sizeof(u32))) {
-                       binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
+               if (object_size == 0 || *offp < off_min) {
+                       binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
                                          proc->pid, thread->pid, (u64)*offp,
                                          (u64)off_min,
-                                         (u64)(t->buffer->data_size -
-                                         sizeof(*fp)));
+                                         (u64)t->buffer->data_size);
                        return_error = BR_FAILED_REPLY;
                        goto err_bad_offset;
                }
-               fp = (struct flat_binder_object *)(t->buffer->data + *offp);
-               off_min = *offp + sizeof(struct flat_binder_object);
-               switch (fp->type) {
+
+               hdr = (struct binder_object_header *)(t->buffer->data + *offp);
+               off_min = *offp + object_size;
+               switch (hdr->type) {
                case BINDER_TYPE_BINDER:
                case BINDER_TYPE_WEAK_BINDER: {
-                       struct binder_ref *ref;
-                       struct binder_node *node = binder_get_node(proc, fp->binder);
+                       struct flat_binder_object *fp;
 
-                       if (node == NULL) {
-                               node = binder_new_node(proc, fp->binder, fp->cookie);
-                               if (node == NULL) {
-                                       return_error = BR_FAILED_REPLY;
-                                       goto err_binder_new_node_failed;
-                               }
-                               node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
-                               node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
-                       }
-                       if (fp->cookie != node->cookie) {
-                               binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
-                                       proc->pid, thread->pid,
-                                       (u64)fp->binder, node->debug_id,
-                                       (u64)fp->cookie, (u64)node->cookie);
-                               return_error = BR_FAILED_REPLY;
-                               goto err_binder_get_ref_for_node_failed;
-                       }
-                       if (security_binder_transfer_binder(proc->tsk,
-                                                           target_proc->tsk)) {
+                       fp = to_flat_binder_object(hdr);
+                       ret = binder_translate_binder(fp, t, thread);
+                       if (ret < 0) {
                                return_error = BR_FAILED_REPLY;
-                               goto err_binder_get_ref_for_node_failed;
+                               goto err_translate_failed;
                        }
-                       ref = binder_get_ref_for_node(target_proc, node);
-                       if (ref == NULL) {
-                               return_error = BR_FAILED_REPLY;
-                               goto err_binder_get_ref_for_node_failed;
-                       }
-                       if (fp->type == BINDER_TYPE_BINDER)
-                               fp->type = BINDER_TYPE_HANDLE;
-                       else
-                               fp->type = BINDER_TYPE_WEAK_HANDLE;
-                       fp->binder = 0;
-                       fp->handle = ref->desc;
-                       fp->cookie = 0;
-                       binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
-                                      &thread->todo);
-
-                       trace_binder_transaction_node_to_ref(t, node, ref);
-                       binder_debug(BINDER_DEBUG_TRANSACTION,
-                                    "        node %d u%016llx -> ref %d desc %d\n",
-                                    node->debug_id, (u64)node->ptr,
-                                    ref->debug_id, ref->desc);
                } break;
                case BINDER_TYPE_HANDLE:
                case BINDER_TYPE_WEAK_HANDLE: {
-                       struct binder_ref *ref;
+                       struct flat_binder_object *fp;
 
-                       ref = binder_get_ref(proc, fp->handle,
-                                            fp->type == BINDER_TYPE_HANDLE);
+                       fp = to_flat_binder_object(hdr);
+                       ret = binder_translate_handle(fp, t, thread);
+                       if (ret < 0) {
+                               return_error = BR_FAILED_REPLY;
+                               goto err_translate_failed;
+                       }
+               } break;
 
-                       if (ref == NULL) {
-                               binder_user_error("%d:%d got transaction with invalid handle, %d\n",
-                                               proc->pid,
-                                               thread->pid, fp->handle);
+               case BINDER_TYPE_FD: {
+                       struct binder_fd_object *fp = to_binder_fd_object(hdr);
+                       int target_fd = binder_translate_fd(fp->fd, t, thread,
+                                                           in_reply_to);
+
+                       if (target_fd < 0) {
                                return_error = BR_FAILED_REPLY;
-                               goto err_binder_get_ref_failed;
+                               goto err_translate_failed;
                        }
-                       if (security_binder_transfer_binder(proc->tsk,
-                                                           target_proc->tsk)) {
+                       fp->pad_binder = 0;
+                       fp->fd = target_fd;
+               } break;
+               case BINDER_TYPE_FDA: {
+                       struct binder_fd_array_object *fda =
+                               to_binder_fd_array_object(hdr);
+                       struct binder_buffer_object *parent =
+                               binder_validate_ptr(t->buffer, fda->parent,
+                                                   off_start,
+                                                   offp - off_start);
+                       if (!parent) {
+                               binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
+                                                 proc->pid, thread->pid);
                                return_error = BR_FAILED_REPLY;
-                               goto err_binder_get_ref_failed;
+                               goto err_bad_parent;
                        }
-                       if (ref->node->proc == target_proc) {
-                               if (fp->type == BINDER_TYPE_HANDLE)
-                                       fp->type = BINDER_TYPE_BINDER;
-                               else
-                                       fp->type = BINDER_TYPE_WEAK_BINDER;
-                               fp->binder = ref->node->ptr;
-                               fp->cookie = ref->node->cookie;
-                               binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
-                               trace_binder_transaction_ref_to_node(t, ref);
-                               binder_debug(BINDER_DEBUG_TRANSACTION,
-                                            "        ref %d desc %d -> node %d u%016llx\n",
-                                            ref->debug_id, ref->desc, ref->node->debug_id,
-                                            (u64)ref->node->ptr);
-                       } else {
-                               struct binder_ref *new_ref;
-
-                               new_ref = binder_get_ref_for_node(target_proc, ref->node);
-                               if (new_ref == NULL) {
-                                       return_error = BR_FAILED_REPLY;
-                                       goto err_binder_get_ref_for_node_failed;
-                               }
-                               fp->binder = 0;
-                               fp->handle = new_ref->desc;
-                               fp->cookie = 0;
-                               binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
-                               trace_binder_transaction_ref_to_ref(t, ref,
-                                                                   new_ref);
-                               binder_debug(BINDER_DEBUG_TRANSACTION,
-                                            "        ref %d desc %d -> ref %d desc %d (node %d)\n",
-                                            ref->debug_id, ref->desc, new_ref->debug_id,
-                                            new_ref->desc, ref->node->debug_id);
+                       if (!binder_validate_fixup(t->buffer, off_start,
+                                                  parent, fda->parent_offset,
+                                                  last_fixup_obj,
+                                                  last_fixup_min_off)) {
+                               binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
+                                                 proc->pid, thread->pid);
+                               return_error = BR_FAILED_REPLY;
+                               goto err_bad_parent;
                        }
-               } break;
-
-               case BINDER_TYPE_FD: {
-                       int target_fd;
-                       struct file *file;
-
-                       if (reply) {
-                               if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
-                                       binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
-                                               proc->pid, thread->pid, fp->handle);
-                                       return_error = BR_FAILED_REPLY;
-                                       goto err_fd_not_allowed;
-                               }
-                       } else if (!target_node->accept_fds) {
-                               binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
-                                       proc->pid, thread->pid, fp->handle);
+                       ret = binder_translate_fd_array(fda, parent, t, thread,
+                                                       in_reply_to);
+                       if (ret < 0) {
                                return_error = BR_FAILED_REPLY;
-                               goto err_fd_not_allowed;
+                               goto err_translate_failed;
                        }
-
-                       file = fget(fp->handle);
-                       if (file == NULL) {
-                               binder_user_error("%d:%d got transaction with invalid fd, %d\n",
-                                       proc->pid, thread->pid, fp->handle);
+                       last_fixup_obj = parent;
+                       last_fixup_min_off =
+                               fda->parent_offset + sizeof(u32) * fda->num_fds;
+               } break;
+               case BINDER_TYPE_PTR: {
+                       struct binder_buffer_object *bp =
+                               to_binder_buffer_object(hdr);
+                       size_t buf_left = sg_buf_end - sg_bufp;
+
+                       if (bp->length > buf_left) {
+                               binder_user_error("%d:%d got transaction with too large buffer\n",
+                                                 proc->pid, thread->pid);
                                return_error = BR_FAILED_REPLY;
-                               goto err_fget_failed;
+                               goto err_bad_offset;
                        }
-                       if (security_binder_transfer_file(proc->tsk,
-                                                         target_proc->tsk,
-                                                         file) < 0) {
-                               fput(file);
+                       if (copy_from_user(sg_bufp,
+                                          (const void __user *)(uintptr_t)
+                                          bp->buffer, bp->length)) {
+                               binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
+                                                 proc->pid, thread->pid);
                                return_error = BR_FAILED_REPLY;
-                               goto err_get_unused_fd_failed;
+                               goto err_copy_data_failed;
                        }
-                       target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
-                       if (target_fd < 0) {
-                               fput(file);
+                       /* Fixup buffer pointer to target proc address space */
+                       bp->buffer = (uintptr_t)sg_bufp +
+                               target_proc->user_buffer_offset;
+                       sg_bufp += ALIGN(bp->length, sizeof(u64));
+
+                       ret = binder_fixup_parent(t, thread, bp, off_start,
+                                                 offp - off_start,
+                                                 last_fixup_obj,
+                                                 last_fixup_min_off);
+                       if (ret < 0) {
                                return_error = BR_FAILED_REPLY;
-                               goto err_get_unused_fd_failed;
+                               goto err_translate_failed;
                        }
-                       task_fd_install(target_proc, target_fd, file);
-                       trace_binder_transaction_fd(t, fp->handle, target_fd);
-                       binder_debug(BINDER_DEBUG_TRANSACTION,
-                                    "        fd %d -> %d\n", fp->handle, target_fd);
-                       /* TODO: fput? */
-                       fp->binder = 0;
-                       fp->handle = target_fd;
+                       last_fixup_obj = bp;
+                       last_fixup_min_off = 0;
                } break;
-
                default:
                        binder_user_error("%d:%d got transaction with invalid object type, %x\n",
-                               proc->pid, thread->pid, fp->type);
+                               proc->pid, thread->pid, hdr->type);
                        return_error = BR_FAILED_REPLY;
                        goto err_bad_object_type;
                }
@@ -1728,14 +2203,10 @@ static void binder_transaction(struct binder_proc *proc,
                wake_up_interruptible(target_wait);
        return;
 
-err_get_unused_fd_failed:
-err_fget_failed:
-err_fd_not_allowed:
-err_binder_get_ref_for_node_failed:
-err_binder_get_ref_failed:
-err_binder_new_node_failed:
+err_translate_failed:
 err_bad_object_type:
 err_bad_offset:
+err_bad_parent:
 err_copy_data_failed:
        trace_binder_transaction_failed_buffer_release(t->buffer);
        binder_transaction_buffer_release(target_proc, t->buffer, offp);
@@ -1779,6 +2250,7 @@ static int binder_thread_write(struct binder_proc *proc,
                        binder_size_t *consumed)
 {
        uint32_t cmd;
+       struct binder_context *context = proc->context;
        void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
        void __user *ptr = buffer + *consumed;
        void __user *end = buffer + size;
@@ -1805,10 +2277,10 @@ static int binder_thread_write(struct binder_proc *proc,
                        if (get_user(target, (uint32_t __user *)ptr))
                                return -EFAULT;
                        ptr += sizeof(uint32_t);
-                       if (target == 0 && binder_context_mgr_node &&
+                       if (target == 0 && context->binder_context_mgr_node &&
                            (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
                                ref = binder_get_ref_for_node(proc,
-                                              binder_context_mgr_node);
+                                       context->binder_context_mgr_node);
                                if (ref->desc != target) {
                                        binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
                                                proc->pid, thread->pid,
@@ -1953,6 +2425,17 @@ static int binder_thread_write(struct binder_proc *proc,
                        break;
                }
 
+               case BC_TRANSACTION_SG:
+               case BC_REPLY_SG: {
+                       struct binder_transaction_data_sg tr;
+
+                       if (copy_from_user(&tr, ptr, sizeof(tr)))
+                               return -EFAULT;
+                       ptr += sizeof(tr);
+                       binder_transaction(proc, thread, &tr.transaction_data,
+                                          cmd == BC_REPLY_SG, tr.buffers_size);
+                       break;
+               }
                case BC_TRANSACTION:
                case BC_REPLY: {
                        struct binder_transaction_data tr;
@@ -1960,7 +2443,8 @@ static int binder_thread_write(struct binder_proc *proc,
                        if (copy_from_user(&tr, ptr, sizeof(tr)))
                                return -EFAULT;
                        ptr += sizeof(tr);
-                       binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
+                       binder_transaction(proc, thread, &tr,
+                                          cmd == BC_REPLY, 0);
                        break;
                }
 
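BC_TRANSACTION_SG and BC_REPLY_SG wrap the ordinary transaction data in struct binder_transaction_data_sg, adding buffers_size: the total, 8-byte-aligned size of the extra buffers described by BINDER_TYPE_PTR objects in the offsets array. Purely as a hedged userspace sketch (send_sg_transaction() is an invented helper, handle 0 and code 1 are arbitrary examples, and struct binder_write_read / BINDER_WRITE_READ are the driver's pre-existing UAPI rather than something added here), a client sending one scatter-gather buffer might do:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

int send_sg_transaction(int binder_fd, void *payload, size_t payload_len)
{
	struct binder_buffer_object bo;
	binder_size_t offsets[1];
	struct {
		uint32_t cmd;
		struct binder_transaction_data_sg tr;
	} __attribute__((packed)) wr;		/* cmd is read first, then tr */
	struct binder_write_read bwr;

	/* One BINDER_TYPE_PTR object pointing at the extra buffer. */
	memset(&bo, 0, sizeof(bo));
	bo.hdr.type = BINDER_TYPE_PTR;
	bo.buffer = (binder_uintptr_t)(uintptr_t)payload;
	bo.length = payload_len;
	offsets[0] = 0;			/* object sits at offset 0 of the data buffer */

	memset(&wr, 0, sizeof(wr));
	wr.cmd = BC_TRANSACTION_SG;
	wr.tr.transaction_data.target.handle = 0;	/* example: the context manager */
	wr.tr.transaction_data.code = 1;		/* example method code */
	wr.tr.transaction_data.data_size = sizeof(bo);
	wr.tr.transaction_data.offsets_size = sizeof(offsets);
	wr.tr.transaction_data.data.ptr.buffer = (binder_uintptr_t)(uintptr_t)&bo;
	wr.tr.transaction_data.data.ptr.offsets = (binder_uintptr_t)(uintptr_t)offsets;
	/* Must be 8-byte aligned, as binder_transaction() now checks. */
	wr.tr.buffers_size = (payload_len + 7) & ~(size_t)7;

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_buffer = (binder_uintptr_t)(uintptr_t)&wr;
	bwr.write_size = sizeof(wr);

	return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}

A real client would also pass a read buffer and drain BR_TRANSACTION_COMPLETE and the reply; that bookkeeping is omitted from the sketch.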
@@ -2714,9 +3198,11 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
 {
        int ret = 0;
        struct binder_proc *proc = filp->private_data;
+       struct binder_context *context = proc->context;
+
        kuid_t curr_euid = current_euid();
 
-       if (binder_context_mgr_node != NULL) {
+       if (context->binder_context_mgr_node) {
                pr_err("BINDER_SET_CONTEXT_MGR already set\n");
                ret = -EBUSY;
                goto out;
@@ -2724,27 +3210,27 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
        ret = security_binder_set_context_mgr(proc->tsk);
        if (ret < 0)
                goto out;
-       if (uid_valid(binder_context_mgr_uid)) {
-               if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
+       if (uid_valid(context->binder_context_mgr_uid)) {
+               if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
                        pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
                               from_kuid(&init_user_ns, curr_euid),
                               from_kuid(&init_user_ns,
-                                       binder_context_mgr_uid));
+                                        context->binder_context_mgr_uid));
                        ret = -EPERM;
                        goto out;
                }
        } else {
-               binder_context_mgr_uid = curr_euid;
+               context->binder_context_mgr_uid = curr_euid;
        }
-       binder_context_mgr_node = binder_new_node(proc, 0, 0);
-       if (binder_context_mgr_node == NULL) {
+       context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
+       if (!context->binder_context_mgr_node) {
                ret = -ENOMEM;
                goto out;
        }
-       binder_context_mgr_node->local_weak_refs++;
-       binder_context_mgr_node->local_strong_refs++;
-       binder_context_mgr_node->has_strong_ref = 1;
-       binder_context_mgr_node->has_weak_ref = 1;
+       context->binder_context_mgr_node->local_weak_refs++;
+       context->binder_context_mgr_node->local_strong_refs++;
+       context->binder_context_mgr_node->has_strong_ref = 1;
+       context->binder_context_mgr_node->has_weak_ref = 1;
 out:
        return ret;
 }
@@ -2969,6 +3455,7 @@ err_bad_arg:
 static int binder_open(struct inode *nodp, struct file *filp)
 {
        struct binder_proc *proc;
+       struct binder_device *binder_dev;
 
        binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
                     current->group_leader->pid, current->pid);
@@ -2982,6 +3469,9 @@ static int binder_open(struct inode *nodp, struct file *filp)
        INIT_LIST_HEAD(&proc->todo);
        init_waitqueue_head(&proc->wait);
        proc->default_priority = task_nice(current);
+       binder_dev = container_of(filp->private_data, struct binder_device,
+                                 miscdev);
+       proc->context = &binder_dev->context;
 
        binder_lock(__func__);
 
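For the container_of() step above: misc_register() arranges for filp->private_data to point at the struct miscdevice that was opened, so the driver can recover the enclosing per-device structure. A generic sketch of the idiom (example_device is an illustrative stand-in, not the actual binder_device definition):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>

struct example_device {
        struct miscdevice miscdev;      /* embedded, registered with misc_register() */
        /* per-device state (for binder: the struct binder_context) */
};

static int example_open(struct inode *inodep, struct file *filp)
{
        /* misc_open() set filp->private_data to &example_device.miscdev */
        struct example_device *dev =
                container_of(filp->private_data, struct example_device, miscdev);

        filp->private_data = dev;       /* hand the wrapper to later file ops */
        return 0;
}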
@@ -2997,8 +3487,17 @@ static int binder_open(struct inode *nodp, struct file *filp)
                char strbuf[11];
 
                snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
+               /*
+                * proc debug entries are shared between contexts, so
+                * this will fail if the process tries to open the driver
+                * again with a different context. The printing code will
+                * anyway print all contexts that a given PID has, so this
+                * is not a problem.
+                */
                proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
-                       binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
+                       binder_debugfs_dir_entry_proc,
+                       (void *)(unsigned long)proc->pid,
+                       &binder_proc_fops);
        }
 
        return 0;
@@ -3091,6 +3590,7 @@ static int binder_node_release(struct binder_node *node, int refs)
 static void binder_deferred_release(struct binder_proc *proc)
 {
        struct binder_transaction *t;
+       struct binder_context *context = proc->context;
        struct rb_node *n;
        int threads, nodes, incoming_refs, outgoing_refs, buffers,
                active_transactions, page_count;
@@ -3100,11 +3600,12 @@ static void binder_deferred_release(struct binder_proc *proc)
 
        hlist_del(&proc->proc_node);
 
-       if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
+       if (context->binder_context_mgr_node &&
+           context->binder_context_mgr_node->proc == proc) {
                binder_debug(BINDER_DEBUG_DEAD_BINDER,
                             "%s: %d context_mgr_node gone\n",
                             __func__, proc->pid);
-               binder_context_mgr_node = NULL;
+               context->binder_context_mgr_node = NULL;
        }
 
        threads = 0;
@@ -3391,6 +3892,7 @@ static void print_binder_proc(struct seq_file *m,
        size_t header_pos;
 
        seq_printf(m, "proc %d\n", proc->pid);
+       seq_printf(m, "context %s\n", proc->context->name);
        header_pos = m->count;
 
        for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
@@ -3460,7 +3962,9 @@ static const char * const binder_command_strings[] = {
        "BC_EXIT_LOOPER",
        "BC_REQUEST_DEATH_NOTIFICATION",
        "BC_CLEAR_DEATH_NOTIFICATION",
-       "BC_DEAD_BINDER_DONE"
+       "BC_DEAD_BINDER_DONE",
+       "BC_TRANSACTION_SG",
+       "BC_REPLY_SG",
 };
 
 static const char * const binder_objstat_strings[] = {
@@ -3515,6 +4019,7 @@ static void print_binder_proc_stats(struct seq_file *m,
        int count, strong, weak;
 
        seq_printf(m, "proc %d\n", proc->pid);
+       seq_printf(m, "context %s\n", proc->context->name);
        count = 0;
        for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
                count++;
@@ -3622,23 +4127,18 @@ static int binder_transactions_show(struct seq_file *m, void *unused)
 static int binder_proc_show(struct seq_file *m, void *unused)
 {
        struct binder_proc *itr;
-       struct binder_proc *proc = m->private;
+       int pid = (unsigned long)m->private;
        int do_lock = !binder_debug_no_lock;
-       bool valid_proc = false;
 
        if (do_lock)
                binder_lock(__func__);
 
        hlist_for_each_entry(itr, &binder_procs, proc_node) {
-               if (itr == proc) {
-                       valid_proc = true;
-                       break;
+               if (itr->pid == pid) {
+                       seq_puts(m, "binder proc state:\n");
+                       print_binder_proc(m, itr, 1);
                }
        }
-       if (valid_proc) {
-               seq_puts(m, "binder proc state:\n");
-               print_binder_proc(m, proc, 1);
-       }
        if (do_lock)
                binder_unlock(__func__);
        return 0;
@@ -3648,11 +4148,11 @@ static void print_binder_transaction_log_entry(struct seq_file *m,
                                        struct binder_transaction_log_entry *e)
 {
        seq_printf(m,
-                  "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
+                  "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
                   e->debug_id, (e->call_type == 2) ? "reply" :
                   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
-                  e->from_thread, e->to_proc, e->to_thread, e->to_node,
-                  e->target_handle, e->data_size, e->offsets_size);
+                  e->from_thread, e->to_proc, e->to_thread, e->context_name,
+                  e->to_node, e->target_handle, e->data_size, e->offsets_size);
 }
 
 static int binder_transaction_log_show(struct seq_file *m, void *unused)
@@ -3680,26 +4180,50 @@ static const struct file_operations binder_fops = {
        .release = binder_release,
 };
 
-static struct miscdevice binder_miscdev = {
-       .minor = MISC_DYNAMIC_MINOR,
-       .name = "binder",
-       .fops = &binder_fops
-};
-
 BINDER_DEBUG_ENTRY(state);
 BINDER_DEBUG_ENTRY(stats);
 BINDER_DEBUG_ENTRY(transactions);
 BINDER_DEBUG_ENTRY(transaction_log);
 
+static int __init init_binder_device(const char *name)
+{
+       int ret;
+       struct binder_device *binder_device;
+
+       binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
+       if (!binder_device)
+               return -ENOMEM;
+
+       binder_device->miscdev.fops = &binder_fops;
+       binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
+       binder_device->miscdev.name = name;
+
+       binder_device->context.binder_context_mgr_uid = INVALID_UID;
+       binder_device->context.name = name;
+
+       ret = misc_register(&binder_device->miscdev);
+       if (ret < 0) {
+               kfree(binder_device);
+               return ret;
+       }
+
+       hlist_add_head(&binder_device->hlist, &binder_devices);
+
+       return ret;
+}
+
 static int __init binder_init(void)
 {
        int ret;
+       char *device_name, *device_names;
+       struct binder_device *device;
+       struct hlist_node *tmp;
 
        binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
        if (binder_debugfs_dir_entry_root)
                binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
                                                 binder_debugfs_dir_entry_root);
-       ret = misc_register(&binder_miscdev);
+
        if (binder_debugfs_dir_entry_root) {
                debugfs_create_file("state",
                                    S_IRUGO,
@@ -3727,6 +4251,35 @@ static int __init binder_init(void)
                                    &binder_transaction_log_failed,
                                    &binder_transaction_log_fops);
        }
+
+       /*
+        * Copy the module_parameter string, because we don't want to
+        * tokenize it in-place.
+        */
+       device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
+       if (!device_names) {
+               ret = -ENOMEM;
+               goto err_alloc_device_names_failed;
+       }
+       strcpy(device_names, binder_devices_param);
+
+       while ((device_name = strsep(&device_names, ","))) {
+               ret = init_binder_device(device_name);
+               if (ret)
+                       goto err_init_binder_device_failed;
+       }
+
+       return ret;
+
+err_init_binder_device_failed:
+       hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
+               misc_deregister(&device->miscdev);
+               hlist_del(&device->hlist);
+               kfree(device);
+       }
+err_alloc_device_names_failed:
+       debugfs_remove_recursive(binder_debugfs_dir_entry_root);
+
        return ret;
 }
 
index eeb323f56c07429f36410b5743b8e88be6d2964d..f66b45b235b02300e587c0c64f5e34d70de9c538 100644 (file)
 #define HT16K33_FB_SIZE                (HT16K33_MATRIX_LED_MAX_COLS * BYTES_PER_ROW)
 
 struct ht16k33_keypad {
+       struct i2c_client *client;
        struct input_dev *dev;
-       spinlock_t lock;
-       struct delayed_work work;
        uint32_t cols;
        uint32_t rows;
        uint32_t row_shift;
        uint32_t debounce_ms;
        uint16_t last_key_state[HT16K33_MATRIX_KEYPAD_MAX_COLS];
+
+       wait_queue_head_t wait;
+       bool stopped;
 };
 
 struct ht16k33_fbdev {
@@ -78,7 +80,6 @@ struct ht16k33_priv {
        struct i2c_client *client;
        struct ht16k33_keypad keypad;
        struct ht16k33_fbdev fbdev;
-       struct workqueue_struct *workqueue;
 };
 
 static struct fb_fix_screeninfo ht16k33_fb_fix = {
@@ -124,16 +125,8 @@ static void ht16k33_fb_queue(struct ht16k33_priv *priv)
 {
        struct ht16k33_fbdev *fbdev = &priv->fbdev;
 
-       queue_delayed_work(priv->workqueue, &fbdev->work,
-               msecs_to_jiffies(HZ / fbdev->refresh_rate));
-}
-
-static void ht16k33_keypad_queue(struct ht16k33_priv *priv)
-{
-       struct ht16k33_keypad *keypad = &priv->keypad;
-
-       queue_delayed_work(priv->workqueue, &keypad->work,
-               msecs_to_jiffies(keypad->debounce_ms));
+       schedule_delayed_work(&fbdev->work,
+                             msecs_to_jiffies(HZ / fbdev->refresh_rate));
 }
 
 /*
@@ -182,32 +175,6 @@ requeue:
        ht16k33_fb_queue(priv);
 }
 
-static int ht16k33_keypad_start(struct input_dev *dev)
-{
-       struct ht16k33_priv *priv = input_get_drvdata(dev);
-       struct ht16k33_keypad *keypad = &priv->keypad;
-
-       /*
-        * Schedule an immediate key scan to capture current key state;
-        * columns will be activated and IRQs be enabled after the scan.
-        */
-       queue_delayed_work(priv->workqueue, &keypad->work, 0);
-       return 0;
-}
-
-static void ht16k33_keypad_stop(struct input_dev *dev)
-{
-       struct ht16k33_priv *priv = input_get_drvdata(dev);
-       struct ht16k33_keypad *keypad = &priv->keypad;
-
-       cancel_delayed_work(&keypad->work);
-       /*
-        * ht16k33_keypad_scan() will leave IRQs enabled;
-        * we should disable them now.
-        */
-       disable_irq_nosync(priv->client->irq);
-}
-
 static int ht16k33_initialize(struct ht16k33_priv *priv)
 {
        uint8_t byte;
@@ -233,61 +200,6 @@ static int ht16k33_initialize(struct ht16k33_priv *priv)
        return i2c_smbus_write_byte(priv->client, byte);
 }
 
-/*
- * This gets the keys from keypad and reports it to input subsystem
- */
-static void ht16k33_keypad_scan(struct work_struct *work)
-{
-       struct ht16k33_keypad *keypad =
-               container_of(work, struct ht16k33_keypad, work.work);
-       struct ht16k33_priv *priv =
-               container_of(keypad, struct ht16k33_priv, keypad);
-       const unsigned short *keycodes = keypad->dev->keycode;
-       uint16_t bits_changed, new_state[HT16K33_MATRIX_KEYPAD_MAX_COLS];
-       uint8_t data[HT16K33_MATRIX_KEYPAD_MAX_COLS * 2];
-       int row, col, code;
-       bool reschedule = false;
-
-       if (i2c_smbus_read_i2c_block_data(priv->client, 0x40, 6, data) != 6) {
-               dev_err(&priv->client->dev, "Failed to read key data\n");
-               goto end;
-       }
-
-       for (col = 0; col < keypad->cols; col++) {
-               new_state[col] = (data[col * 2 + 1] << 8) | data[col * 2];
-               if (new_state[col])
-                       reschedule = true;
-               bits_changed = keypad->last_key_state[col] ^ new_state[col];
-
-               while (bits_changed) {
-                       row = ffs(bits_changed) - 1;
-                       code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
-                       input_event(keypad->dev, EV_MSC, MSC_SCAN, code);
-                       input_report_key(keypad->dev, keycodes[code],
-                                        new_state[col] & BIT(row));
-                       bits_changed &= ~BIT(row);
-               }
-       }
-       input_sync(keypad->dev);
-       memcpy(keypad->last_key_state, new_state, sizeof(new_state));
-
-end:
-       if (reschedule)
-               ht16k33_keypad_queue(priv);
-       else
-               enable_irq(priv->client->irq);
-}
-
-static irqreturn_t ht16k33_irq_thread(int irq, void *dev)
-{
-       struct ht16k33_priv *priv = dev;
-
-       disable_irq_nosync(priv->client->irq);
-       ht16k33_keypad_queue(priv);
-
-       return IRQ_HANDLED;
-}
-
 static int ht16k33_bl_update_status(struct backlight_device *bl)
 {
        int brightness = bl->props.brightness;
@@ -334,15 +246,152 @@ static struct fb_ops ht16k33_fb_ops = {
        .fb_mmap = ht16k33_mmap,
 };
 
+/*
+ * This gets the keys from keypad and reports it to input subsystem.
+ * Returns true if a key is pressed.
+ */
+static bool ht16k33_keypad_scan(struct ht16k33_keypad *keypad)
+{
+       const unsigned short *keycodes = keypad->dev->keycode;
+       u16 new_state[HT16K33_MATRIX_KEYPAD_MAX_COLS];
+       u8 data[HT16K33_MATRIX_KEYPAD_MAX_COLS * 2];
+       unsigned long bits_changed;
+       int row, col, code;
+       bool pressed = false;
+
+       if (i2c_smbus_read_i2c_block_data(keypad->client, 0x40, 6, data) != 6) {
+               dev_err(&keypad->client->dev, "Failed to read key data\n");
+               return false;
+       }
+
+       for (col = 0; col < keypad->cols; col++) {
+               new_state[col] = (data[col * 2 + 1] << 8) | data[col * 2];
+               if (new_state[col])
+                       pressed = true;
+               bits_changed = keypad->last_key_state[col] ^ new_state[col];
+
+               for_each_set_bit(row, &bits_changed, BITS_PER_LONG) {
+                       code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
+                       input_event(keypad->dev, EV_MSC, MSC_SCAN, code);
+                       input_report_key(keypad->dev, keycodes[code],
+                                        new_state[col] & BIT(row));
+               }
+       }
+       input_sync(keypad->dev);
+       memcpy(keypad->last_key_state, new_state, sizeof(new_state));
+
+       return pressed;
+}
+
+static irqreturn_t ht16k33_keypad_irq_thread(int irq, void *dev)
+{
+       struct ht16k33_keypad *keypad = dev;
+
+       do {
+               wait_event_timeout(keypad->wait, keypad->stopped,
+                                   msecs_to_jiffies(keypad->debounce_ms));
+               if (keypad->stopped)
+                       break;
+       } while (ht16k33_keypad_scan(keypad));
+
+       return IRQ_HANDLED;
+}
+
+static int ht16k33_keypad_start(struct input_dev *dev)
+{
+       struct ht16k33_keypad *keypad = input_get_drvdata(dev);
+
+       keypad->stopped = false;
+       mb();
+       enable_irq(keypad->client->irq);
+
+       return 0;
+}
+
+static void ht16k33_keypad_stop(struct input_dev *dev)
+{
+       struct ht16k33_keypad *keypad = input_get_drvdata(dev);
+
+       keypad->stopped = true;
+       mb();
+       wake_up(&keypad->wait);
+       disable_irq(keypad->client->irq);
+}
+
+static int ht16k33_keypad_probe(struct i2c_client *client,
+                               struct ht16k33_keypad *keypad)
+{
+       struct device_node *node = client->dev.of_node;
+       u32 rows = HT16K33_MATRIX_KEYPAD_MAX_ROWS;
+       u32 cols = HT16K33_MATRIX_KEYPAD_MAX_COLS;
+       int err;
+
+       keypad->client = client;
+       init_waitqueue_head(&keypad->wait);
+
+       keypad->dev = devm_input_allocate_device(&client->dev);
+       if (!keypad->dev)
+               return -ENOMEM;
+
+       input_set_drvdata(keypad->dev, keypad);
+
+       keypad->dev->name = DRIVER_NAME"-keypad";
+       keypad->dev->id.bustype = BUS_I2C;
+       keypad->dev->open = ht16k33_keypad_start;
+       keypad->dev->close = ht16k33_keypad_stop;
+
+       if (!of_get_property(node, "linux,no-autorepeat", NULL))
+               __set_bit(EV_REP, keypad->dev->evbit);
+
+       err = of_property_read_u32(node, "debounce-delay-ms",
+                                  &keypad->debounce_ms);
+       if (err) {
+               dev_err(&client->dev, "key debounce delay not specified\n");
+               return err;
+       }
+
+       err = matrix_keypad_parse_of_params(&client->dev, &rows, &cols);
+       if (err)
+               return err;
+
+       keypad->rows = rows;
+       keypad->cols = cols;
+       keypad->row_shift = get_count_order(cols);
+
+       err = matrix_keypad_build_keymap(NULL, NULL, rows, cols, NULL,
+                                        keypad->dev);
+       if (err) {
+               dev_err(&client->dev, "failed to build keymap\n");
+               return err;
+       }
+
+       err = devm_request_threaded_irq(&client->dev, client->irq,
+                                       NULL, ht16k33_keypad_irq_thread,
+                                       IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+                                       DRIVER_NAME, keypad);
+       if (err) {
+               dev_err(&client->dev, "irq request failed %d, error %d\n",
+                       client->irq, err);
+               return err;
+       }
+
+       ht16k33_keypad_stop(keypad->dev);
+
+       err = input_register_device(keypad->dev);
+       if (err)
+               return err;
+
+       return 0;
+}
+
 static int ht16k33_probe(struct i2c_client *client,
                                  const struct i2c_device_id *id)
 {
        int err;
-       uint32_t rows, cols, dft_brightness;
+       uint32_t dft_brightness;
        struct backlight_device *bl;
        struct backlight_properties bl_props;
        struct ht16k33_priv *priv;
-       struct ht16k33_keypad *keypad;
        struct ht16k33_fbdev *fbdev;
        struct device_node *node = client->dev.of_node;
 
@@ -363,23 +412,16 @@ static int ht16k33_probe(struct i2c_client *client,
        priv->client = client;
        i2c_set_clientdata(client, priv);
        fbdev = &priv->fbdev;
-       keypad = &priv->keypad;
-
-       priv->workqueue = create_singlethread_workqueue(DRIVER_NAME "-wq");
-       if (priv->workqueue == NULL)
-               return -ENOMEM;
 
        err = ht16k33_initialize(priv);
        if (err)
-               goto err_destroy_wq;
+               return err;
 
        /* Framebuffer (2 bytes per column) */
        BUILD_BUG_ON(PAGE_SIZE < HT16K33_FB_SIZE);
        fbdev->buffer = (unsigned char *) get_zeroed_page(GFP_KERNEL);
-       if (!fbdev->buffer) {
-               err = -ENOMEM;
-               goto err_free_fbdev;
-       }
+       if (!fbdev->buffer)
+               return -ENOMEM;
 
        fbdev->cache = devm_kmalloc(&client->dev, HT16K33_FB_SIZE, GFP_KERNEL);
        if (!fbdev->cache) {
@@ -415,59 +457,7 @@ static int ht16k33_probe(struct i2c_client *client,
        if (err)
                goto err_fbdev_info;
 
-       /* Keypad */
-       keypad->dev = devm_input_allocate_device(&client->dev);
-       if (!keypad->dev) {
-               err = -ENOMEM;
-               goto err_fbdev_unregister;
-       }
-
-       keypad->dev->name = DRIVER_NAME"-keypad";
-       keypad->dev->id.bustype = BUS_I2C;
-       keypad->dev->open = ht16k33_keypad_start;
-       keypad->dev->close = ht16k33_keypad_stop;
-
-       if (!of_get_property(node, "linux,no-autorepeat", NULL))
-               __set_bit(EV_REP, keypad->dev->evbit);
-
-       err = of_property_read_u32(node, "debounce-delay-ms",
-                                  &keypad->debounce_ms);
-       if (err) {
-               dev_err(&client->dev, "key debounce delay not specified\n");
-               goto err_fbdev_unregister;
-       }
-
-       err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
-                                       ht16k33_irq_thread,
-                                       IRQF_TRIGGER_RISING | IRQF_ONESHOT,
-                                       DRIVER_NAME, priv);
-       if (err) {
-               dev_err(&client->dev, "irq request failed %d, error %d\n",
-                       client->irq, err);
-               goto err_fbdev_unregister;
-       }
-
-       disable_irq_nosync(client->irq);
-       rows = HT16K33_MATRIX_KEYPAD_MAX_ROWS;
-       cols = HT16K33_MATRIX_KEYPAD_MAX_COLS;
-       err = matrix_keypad_parse_of_params(&client->dev, &rows, &cols);
-       if (err)
-               goto err_fbdev_unregister;
-
-       err = matrix_keypad_build_keymap(NULL, NULL, rows, cols, NULL,
-                                        keypad->dev);
-       if (err) {
-               dev_err(&client->dev, "failed to build keymap\n");
-               goto err_fbdev_unregister;
-       }
-
-       input_set_drvdata(keypad->dev, priv);
-       keypad->rows = rows;
-       keypad->cols = cols;
-       keypad->row_shift = get_count_order(cols);
-       INIT_DELAYED_WORK(&keypad->work, ht16k33_keypad_scan);
-
-       err = input_register_device(keypad->dev);
+       err = ht16k33_keypad_probe(client, &priv->keypad);
        if (err)
                goto err_fbdev_unregister;
 
@@ -482,7 +472,7 @@ static int ht16k33_probe(struct i2c_client *client,
        if (IS_ERR(bl)) {
                dev_err(&client->dev, "failed to register backlight\n");
                err = PTR_ERR(bl);
-               goto err_keypad_unregister;
+               goto err_fbdev_unregister;
        }
 
        err = of_property_read_u32(node, "default-brightness-level",
@@ -502,18 +492,12 @@ static int ht16k33_probe(struct i2c_client *client,
        ht16k33_fb_queue(priv);
        return 0;
 
-err_keypad_unregister:
-       input_unregister_device(keypad->dev);
 err_fbdev_unregister:
        unregister_framebuffer(fbdev->info);
 err_fbdev_info:
        framebuffer_release(fbdev->info);
 err_fbdev_buffer:
        free_page((unsigned long) fbdev->buffer);
-err_free_fbdev:
-       kfree(fbdev);
-err_destroy_wq:
-       destroy_workqueue(priv->workqueue);
 
        return err;
 }
@@ -521,17 +505,13 @@ err_destroy_wq:
 static int ht16k33_remove(struct i2c_client *client)
 {
        struct ht16k33_priv *priv = i2c_get_clientdata(client);
-       struct ht16k33_keypad *keypad = &priv->keypad;
        struct ht16k33_fbdev *fbdev = &priv->fbdev;
 
-       ht16k33_keypad_stop(keypad->dev);
-
        cancel_delayed_work(&fbdev->work);
        unregister_framebuffer(fbdev->info);
        framebuffer_release(fbdev->info);
        free_page((unsigned long) fbdev->buffer);
 
-       destroy_workqueue(priv->workqueue);
        return 0;
 }
 
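The keypad scan routine above turns each (row, column) hit into a keymap index with MATRIX_SCAN_CODE(), using the row_shift derived from the column count (row_shift = get_count_order(cols)). A tiny standalone illustration, assuming the helper's usual packing of (row << row_shift) + col:

#include <stdio.h>

/* Assumed to match MATRIX_SCAN_CODE() from <linux/input/matrix_keypad.h>. */
#define MATRIX_SCAN_CODE(row, col, row_shift) (((row) << (row_shift)) + (col))

int main(void)
{
        unsigned int row_shift = 2;     /* e.g. 3 columns -> next power of two is 4 */

        /* row 1, column 2 lands at index 6 in the keymap */
        printf("scan code = %u\n", MATRIX_SCAN_CODE(1, 2, row_shift));
        return 0;
}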
index fde005ef9d36a96d02be8680cb1845fc042296fa..4ee2a10207d0eee59163ce52ef5d8643e7acc7ca 100644 (file)
@@ -571,9 +571,12 @@ config TELCLOCK
          controlling the behavior of this hardware.
 
 config DEVPORT
-       bool
+       bool "/dev/port character device"
        depends on ISA || PCI
        default y
+       help
+         Say Y here if you want to support the /dev/port device. The /dev/port
+         device is similar to /dev/mem, but for I/O ports.
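User space reaches port I/O through this node by seeking to the port number and transferring bytes, one port per byte of file offset. A minimal sketch of a read (requires root / CAP_SYS_RAWIO; port 0x61 is chosen only as a conventionally harmless example):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        unsigned char val;
        int fd = open("/dev/port", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/port");
                return 1;
        }
        /* the file offset selects the I/O port to access */
        if (pread(fd, &val, 1, 0x61) != 1) {
                perror("pread");
                close(fd);
                return 1;
        }
        printf("port 0x61 = 0x%02x\n", val);
        close(fd);
        return 0;
}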
 
 source "drivers/s390/char/Kconfig"
 
index dd9dfa15e9d13364b1ce2ea65de957bdd21a6ee4..1dfb9f8de1715a6afcfe2828138e678e28602da1 100644 (file)
 #include <linux/kthread.h>
 #include <linux/delay.h>
 
-
-/*
- * The apm_bios device is one of the misc char devices.
- * This is its minor number.
- */
-#define APM_MINOR_DEV  134
-
 /*
  * One option can be changed at boot time as follows:
  *     apm=on/off                      enable/disable APM
index 7d34b203718af7bd9fd5c3a00ca294e5c7a8dea8..c614a56e68ccb6f2562f8635a90b10f162740c1f 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/miscdevice.h>
 #include <linux/delay.h>
 #include <linux/bcd.h>
 #include <linux/mutex.h>
index f786b18ac5008d598cd4a0217a73bb69bc042c34..b708c85dc9c1d7b5d243c4add03cc2fe5936141a 100644 (file)
@@ -463,9 +463,9 @@ static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma)
 }
 
 static struct miscdevice mmtimer_miscdev = {
-       SGI_MMTIMER,
-       MMTIMER_NAME,
-       &mmtimer_fops
+       .minor = SGI_MMTIMER,
+       .name = MMTIMER_NAME,
+       .fops = &mmtimer_fops
 };
 
 static struct timespec sgi_clock_offset;
index 53c3882e49812803633ac49f6b56c1ba9311e773..35981cae1afab28ab73b3071adba30f385608cc8 100644 (file)
@@ -269,7 +269,6 @@ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
 {
        int status;
        s32 buffer_count = 0;
-       s32 num_writes = 0;
        bool dirty = false;
        u32 i;
        void __iomem *base_address = drvdata->base_address;
@@ -298,7 +297,6 @@ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
                }
 
                buffer_count = 0;
-               num_writes++;
                dirty = false;
        }
 
@@ -328,7 +326,6 @@ int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data,
 {
        int status;
        s32 buffer_count = 0;
-       s32 read_count = 0;
        u32 i;
        void __iomem *base_address = drvdata->base_address;
 
@@ -353,7 +350,6 @@ int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data,
                        }
 
                        buffer_count = 0;
-                       read_count++;
                }
 
                /* Copy data from bram */
index 04788d92ea522a5346e76f6dd6f5d6bf08431c4e..96bbae579c0b01cfdc3798e8fdb03948cb391280 100644 (file)
@@ -42,6 +42,16 @@ config EXTCON_GPIO
          Say Y here to enable GPIO based extcon support. Note that GPIO
          extcon supports single state per extcon instance.
 
+config EXTCON_INTEL_INT3496
+       tristate "Intel INT3496 ACPI device extcon driver"
+       depends on GPIOLIB && ACPI
+       help
+         Say Y here to enable extcon support for USB OTG ports controlled by
+         an Intel INT3496 ACPI device.
+
+         This ACPI device is typically found on Intel Baytrail or Cherrytrail
+         based tablets, or other Baytrail / Cherrytrail devices.
+
 config EXTCON_MAX14577
        tristate "Maxim MAX14577/77836 EXTCON Support"
        depends on MFD_MAX14577
index 31a0a999c4fb0c9b6c3da9680f55caba57e02782..237ac3f953c2bfbfcedc4dff477788af39f1232e 100644 (file)
@@ -8,6 +8,7 @@ obj-$(CONFIG_EXTCON_ADC_JACK)   += extcon-adc-jack.o
 obj-$(CONFIG_EXTCON_ARIZONA)   += extcon-arizona.o
 obj-$(CONFIG_EXTCON_AXP288)    += extcon-axp288.o
 obj-$(CONFIG_EXTCON_GPIO)      += extcon-gpio.o
+obj-$(CONFIG_EXTCON_INTEL_INT3496) += extcon-intel-int3496.o
 obj-$(CONFIG_EXTCON_MAX14577)  += extcon-max14577.o
 obj-$(CONFIG_EXTCON_MAX3355)   += extcon-max3355.o
 obj-$(CONFIG_EXTCON_MAX77693)  += extcon-max77693.o
index e686acd1c459176596d1b20a0e081b9335a13f3f..b40eb18059273715b9ab5e7b30edfaf3ab448743 100644 (file)
@@ -14,7 +14,7 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/extcon.h>
+#include "extcon.h"
 
 static int devm_extcon_dev_match(struct device *dev, void *res, void *data)
 {
index bc538708c75370a61ac83b9331e0bf1dd6aea810..6f6537ab0a7911e5dbfaaee5e2f2794326588838 100644 (file)
@@ -67,7 +67,7 @@ static void adc_jack_handler(struct work_struct *work)
 
        ret = iio_read_channel_raw(data->chan, &adc_val);
        if (ret < 0) {
-               dev_err(&data->edev->dev, "read channel() error: %d\n", ret);
+               dev_err(data->dev, "read channel() error: %d\n", ret);
                return;
        }
 
index d836d4ce5ee46ab9da23715881d0cdd82ec7fcb5..ed78b7c26627e7df5ebfeeb76841d13bee952246 100644 (file)
@@ -236,12 +236,8 @@ static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode)
 
        mode %= info->micd_num_modes;
 
-       if (arizona->pdata.micd_pol_gpio > 0)
-               gpio_set_value_cansleep(arizona->pdata.micd_pol_gpio,
-                                       info->micd_modes[mode].gpio);
-       else
-               gpiod_set_value_cansleep(info->micd_pol_gpio,
-                                        info->micd_modes[mode].gpio);
+       gpiod_set_value_cansleep(info->micd_pol_gpio,
+                                info->micd_modes[mode].gpio);
 
        regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
                           ARIZONA_MICD_BIAS_SRC_MASK,
@@ -1412,21 +1408,21 @@ static int arizona_extcon_probe(struct platform_device *pdev)
                regmap_update_bits(arizona->regmap, ARIZONA_GP_SWITCH_1,
                                ARIZONA_SW1_MODE_MASK, arizona->pdata.gpsw);
 
-       if (arizona->pdata.micd_pol_gpio > 0) {
+       if (pdata->micd_pol_gpio > 0) {
                if (info->micd_modes[0].gpio)
                        mode = GPIOF_OUT_INIT_HIGH;
                else
                        mode = GPIOF_OUT_INIT_LOW;
 
-               ret = devm_gpio_request_one(&pdev->dev,
-                                           arizona->pdata.micd_pol_gpio,
-                                           mode,
-                                           "MICD polarity");
+               ret = devm_gpio_request_one(&pdev->dev, pdata->micd_pol_gpio,
+                                           mode, "MICD polarity");
                if (ret != 0) {
                        dev_err(arizona->dev, "Failed to request GPIO%d: %d\n",
-                               arizona->pdata.micd_pol_gpio, ret);
+                               pdata->micd_pol_gpio, ret);
                        goto err_register;
                }
+
+               info->micd_pol_gpio = gpio_to_desc(pdata->micd_pol_gpio);
        } else {
                if (info->micd_modes[0].gpio)
                        mode = GPIOD_OUT_HIGH;
index 42f41e8082924e229d99ceac264549494449b9d3..f4fd03e58e37292235284506433b3d5efc928b90 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/property.h>
-#include <linux/usb/phy.h>
 #include <linux/notifier.h>
 #include <linux/extcon.h>
 #include <linux/regmap.h>
 #define DET_STAT_CDP                   2
 #define DET_STAT_DCP                   3
 
-/* IRQ enable-1 register */
-#define PWRSRC_IRQ_CFG_MASK            (BIT(4)|BIT(3)|BIT(2))
-
-/* IRQ enable-6 register */
-#define BC12_IRQ_CFG_MASK              BIT(1)
-
 enum axp288_extcon_reg {
        AXP288_PS_STAT_REG              = 0x00,
        AXP288_PS_BOOT_REASON_REG       = 0x02,
@@ -84,8 +77,6 @@ enum axp288_extcon_reg {
        AXP288_BC_VBUS_CNTL_REG         = 0x2d,
        AXP288_BC_USB_STAT_REG          = 0x2e,
        AXP288_BC_DET_STAT_REG          = 0x2f,
-       AXP288_PWRSRC_IRQ_CFG_REG       = 0x40,
-       AXP288_BC12_IRQ_CFG_REG         = 0x45,
 };
 
 enum axp288_mux_select {
@@ -105,6 +96,7 @@ static const unsigned int axp288_extcon_cables[] = {
        EXTCON_CHG_USB_SDP,
        EXTCON_CHG_USB_CDP,
        EXTCON_CHG_USB_DCP,
+       EXTCON_USB,
        EXTCON_NONE,
 };
 
@@ -112,11 +104,11 @@ struct axp288_extcon_info {
        struct device *dev;
        struct regmap *regmap;
        struct regmap_irq_chip_data *regmap_irqc;
-       struct axp288_extcon_pdata *pdata;
+       struct gpio_desc *gpio_mux_cntl;
        int irq[EXTCON_IRQ_END];
        struct extcon_dev *edev;
        struct notifier_block extcon_nb;
-       struct usb_phy *otg;
+       unsigned int previous_cable;
 };
 
 /* Power up/down reason string array */
@@ -156,10 +148,9 @@ static void axp288_extcon_log_rsi(struct axp288_extcon_info *info)
 
 static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
 {
-       static bool notify_otg, notify_charger;
-       static unsigned int cable;
        int ret, stat, cfg, pwr_stat;
        u8 chrg_type;
+       unsigned int cable = info->previous_cable;
        bool vbus_attach = false;
 
        ret = regmap_read(info->regmap, AXP288_PS_STAT_REG, &pwr_stat);
@@ -168,9 +159,9 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
                return ret;
        }
 
-       vbus_attach = (pwr_stat & PS_STAT_VBUS_PRESENT);
+       vbus_attach = (pwr_stat & PS_STAT_VBUS_VALID);
        if (!vbus_attach)
-               goto notify_otg;
+               goto no_vbus;
 
        /* Check charger detection completion status */
        ret = regmap_read(info->regmap, AXP288_BC_GLOBAL_REG, &cfg);
@@ -190,19 +181,14 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
        switch (chrg_type) {
        case DET_STAT_SDP:
                dev_dbg(info->dev, "sdp cable is connected\n");
-               notify_otg = true;
-               notify_charger = true;
                cable = EXTCON_CHG_USB_SDP;
                break;
        case DET_STAT_CDP:
                dev_dbg(info->dev, "cdp cable is connected\n");
-               notify_otg = true;
-               notify_charger = true;
                cable = EXTCON_CHG_USB_CDP;
                break;
        case DET_STAT_DCP:
                dev_dbg(info->dev, "dcp cable is connected\n");
-               notify_charger = true;
                cable = EXTCON_CHG_USB_DCP;
                break;
        default:
@@ -210,27 +196,28 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
                        "disconnect or unknown or ID event\n");
        }
 
-notify_otg:
-       if (notify_otg) {
-               /*
-                * If VBUS is absent Connect D+/D- lines to PMIC for BC
-                * detection. Else connect them to SOC for USB communication.
-                */
-               if (info->pdata->gpio_mux_cntl)
-                       gpiod_set_value(info->pdata->gpio_mux_cntl,
-                               vbus_attach ? EXTCON_GPIO_MUX_SEL_SOC
-                                               : EXTCON_GPIO_MUX_SEL_PMIC);
-
-               atomic_notifier_call_chain(&info->otg->notifier,
-                       vbus_attach ? USB_EVENT_VBUS : USB_EVENT_NONE, NULL);
-       }
-
-       if (notify_charger)
+no_vbus:
+       /*
+        * If VBUS is absent Connect D+/D- lines to PMIC for BC
+        * detection. Else connect them to SOC for USB communication.
+        */
+       if (info->gpio_mux_cntl)
+               gpiod_set_value(info->gpio_mux_cntl,
+                       vbus_attach ? EXTCON_GPIO_MUX_SEL_SOC
+                                       : EXTCON_GPIO_MUX_SEL_PMIC);
+
+       extcon_set_state_sync(info->edev, info->previous_cable, false);
+       if (info->previous_cable == EXTCON_CHG_USB_SDP)
+               extcon_set_state_sync(info->edev, EXTCON_USB, false);
+
+       if (vbus_attach) {
                extcon_set_state_sync(info->edev, cable, vbus_attach);
+               if (cable == EXTCON_CHG_USB_SDP)
+                       extcon_set_state_sync(info->edev, EXTCON_USB,
+                                               vbus_attach);
 
-       /* Clear the flags on disconnect event */
-       if (!vbus_attach)
-               notify_otg = notify_charger = false;
+               info->previous_cable = cable;
+       }
 
        return 0;
 
@@ -253,15 +240,10 @@ static irqreturn_t axp288_extcon_isr(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static void axp288_extcon_enable_irq(struct axp288_extcon_info *info)
+static void axp288_extcon_enable(struct axp288_extcon_info *info)
 {
-       /* Unmask VBUS interrupt */
-       regmap_write(info->regmap, AXP288_PWRSRC_IRQ_CFG_REG,
-                                               PWRSRC_IRQ_CFG_MASK);
        regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG,
                                                BC_GLOBAL_RUN, 0);
-       /* Unmask the BC1.2 complete interrupts */
-       regmap_write(info->regmap, AXP288_BC12_IRQ_CFG_REG, BC12_IRQ_CFG_MASK);
        /* Enable the charger detection logic */
        regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG,
                                        BC_GLOBAL_RUN, BC_GLOBAL_RUN);
@@ -271,6 +253,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
 {
        struct axp288_extcon_info *info;
        struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
+       struct axp288_extcon_pdata *pdata = pdev->dev.platform_data;
        int ret, i, pirq, gpio;
 
        info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -280,15 +263,10 @@ static int axp288_extcon_probe(struct platform_device *pdev)
        info->dev = &pdev->dev;
        info->regmap = axp20x->regmap;
        info->regmap_irqc = axp20x->regmap_irqc;
-       info->pdata = pdev->dev.platform_data;
-
-       if (!info->pdata) {
-               /* Try ACPI provided pdata via device properties */
-               if (!device_property_present(&pdev->dev,
-                                       "axp288_extcon_data\n"))
-                       dev_err(&pdev->dev, "failed to get platform data\n");
-               return -ENODEV;
-       }
+       info->previous_cable = EXTCON_NONE;
+       if (pdata)
+               info->gpio_mux_cntl = pdata->gpio_mux_cntl;
+
        platform_set_drvdata(pdev, info);
 
        axp288_extcon_log_rsi(info);
@@ -308,23 +286,16 @@ static int axp288_extcon_probe(struct platform_device *pdev)
                return ret;
        }
 
-       /* Get otg transceiver phy */
-       info->otg = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
-       if (IS_ERR(info->otg)) {
-               dev_err(&pdev->dev, "failed to get otg transceiver\n");
-               return PTR_ERR(info->otg);
-       }
-
        /* Set up gpio control for USB Mux */
-       if (info->pdata->gpio_mux_cntl) {
-               gpio = desc_to_gpio(info->pdata->gpio_mux_cntl);
+       if (info->gpio_mux_cntl) {
+               gpio = desc_to_gpio(info->gpio_mux_cntl);
                ret = devm_gpio_request(&pdev->dev, gpio, "USB_MUX");
                if (ret < 0) {
                        dev_err(&pdev->dev,
                                "failed to request the gpio=%d\n", gpio);
                        return ret;
                }
-               gpiod_direction_output(info->pdata->gpio_mux_cntl,
+               gpiod_direction_output(info->gpio_mux_cntl,
                                                EXTCON_GPIO_MUX_SEL_PMIC);
        }
 
@@ -349,14 +320,21 @@ static int axp288_extcon_probe(struct platform_device *pdev)
                }
        }
 
-       /* Enable interrupts */
-       axp288_extcon_enable_irq(info);
+       /* Start charger cable type detection */
+       axp288_extcon_enable(info);
 
        return 0;
 }
 
+static const struct platform_device_id axp288_extcon_table[] = {
+       { .name = "axp288_extcon" },
+       {},
+};
+MODULE_DEVICE_TABLE(platform, axp288_extcon_table);
+
 static struct platform_driver axp288_extcon_driver = {
        .probe = axp288_extcon_probe,
+       .id_table = axp288_extcon_table,
        .driver = {
                .name = "axp288_extcon",
        },
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c
new file mode 100644 (file)
index 0000000..a3131b0
--- /dev/null
@@ -0,0 +1,179 @@
+/*
+ * Intel INT3496 ACPI device extcon driver
+ *
+ * Copyright (c) 2016 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on android x86 kernel code which is:
+ *
+ * Copyright (c) 2014, Intel Corporation.
+ * Author: David Cohen <david.a.cohen@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/extcon.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define INT3496_GPIO_USB_ID    0
+#define INT3496_GPIO_VBUS_EN   1
+#define INT3496_GPIO_USB_MUX   2
+#define DEBOUNCE_TIME          msecs_to_jiffies(50)
+
+struct int3496_data {
+       struct device *dev;
+       struct extcon_dev *edev;
+       struct delayed_work work;
+       struct gpio_desc *gpio_usb_id;
+       struct gpio_desc *gpio_vbus_en;
+       struct gpio_desc *gpio_usb_mux;
+       int usb_id_irq;
+};
+
+static const unsigned int int3496_cable[] = {
+       EXTCON_USB_HOST,
+       EXTCON_NONE,
+};
+
+static void int3496_do_usb_id(struct work_struct *work)
+{
+       struct int3496_data *data =
+               container_of(work, struct int3496_data, work.work);
+       int id = gpiod_get_value_cansleep(data->gpio_usb_id);
+
+       /* id == 1: PERIPHERAL, id == 0: HOST */
+       dev_dbg(data->dev, "Connected %s cable\n", id ? "PERIPHERAL" : "HOST");
+
+       /*
+        * Peripheral: set USB mux to peripheral and disable VBUS
+        * Host: set USB mux to host and enable VBUS
+        */
+       if (!IS_ERR(data->gpio_usb_mux))
+               gpiod_direction_output(data->gpio_usb_mux, id);
+
+       if (!IS_ERR(data->gpio_vbus_en))
+               gpiod_direction_output(data->gpio_vbus_en, !id);
+
+       extcon_set_state_sync(data->edev, EXTCON_USB_HOST, !id);
+}
+
+static irqreturn_t int3496_thread_isr(int irq, void *priv)
+{
+       struct int3496_data *data = priv;
+
+       /* Let the pin settle before processing it */
+       mod_delayed_work(system_wq, &data->work, DEBOUNCE_TIME);
+
+       return IRQ_HANDLED;
+}
+
+static int int3496_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct int3496_data *data;
+       int ret;
+
+       data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->dev = dev;
+       INIT_DELAYED_WORK(&data->work, int3496_do_usb_id);
+
+       data->gpio_usb_id = devm_gpiod_get_index(dev, "id",
+                                               INT3496_GPIO_USB_ID,
+                                               GPIOD_IN);
+       if (IS_ERR(data->gpio_usb_id)) {
+               ret = PTR_ERR(data->gpio_usb_id);
+               dev_err(dev, "can't request USB ID GPIO: %d\n", ret);
+               return ret;
+       }
+
+       data->usb_id_irq = gpiod_to_irq(data->gpio_usb_id);
+       if (data->usb_id_irq <= 0) {
+               dev_err(dev, "can't get USB ID IRQ: %d\n", data->usb_id_irq);
+               return -EINVAL;
+       }
+
+       data->gpio_vbus_en = devm_gpiod_get_index(dev, "vbus en",
+                                                INT3496_GPIO_VBUS_EN,
+                                                GPIOD_ASIS);
+       if (IS_ERR(data->gpio_vbus_en))
+               dev_info(dev, "can't request VBUS EN GPIO\n");
+
+       data->gpio_usb_mux = devm_gpiod_get_index(dev, "usb mux",
+                                                INT3496_GPIO_USB_MUX,
+                                                GPIOD_ASIS);
+       if (IS_ERR(data->gpio_usb_mux))
+               dev_info(dev, "can't request USB MUX GPIO\n");
+
+       /* register extcon device */
+       data->edev = devm_extcon_dev_allocate(dev, int3496_cable);
+       if (IS_ERR(data->edev))
+               return -ENOMEM;
+
+       ret = devm_extcon_dev_register(dev, data->edev);
+       if (ret < 0) {
+               dev_err(dev, "can't register extcon device: %d\n", ret);
+               return ret;
+       }
+
+       ret = devm_request_threaded_irq(dev, data->usb_id_irq,
+                                       NULL, int3496_thread_isr,
+                                       IRQF_SHARED | IRQF_ONESHOT |
+                                       IRQF_TRIGGER_RISING |
+                                       IRQF_TRIGGER_FALLING,
+                                       dev_name(dev), data);
+       if (ret < 0) {
+               dev_err(dev, "can't request IRQ for USB ID GPIO: %d\n", ret);
+               return ret;
+       }
+
+       /* queue initial processing of id-pin */
+       queue_delayed_work(system_wq, &data->work, 0);
+
+       platform_set_drvdata(pdev, data);
+
+       return 0;
+}
+
+static int int3496_remove(struct platform_device *pdev)
+{
+       struct int3496_data *data = platform_get_drvdata(pdev);
+
+       devm_free_irq(&pdev->dev, data->usb_id_irq, data);
+       cancel_delayed_work_sync(&data->work);
+
+       return 0;
+}
+
+static struct acpi_device_id int3496_acpi_match[] = {
+       { "INT3496" },
+       { }
+};
+MODULE_DEVICE_TABLE(acpi, int3496_acpi_match);
+
+static struct platform_driver int3496_driver = {
+       .driver = {
+               .name = "intel-int3496",
+               .acpi_match_table = int3496_acpi_match,
+       },
+       .probe = int3496_probe,
+       .remove = int3496_remove,
+};
+
+module_platform_driver(int3496_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Intel INT3496 ACPI device extcon driver");
+MODULE_LICENSE("GPL");
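A driver that wants to react to the EXTCON_USB_HOST state this device reports would typically register an extcon notifier. A hedged sketch of such a consumer (the function and variable names are illustrative, and obtaining the extcon_dev, e.g. via extcon_get_edev_by_phandle(), is left out):

#include <linux/extcon.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int example_host_notifier(struct notifier_block *nb,
                                 unsigned long state, void *data)
{
        /* state is non-zero when the USB-HOST cable is reported attached */
        pr_info("USB host cable %s\n", state ? "attached" : "detached");
        return NOTIFY_OK;
}

static struct notifier_block example_nb = {
        .notifier_call = example_host_notifier,
};

/* called from the consumer's probe() once it has found the extcon_dev */
static int example_register(struct extcon_dev *edev)
{
        return extcon_register_notifier(edev, EXTCON_USB_HOST, &example_nb);
}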
index 12e26c4e77638cf16041037b040a93371c714bd4..f6414b7fa5bc93fd4d4a6aecdcb63f1401974a8b 100644 (file)
@@ -531,8 +531,10 @@ static int max14577_parse_irq(struct max14577_muic_info *info, int irq_type)
        case MAX14577_IRQ_INT1_ADC:
        case MAX14577_IRQ_INT1_ADCLOW:
        case MAX14577_IRQ_INT1_ADCERR:
-               /* Handle all of accessory except for
-                  type of charger accessory */
+               /*
+                * Handle all accessories except for the
+                * charger-type accessory.
+                */
                info->irq_adc = true;
                return 1;
        case MAX14577_IRQ_INT2_CHGTYP:
index 68dbcb814b2ff78a64bc7a456a380e89227e32f9..62163468f205bd557478247a30130cfd23def9a3 100644 (file)
@@ -188,8 +188,10 @@ enum max77693_muic_acc_type {
        MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE,
        MAX77693_MUIC_ADC_OPEN,
 
-       /* The below accessories have same ADC value so ADCLow and
-          ADC1K bit is used to separate specific accessory */
+       /*
+        * The below accessories have same ADC value so ADCLow and
+        * ADC1K bit is used to separate specific accessory.
+        */
                                                /* ADC|VBVolot|ADCLow|ADC1K| */
        MAX77693_MUIC_GND_USB_HOST = 0x100,     /* 0x0|      0|     0|    0| */
        MAX77693_MUIC_GND_USB_HOST_VB = 0x104,  /* 0x0|      1|     0|    0| */
@@ -970,8 +972,10 @@ static void max77693_muic_irq_work(struct work_struct *work)
        case MAX77693_MUIC_IRQ_INT1_ADC_LOW:
        case MAX77693_MUIC_IRQ_INT1_ADC_ERR:
        case MAX77693_MUIC_IRQ_INT1_ADC1K:
-               /* Handle all of accessory except for
-                  type of charger accessory */
+               /*
+                * Handle all accessories except for the
+                * charger-type accessory.
+                */
                ret = max77693_muic_adc_handler(info);
                break;
        case MAX77693_MUIC_IRQ_INT2_CHGTYP:
index 5d11fdf36e9424b9e03eb484098e4752f147be4c..6e722d552cf101d01986749bab89de68d47898f8 100644 (file)
@@ -97,8 +97,10 @@ enum max77843_muic_accessory_type {
        MAX77843_MUIC_ADC_AUDIO_DEVICE_TYPE1,
        MAX77843_MUIC_ADC_OPEN,
 
-       /* The blow accessories should check
-          not only ADC value but also ADC1K and VBVolt value. */
+       /*
+        * The below accessories should check
+        * not only ADC value but also ADC1K and VBVolt value.
+        */
                                                /* Offset|ADC1K|VBVolt| */
        MAX77843_MUIC_GND_USB_HOST = 0x100,     /*    0x1|    0|     0| */
        MAX77843_MUIC_GND_USB_HOST_VB = 0x101,  /*    0x1|    0|     1| */
@@ -265,16 +267,20 @@ static int max77843_muic_get_cable_type(struct max77843_muic_info *info,
                /* Check GROUND accessory with charger cable */
                if (adc == MAX77843_MUIC_ADC_GROUND) {
                        if (chg_type == MAX77843_MUIC_CHG_NONE) {
-                               /* The following state when charger cable is
+                               /*
+                                * The following state when charger cable is
                                 * disconnected but the GROUND accessory still
-                                * connected */
+                                * connected.
+                                */
                                *attached = false;
                                cable_type = info->prev_chg_type;
                                info->prev_chg_type = MAX77843_MUIC_CHG_NONE;
                        } else {
 
-                               /* The following state when charger cable is
-                                * connected on the GROUND accessory */
+                               /*
+                                * The following state when charger cable is
+                                * connected on the GROUND accessory.
+                                */
                                *attached = true;
                                cable_type = MAX77843_MUIC_CHG_GND;
                                info->prev_chg_type = MAX77843_MUIC_CHG_GND;
@@ -299,11 +305,13 @@ static int max77843_muic_get_cable_type(struct max77843_muic_info *info,
                } else {
                        *attached = true;
 
-                       /* Offset|ADC1K|VBVolt|
+                       /*
+                        * Offset|ADC1K|VBVolt|
                         *    0x1|    0|     0| USB-HOST
                         *    0x1|    0|     1| USB-HOST with VB
                         *    0x1|    1|     0| MHL
-                        *    0x1|    1|     1| MHL with VB */
+                        *    0x1|    1|     1| MHL with VB
+                        */
                        /* Get ADC1K register bit */
                        gnd_type = (info->status[MAX77843_MUIC_STATUS1] &
                                        MAX77843_MUIC_STATUS1_ADC1K_MASK);
index 634ba70782de9c92f256e9e5cdd535f71dd642f2..ca904e8b32351110335e48c0fd755994c0042d81 100644 (file)
@@ -62,7 +62,7 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
                if (palmas_usb->linkstat != PALMAS_USB_STATE_VBUS) {
                        palmas_usb->linkstat = PALMAS_USB_STATE_VBUS;
                        extcon_set_state_sync(edev, EXTCON_USB, true);
-                       dev_info(palmas_usb->dev, "USB cable is attached\n");
+                       dev_dbg(palmas_usb->dev, "USB cable is attached\n");
                } else {
                        dev_dbg(palmas_usb->dev,
                                "Spurious connect event detected\n");
@@ -71,7 +71,7 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
                if (palmas_usb->linkstat == PALMAS_USB_STATE_VBUS) {
                        palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
                        extcon_set_state_sync(edev, EXTCON_USB, false);
-                       dev_info(palmas_usb->dev, "USB cable is detached\n");
+                       dev_dbg(palmas_usb->dev, "USB cable is detached\n");
                } else {
                        dev_dbg(palmas_usb->dev,
                                "Spurious disconnect event detected\n");
@@ -99,7 +99,7 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
                        PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND);
                palmas_usb->linkstat = PALMAS_USB_STATE_ID;
                extcon_set_state_sync(edev, EXTCON_USB_HOST, true);
-               dev_info(palmas_usb->dev, "USB-HOST cable is attached\n");
+               dev_dbg(palmas_usb->dev, "USB-HOST cable is attached\n");
        } else if ((set & PALMAS_USB_ID_INT_SRC_ID_FLOAT) &&
                                (id_src & PALMAS_USB_ID_INT_SRC_ID_FLOAT)) {
                palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
@@ -107,17 +107,17 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
                        PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT);
                palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
                extcon_set_state_sync(edev, EXTCON_USB_HOST, false);
-               dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
+               dev_dbg(palmas_usb->dev, "USB-HOST cable is detached\n");
        } else if ((palmas_usb->linkstat == PALMAS_USB_STATE_ID) &&
                                (!(set & PALMAS_USB_ID_INT_SRC_ID_GND))) {
                palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
                extcon_set_state_sync(edev, EXTCON_USB_HOST, false);
-               dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
+               dev_dbg(palmas_usb->dev, "USB-HOST cable is detached\n");
        } else if ((palmas_usb->linkstat == PALMAS_USB_STATE_DISCONNECT) &&
                                (id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) {
                palmas_usb->linkstat = PALMAS_USB_STATE_ID;
                extcon_set_state_sync(edev, EXTCON_USB_HOST, true);
-               dev_info(palmas_usb->dev, " USB-HOST cable is attached\n");
+               dev_dbg(palmas_usb->dev, " USB-HOST cable is attached\n");
        }
 
        return IRQ_HANDLED;
@@ -138,10 +138,10 @@ static void palmas_gpio_id_detect(struct work_struct *work)
 
        if (id) {
                extcon_set_state_sync(edev, EXTCON_USB_HOST, false);
-               dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
+               dev_dbg(palmas_usb->dev, "USB-HOST cable is detached\n");
        } else {
                extcon_set_state_sync(edev, EXTCON_USB_HOST, true);
-               dev_info(palmas_usb->dev, "USB-HOST cable is attached\n");
+               dev_dbg(palmas_usb->dev, "USB-HOST cable is attached\n");
        }
 }
 
@@ -190,6 +190,11 @@ static int palmas_usb_probe(struct platform_device *pdev)
        struct palmas_usb *palmas_usb;
        int status;
 
+       if (!palmas) {
+               dev_err(&pdev->dev, "failed to get valid parent\n");
+               return -EINVAL;
+       }
+
        palmas_usb = devm_kzalloc(&pdev->dev, sizeof(*palmas_usb), GFP_KERNEL);
        if (!palmas_usb)
                return -ENOMEM;
index 174c388739ea8da35105a892b61c12741be4638b..3e882aa107e837848a45575fd6b980ba2dfa7fa2 100644 (file)
@@ -142,8 +142,10 @@ enum rt8973a_muic_acc_type {
        RT8973A_MUIC_ADC_UNKNOWN_ACC_5,
        RT8973A_MUIC_ADC_OPEN = 0x1f,
 
-       /* The below accessories has same ADC value (0x1f).
-          So, Device type1 is used to separate specific accessory. */
+       /*
+        * The below accessories have same ADC value (0x1f).
+        * So, Device type1 is used to separate specific accessory.
+        */
                                        /* |---------|--ADC| */
                                        /* |    [7:5]|[4:0]| */
        RT8973A_MUIC_ADC_USB = 0x3f,    /* |      001|11111| */
index b223256885033f01a2af1be8da6fbdb8d5a1ae2c..106ef0297b537b3a768eecef3f8f3af0829c7020 100644 (file)
@@ -135,8 +135,10 @@ enum sm5502_muic_acc_type {
        SM5502_MUIC_ADC_AUDIO_TYPE1,
        SM5502_MUIC_ADC_OPEN = 0x1f,
 
-       /* The below accessories have same ADC value (0x1f or 0x1e).
-          So, Device type1 is used to separate specific accessory. */
+       /*
+        * The below accessories have the same ADC value (0x1f or 0x1e), so
+        * Device type1 is used to separate the specific accessory.
+        */
                                                        /* |---------|--ADC| */
                                                        /* |    [7:5]|[4:0]| */
        SM5502_MUIC_ADC_AUDIO_TYPE1_FULL_REMOTE = 0x3e, /* |      001|11110| */
index d589c5feff3d7d082fd8372cf48a0c3d3181b9ff..a5e1882b4ca66e370f4c52c7c024290a1685afd7 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/workqueue.h>
 #include <linux/acpi.h>
+#include <linux/pinctrl/consumer.h>
 
 #define USB_GPIO_DEBOUNCE_MS   20      /* ms */
 
@@ -245,6 +246,9 @@ static int usb_extcon_suspend(struct device *dev)
        if (info->vbus_gpiod)
                disable_irq(info->vbus_irq);
 
+       if (!device_may_wakeup(dev))
+               pinctrl_pm_select_sleep_state(dev);
+
        return ret;
 }
 
@@ -253,6 +257,9 @@ static int usb_extcon_resume(struct device *dev)
        struct usb_extcon_info *info = dev_get_drvdata(dev);
        int ret = 0;
 
+       if (!device_may_wakeup(dev))
+               pinctrl_pm_select_default_state(dev);
+
        if (device_may_wakeup(dev)) {
                if (info->id_gpiod) {
                        ret = disable_irq_wake(info->id_irq);
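
The usb-gpio hunks above add a common power-management pattern: when the extcon is not a wakeup source, its pins are parked in the pinctrl "sleep" state across suspend and restored to "default" on resume. A minimal sketch of that pattern for a hypothetical platform driver, not part of this commit (the foo_* names are illustrative):

    #include <linux/pinctrl/consumer.h>
    #include <linux/pm.h>
    #include <linux/pm_wakeup.h>

    static int __maybe_unused foo_suspend(struct device *dev)
    {
            /* Wakeup-capable pins must stay functional, so only park them
             * when the device is not expected to wake the system.
             */
            if (!device_may_wakeup(dev))
                    pinctrl_pm_select_sleep_state(dev);
            return 0;
    }

    static int __maybe_unused foo_resume(struct device *dev)
    {
            if (!device_may_wakeup(dev))
                    pinctrl_pm_select_default_state(dev);
            return 0;
    }

    /* Wired up via .driver.pm in the platform_driver. */
    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
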
index 7c1e3a7b14e0c1f540a0bbe7d6b924b124561729..09ac5e70c2f38706909d27a8369d357178d989bd 100644 (file)
 #include <linux/device.h>
 #include <linux/fs.h>
 #include <linux/err.h>
-#include <linux/extcon.h>
 #include <linux/of.h>
 #include <linux/slab.h>
 #include <linux/sysfs.h>
 
+#include "extcon.h"
+
 #define SUPPORTED_CABLE_MAX    32
 #define CABLE_NAME_MAX         30
 
@@ -59,7 +60,7 @@ struct __extcon_info {
        [EXTCON_USB_HOST] = {
                .type = EXTCON_TYPE_USB,
                .id = EXTCON_USB_HOST,
-               .name = "USB_HOST",
+               .name = "USB-HOST",
        },
 
        /* Charging external connector */
@@ -98,6 +99,11 @@ struct __extcon_info {
                .id = EXTCON_CHG_WPT,
                .name = "WPT",
        },
+       [EXTCON_CHG_USB_PD] = {
+               .type = EXTCON_TYPE_CHG | EXTCON_TYPE_USB,
+               .id = EXTCON_CHG_USB_PD,
+               .name = "PD",
+       },
 
        /* Jack external connector */
        [EXTCON_JACK_MICROPHONE] = {
@@ -906,35 +912,16 @@ int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
        unsigned long flags;
        int ret, idx = -EINVAL;
 
-       if (!nb)
+       if (!edev || !nb)
                return -EINVAL;
 
-       if (edev) {
-               idx = find_cable_index_by_id(edev, id);
-               if (idx < 0)
-                       return idx;
-
-               spin_lock_irqsave(&edev->lock, flags);
-               ret = raw_notifier_chain_register(&edev->nh[idx], nb);
-               spin_unlock_irqrestore(&edev->lock, flags);
-       } else {
-               struct extcon_dev *extd;
-
-               mutex_lock(&extcon_dev_list_lock);
-               list_for_each_entry(extd, &extcon_dev_list, entry) {
-                       idx = find_cable_index_by_id(extd, id);
-                       if (idx >= 0)
-                               break;
-               }
-               mutex_unlock(&extcon_dev_list_lock);
+       idx = find_cable_index_by_id(edev, id);
+       if (idx < 0)
+               return idx;
 
-               if (idx >= 0) {
-                       edev = extd;
-                       return extcon_register_notifier(extd, id, nb);
-               } else {
-                       ret = -ENODEV;
-               }
-       }
+       spin_lock_irqsave(&edev->lock, flags);
+       ret = raw_notifier_chain_register(&edev->nh[idx], nb);
+       spin_unlock_irqrestore(&edev->lock, flags);
 
        return ret;
 }
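
With the NULL-edev fallback removed, extcon_register_notifier() now requires a concrete extcon_dev from every caller. A hedged sketch of how a consumer might look one up by phandle and watch EXTCON_USB_HOST (the demo_* names and phandle index are illustrative, not part of this commit):

    #include <linux/err.h>
    #include <linux/extcon.h>
    #include <linux/kernel.h>
    #include <linux/notifier.h>
    #include <linux/platform_device.h>

    static int demo_host_notifier(struct notifier_block *nb,
                                  unsigned long event, void *ptr)
    {
            /* event carries the new cable state (non-zero = attached) */
            pr_info("USB-HOST cable %s\n", event ? "attached" : "detached");
            return NOTIFY_DONE;
    }

    static struct notifier_block demo_nb = {
            .notifier_call = demo_host_notifier,
    };

    static int demo_probe(struct platform_device *pdev)
    {
            struct extcon_dev *edev;

            /* "extcon" phandle index 0 in the consumer's DT node (assumed) */
            edev = extcon_get_edev_by_phandle(&pdev->dev, 0);
            if (IS_ERR(edev))
                    return PTR_ERR(edev);

            /* Passing a NULL edev now simply returns -EINVAL. */
            return extcon_register_notifier(edev, EXTCON_USB_HOST, &demo_nb);
    }
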
diff --git a/drivers/extcon/extcon.h b/drivers/extcon/extcon.h
new file mode 100644 (file)
index 0000000..993ddcc
--- /dev/null
@@ -0,0 +1,62 @@
+#ifndef __LINUX_EXTCON_INTERNAL_H__
+#define __LINUX_EXTCON_INTERNAL_H__
+
+#include <linux/extcon.h>
+
+/**
+ * struct extcon_dev - An extcon device represents one external connector.
+ * @name:              The name of this extcon device. Parent device name is
+ *                     used if NULL.
+ * @supported_cable:   Array of supported cable names ending with EXTCON_NONE.
+ *                     If supported_cable is NULL, cable name related APIs
+ *                     are disabled.
+ * @mutually_exclusive:        Array of mutually exclusive sets of cables that
+ *                     cannot be attached simultaneously. The array should
+ *                     end with NULL or be NULL itself (no mutually exclusive
+ *                     cables). For example, if it is { 0x7, 0x30, 0}, then
+ *                     {0, 1}, {0, 1, 2}, {0, 2}, {1, 2}, or {4, 5} cannot
+ *                     be attached simultaneously. {0x7, 0} is equivalent to
+ *                     {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
+ *                     can be no simultaneous connections.
+ * @dev:               Device of this extcon.
+ * @state:             Attach/detach state of this extcon. Do not provide at
+ *                     register-time.
+ * @nh:                        Notifier for the state change events from this extcon
+ * @entry:             To support list of extcon devices so that users can
+ *                     search for extcon devices based on the extcon name.
+ * @lock:              Protects @state and the notifier chains; may be taken
+ *                     from IRQ context.
+ * @max_supported:     Internal value to store the number of cables.
+ * @extcon_dev_type:   Device_type struct to provide attribute_groups
+ *                     customized for each extcon device.
+ * @cables:            Sysfs subdirectories. Each represents one cable.
+ *
+ * In most cases, users only need to provide "User initializing data" of
+ * this struct when registering an extcon. In some exceptional cases,
+ * optional callbacks may be needed. However, the values in "internal data"
+ * are overwritten by register function.
+ */
+struct extcon_dev {
+       /* Optional user initializing data */
+       const char *name;
+       const unsigned int *supported_cable;
+       const u32 *mutually_exclusive;
+
+       /* Internal data. Please do not set. */
+       struct device dev;
+       struct raw_notifier_head *nh;
+       struct list_head entry;
+       int max_supported;
+       spinlock_t lock;        /* could be called by irq handler */
+       u32 state;
+
+       /* /sys/class/extcon/.../cable.n/... */
+       struct device_type extcon_dev_type;
+       struct extcon_cable *cables;
+
+       /* /sys/class/extcon/.../mutually_exclusive/... */
+       struct attribute_group attr_g_muex;
+       struct attribute **attrs_muex;
+       struct device_attribute *d_attrs_muex;
+};
+
+#endif /* __LINUX_EXTCON_INTERNAL_H__ */
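
Because struct extcon_dev now lives in this core-private header, providers and consumers only ever deal with the opaque pointer exposed by <linux/extcon.h>. A minimal, hypothetical provider using the devres helpers could look like this (cable list and demo_* names are illustrative):

    #include <linux/err.h>
    #include <linux/extcon.h>
    #include <linux/platform_device.h>

    static const unsigned int demo_cables[] = {
            EXTCON_USB,
            EXTCON_USB_HOST,
            EXTCON_NONE,            /* terminator required by the core */
    };

    static int demo_extcon_probe(struct platform_device *pdev)
    {
            struct extcon_dev *edev;
            int ret;

            edev = devm_extcon_dev_allocate(&pdev->dev, demo_cables);
            if (IS_ERR(edev))
                    return PTR_ERR(edev);

            ret = devm_extcon_dev_register(&pdev->dev, edev);
            if (ret)
                    return ret;

            /* Report an initial state; EXTCON_USB notifiers fire from here. */
            return extcon_set_state_sync(edev, EXTCON_USB, true);
    }
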
index f0a69d3e60a584d4565459b6e845cb205dc93946..86d2cb203533ff41df1d33d1cc4f312696e6ebf6 100644 (file)
 #include <linux/of.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/highmem.h>
 
 static DEFINE_IDA(fpga_mgr_ida);
 static struct class *fpga_mgr_class;
 
+/*
+ * Call the low level driver's write_init function.  This will do the
+ * device-specific things to get the FPGA into the state where it is ready to
+ * receive an FPGA image. The low level driver only gets to see the first
+ * initial_header_size bytes in the buffer.
+ */
+static int fpga_mgr_write_init_buf(struct fpga_manager *mgr,
+                                  struct fpga_image_info *info,
+                                  const char *buf, size_t count)
+{
+       int ret;
+
+       mgr->state = FPGA_MGR_STATE_WRITE_INIT;
+       if (!mgr->mops->initial_header_size)
+               ret = mgr->mops->write_init(mgr, info, NULL, 0);
+       else
+               ret = mgr->mops->write_init(
+                   mgr, info, buf, min(mgr->mops->initial_header_size, count));
+
+       if (ret) {
+               dev_err(&mgr->dev, "Error preparing FPGA for writing\n");
+               mgr->state = FPGA_MGR_STATE_WRITE_INIT_ERR;
+               return ret;
+       }
+
+       return 0;
+}
+
+static int fpga_mgr_write_init_sg(struct fpga_manager *mgr,
+                                 struct fpga_image_info *info,
+                                 struct sg_table *sgt)
+{
+       struct sg_mapping_iter miter;
+       size_t len;
+       char *buf;
+       int ret;
+
+       if (!mgr->mops->initial_header_size)
+               return fpga_mgr_write_init_buf(mgr, info, NULL, 0);
+
+       /*
+        * First try to use miter to map the first fragment to access the
+        * header; this is the typical path.
+        */
+       sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG);
+       if (sg_miter_next(&miter) &&
+           miter.length >= mgr->mops->initial_header_size) {
+               ret = fpga_mgr_write_init_buf(mgr, info, miter.addr,
+                                             miter.length);
+               sg_miter_stop(&miter);
+               return ret;
+       }
+       sg_miter_stop(&miter);
+
+       /* Otherwise copy the fragments into temporary memory. */
+       buf = kmalloc(mgr->mops->initial_header_size, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       len = sg_copy_to_buffer(sgt->sgl, sgt->nents, buf,
+                               mgr->mops->initial_header_size);
+       ret = fpga_mgr_write_init_buf(mgr, info, buf, len);
+
+       kfree(buf);
+
+       return ret;
+}
+
+/*
+ * After all the FPGA image has been written, do the device specific steps to
+ * finish and set the FPGA into operating mode.
+ */
+static int fpga_mgr_write_complete(struct fpga_manager *mgr,
+                                  struct fpga_image_info *info)
+{
+       int ret;
+
+       mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE;
+       ret = mgr->mops->write_complete(mgr, info);
+       if (ret) {
+               dev_err(&mgr->dev, "Error after writing image data to FPGA\n");
+               mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE_ERR;
+               return ret;
+       }
+       mgr->state = FPGA_MGR_STATE_OPERATING;
+
+       return 0;
+}
+
 /**
- * fpga_mgr_buf_load - load fpga from image in buffer
+ * fpga_mgr_buf_load_sg - load fpga from image in buffer from a scatter list
  * @mgr:       fpga manager
  * @info:      fpga image specific information
- * @buf:       buffer contain fpga image
- * @count:     byte count of buf
+ * @sgt:       scatterlist table
  *
  * Step the low level fpga manager through the device-specific steps of getting
  * an FPGA ready to be configured, writing the image to it, then doing whatever
@@ -42,54 +132,139 @@ static struct class *fpga_mgr_class;
  * mgr pointer from of_fpga_mgr_get() or fpga_mgr_get() and checked that it is
  * not an error code.
  *
+ * This is the preferred entry point for FPGA programming, it does not require
+ * any contiguous kernel memory.
+ *
  * Return: 0 on success, negative error code otherwise.
  */
-int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
-                     const char *buf, size_t count)
+int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, struct fpga_image_info *info,
+                        struct sg_table *sgt)
 {
-       struct device *dev = &mgr->dev;
        int ret;
 
-       /*
-        * Call the low level driver's write_init function.  This will do the
-        * device-specific things to get the FPGA into the state where it is
-        * ready to receive an FPGA image. The low level driver only gets to
-        * see the first initial_header_size bytes in the buffer.
-        */
-       mgr->state = FPGA_MGR_STATE_WRITE_INIT;
-       ret = mgr->mops->write_init(mgr, info, buf,
-                                   min(mgr->mops->initial_header_size, count));
+       ret = fpga_mgr_write_init_sg(mgr, info, sgt);
+       if (ret)
+               return ret;
+
+       /* Write the FPGA image to the FPGA. */
+       mgr->state = FPGA_MGR_STATE_WRITE;
+       if (mgr->mops->write_sg) {
+               ret = mgr->mops->write_sg(mgr, sgt);
+       } else {
+               struct sg_mapping_iter miter;
+
+               sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG);
+               while (sg_miter_next(&miter)) {
+                       ret = mgr->mops->write(mgr, miter.addr, miter.length);
+                       if (ret)
+                               break;
+               }
+               sg_miter_stop(&miter);
+       }
+
        if (ret) {
-               dev_err(dev, "Error preparing FPGA for writing\n");
-               mgr->state = FPGA_MGR_STATE_WRITE_INIT_ERR;
+               dev_err(&mgr->dev, "Error while writing image data to FPGA\n");
+               mgr->state = FPGA_MGR_STATE_WRITE_ERR;
                return ret;
        }
 
+       return fpga_mgr_write_complete(mgr, info);
+}
+EXPORT_SYMBOL_GPL(fpga_mgr_buf_load_sg);
+
+static int fpga_mgr_buf_load_mapped(struct fpga_manager *mgr,
+                                   struct fpga_image_info *info,
+                                   const char *buf, size_t count)
+{
+       int ret;
+
+       ret = fpga_mgr_write_init_buf(mgr, info, buf, count);
+       if (ret)
+               return ret;
+
        /*
         * Write the FPGA image to the FPGA.
         */
        mgr->state = FPGA_MGR_STATE_WRITE;
        ret = mgr->mops->write(mgr, buf, count);
        if (ret) {
-               dev_err(dev, "Error while writing image data to FPGA\n");
+               dev_err(&mgr->dev, "Error while writing image data to FPGA\n");
                mgr->state = FPGA_MGR_STATE_WRITE_ERR;
                return ret;
        }
 
+       return fpga_mgr_write_complete(mgr, info);
+}
+
+/**
+ * fpga_mgr_buf_load - load fpga from image in buffer
+ * @mgr:       fpga manager
+ * @info:      fpga image specific information
+ * @buf:       buffer containing the fpga image
+ * @count:     byte count of buf
+ *
+ * Step the low level fpga manager through the device-specific steps of getting
+ * an FPGA ready to be configured, writing the image to it, then doing whatever
+ * post-configuration steps necessary.  This code assumes the caller got the
+ * mgr pointer from of_fpga_mgr_get() and checked that it is not an error code.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
+                     const char *buf, size_t count)
+{
+       struct page **pages;
+       struct sg_table sgt;
+       const void *p;
+       int nr_pages;
+       int index;
+       int rc;
+
        /*
-        * After all the FPGA image has been written, do the device specific
-        * steps to finish and set the FPGA into operating mode.
+        * This is just a fast path for callers that already have a contiguous
+        * kernel buffer and drivers that implement the non-SG write op;
+        * drivers that only implement write_sg are served by the slow path
+        * below.
         */
-       mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE;
-       ret = mgr->mops->write_complete(mgr, info);
-       if (ret) {
-               dev_err(dev, "Error after writing image data to FPGA\n");
-               mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE_ERR;
-               return ret;
+       if (mgr->mops->write)
+               return fpga_mgr_buf_load_mapped(mgr, info, buf, count);
+
+       /*
+        * Convert the linear kernel pointer into a sg_table of pages for use
+        * by the driver.
+        */
+       nr_pages = DIV_ROUND_UP((unsigned long)buf + count, PAGE_SIZE) -
+                  (unsigned long)buf / PAGE_SIZE;
+       pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
+       if (!pages)
+               return -ENOMEM;
+
+       p = buf - offset_in_page(buf);
+       for (index = 0; index < nr_pages; index++) {
+               if (is_vmalloc_addr(p))
+                       pages[index] = vmalloc_to_page(p);
+               else
+                       pages[index] = kmap_to_page((void *)p);
+               if (!pages[index]) {
+                       kfree(pages);
+                       return -EFAULT;
+               }
+               p += PAGE_SIZE;
        }
-       mgr->state = FPGA_MGR_STATE_OPERATING;
 
-       return 0;
+       /*
+        * The temporary pages list lets us reuse the chunk-merging code in
+        * sg_alloc_table_from_pages().
+        */
+       rc = sg_alloc_table_from_pages(&sgt, pages, index, offset_in_page(buf),
+                                      count, GFP_KERNEL);
+       kfree(pages);
+       if (rc)
+               return rc;
+
+       rc = fpga_mgr_buf_load_sg(mgr, info, &sgt);
+       sg_free_table(&sgt);
+
+       return rc;
 }
 EXPORT_SYMBOL_GPL(fpga_mgr_buf_load);
 
@@ -291,8 +466,9 @@ int fpga_mgr_register(struct device *dev, const char *name,
        struct fpga_manager *mgr;
        int id, ret;
 
-       if (!mops || !mops->write_init || !mops->write ||
-           !mops->write_complete || !mops->state) {
+       if (!mops || !mops->write_complete || !mops->state ||
+           !mops->write_init || (!mops->write && !mops->write_sg) ||
+           (mops->write && mops->write_sg)) {
                dev_err(dev, "Attempt to register without fpga_manager_ops\n");
                return -EINVAL;
        }
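
The registration check now enforces the new contract: a low-level driver supplies exactly one of .write (contiguous buffer) or .write_sg (scatter-gather), and the core bridges the other path. A hedged sketch of a hypothetical SG-only manager's ops, with the actual hardware access omitted (demo_* names are illustrative):

    #include <linux/fpga/fpga-mgr.h>
    #include <linux/scatterlist.h>

    static enum fpga_mgr_states demo_state(struct fpga_manager *mgr)
    {
            return FPGA_MGR_STATE_UNKNOWN;
    }

    static int demo_write_init(struct fpga_manager *mgr,
                               struct fpga_image_info *info,
                               const char *buf, size_t count)
    {
            /* Inspect up to .initial_header_size bytes of the image here. */
            return 0;
    }

    static int demo_write_sg(struct fpga_manager *mgr, struct sg_table *sgt)
    {
            struct sg_mapping_iter miter;

            /* Stream each fragment to the device; no bounce buffer needed. */
            sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG);
            while (sg_miter_next(&miter))
                    ;       /* push miter.addr / miter.length to hardware */
            sg_miter_stop(&miter);

            return 0;
    }

    static int demo_write_complete(struct fpga_manager *mgr,
                                   struct fpga_image_info *info)
    {
            return 0;
    }

    static const struct fpga_manager_ops demo_ops = {
            .initial_header_size = 128,
            .state = demo_state,
            .write_init = demo_write_init,
            .write_sg = demo_write_sg,      /* .write must stay unset */
            .write_complete = demo_write_complete,
    };

    /* Registered from probe, e.g.:
     *      fpga_mgr_register(&pdev->dev, "demo FPGA manager", &demo_ops, priv);
     */
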
index 1812bf7614e1d4d6740024295e8a20d539b764ae..34cb98139442dfb93a7de248105b799f0ab982f7 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/pm.h>
 #include <linux/regmap.h>
 #include <linux/string.h>
+#include <linux/scatterlist.h>
 
 /* Offsets into SLCR regmap */
 
@@ -80,6 +81,7 @@
 
 /* FPGA init status */
 #define STATUS_DMA_Q_F                 BIT(31)
+#define STATUS_DMA_Q_E                 BIT(30)
 #define STATUS_PCFG_INIT_MASK          BIT(4)
 
 /* Interrupt Status/Mask Register Bit definitions */
@@ -89,7 +91,7 @@
 #define IXR_D_P_DONE_MASK              BIT(12)
  /* FPGA programmed */
 #define IXR_PCFG_DONE_MASK             BIT(2)
-#define IXR_ERROR_FLAGS_MASK           0x00F0F860
+#define IXR_ERROR_FLAGS_MASK           0x00F0C860
 #define IXR_ALL_MASK                   0xF8F7F87F
 
 /* Miscellaneous constant values */
 #define DMA_INVALID_ADDRESS            GENMASK(31, 0)
 /* Used to unlock the dev */
 #define UNLOCK_MASK                    0x757bdf0d
-/* Timeout for DMA to complete */
-#define DMA_DONE_TIMEOUT               msecs_to_jiffies(1000)
 /* Timeout for polling reset bits */
 #define INIT_POLL_TIMEOUT              2500000
 /* Delay for polling reset bits */
 #define INIT_POLL_DELAY                        20
+/* Signal this is the last DMA transfer, wait for the AXI and PCAP before
+ * interrupting
+ */
+#define DMA_SRC_LAST_TRANSFER          1
+/* Timeout for DMA completion */
+#define DMA_TIMEOUT_MS                 5000
 
 /* Masks for controlling stuff in SLCR */
 /* Disable all Level shifters */
@@ -124,6 +130,11 @@ struct zynq_fpga_priv {
        void __iomem *io_base;
        struct regmap *slcr;
 
+       spinlock_t dma_lock;
+       unsigned int dma_elm;
+       unsigned int dma_nelms;
+       struct scatterlist *cur_sg;
+
        struct completion dma_done;
 };
 
@@ -143,37 +154,104 @@ static inline u32 zynq_fpga_read(const struct zynq_fpga_priv *priv,
        readl_poll_timeout(priv->io_base + addr, val, cond, sleep_us, \
                           timeout_us)
 
-static void zynq_fpga_mask_irqs(struct zynq_fpga_priv *priv)
+/* Cause the specified irq mask bits to generate IRQs */
+static inline void zynq_fpga_set_irq(struct zynq_fpga_priv *priv, u32 enable)
 {
-       u32 intr_mask;
-
-       intr_mask = zynq_fpga_read(priv, INT_MASK_OFFSET);
-       zynq_fpga_write(priv, INT_MASK_OFFSET,
-                       intr_mask | IXR_DMA_DONE_MASK | IXR_ERROR_FLAGS_MASK);
+       zynq_fpga_write(priv, INT_MASK_OFFSET, ~enable);
 }
 
-static void zynq_fpga_unmask_irqs(struct zynq_fpga_priv *priv)
+/* Must be called with dma_lock held */
+static void zynq_step_dma(struct zynq_fpga_priv *priv)
 {
-       u32 intr_mask;
+       u32 addr;
+       u32 len;
+       bool first;
+
+       first = priv->dma_elm == 0;
+       while (priv->cur_sg) {
+               /* Feed the DMA queue until it is full. */
+               if (zynq_fpga_read(priv, STATUS_OFFSET) & STATUS_DMA_Q_F)
+                       break;
+
+               addr = sg_dma_address(priv->cur_sg);
+               len = sg_dma_len(priv->cur_sg);
+               if (priv->dma_elm + 1 == priv->dma_nelms) {
+                       /* The last transfer waits for the PCAP to finish too;
+                        * note this also changes the irq_mask to ignore
+                        * IXR_DMA_DONE_MASK, which ensures we do not trigger
+                        * the completion too early.
+                        */
+                       addr |= DMA_SRC_LAST_TRANSFER;
+                       priv->cur_sg = NULL;
+               } else {
+                       priv->cur_sg = sg_next(priv->cur_sg);
+                       priv->dma_elm++;
+               }
 
-       intr_mask = zynq_fpga_read(priv, INT_MASK_OFFSET);
-       zynq_fpga_write(priv, INT_MASK_OFFSET,
-                       intr_mask
-                       & ~(IXR_D_P_DONE_MASK | IXR_ERROR_FLAGS_MASK));
+               zynq_fpga_write(priv, DMA_SRC_ADDR_OFFSET, addr);
+               zynq_fpga_write(priv, DMA_DST_ADDR_OFFSET, DMA_INVALID_ADDRESS);
+               zynq_fpga_write(priv, DMA_SRC_LEN_OFFSET, len / 4);
+               zynq_fpga_write(priv, DMA_DEST_LEN_OFFSET, 0);
+       }
+
+       /* Once the first transfer is queued we can turn on the ISR; future
+        * calls to zynq_step_dma will happen from ISR context. The dma_lock
+        * spinlock guarantees this handover is done coherently, and the ISR
+        * enable is put at the end to avoid another CPU spinning in the ISR
+        * on this lock.
+        */
+       if (first && priv->cur_sg) {
+               zynq_fpga_set_irq(priv,
+                                 IXR_DMA_DONE_MASK | IXR_ERROR_FLAGS_MASK);
+       } else if (!priv->cur_sg) {
+               /* The last transfer changes to DMA & PCAP mode since we do
+                * not want to continue until everything has been flushed into
+                * the PCAP.
+                */
+               zynq_fpga_set_irq(priv,
+                                 IXR_D_P_DONE_MASK | IXR_ERROR_FLAGS_MASK);
+       }
 }
 
 static irqreturn_t zynq_fpga_isr(int irq, void *data)
 {
        struct zynq_fpga_priv *priv = data;
+       u32 intr_status;
 
-       /* disable DMA and error IRQs */
-       zynq_fpga_mask_irqs(priv);
+       /* If anything other than DMA completion is reported stop and hand
+        * control back to zynq_fpga_ops_write, something went wrong,
+        * otherwise progress the DMA.
+        */
+       spin_lock(&priv->dma_lock);
+       intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
+       if (!(intr_status & IXR_ERROR_FLAGS_MASK) &&
+           (intr_status & IXR_DMA_DONE_MASK) && priv->cur_sg) {
+               zynq_fpga_write(priv, INT_STS_OFFSET, IXR_DMA_DONE_MASK);
+               zynq_step_dma(priv);
+               spin_unlock(&priv->dma_lock);
+               return IRQ_HANDLED;
+       }
+       spin_unlock(&priv->dma_lock);
 
+       zynq_fpga_set_irq(priv, 0);
        complete(&priv->dma_done);
 
        return IRQ_HANDLED;
 }
 
+/* Sanity check the proposed bitstream. It must start with the sync word in
+ * the correct byte order, and be dword aligned. The input is a Xilinx .bin
+ * file with every 32 bit quantity swapped.
+ */
+static bool zynq_fpga_has_sync(const u8 *buf, size_t count)
+{
+       for (; count >= 4; buf += 4, count -= 4)
+               if (buf[0] == 0x66 && buf[1] == 0x55 && buf[2] == 0x99 &&
+                   buf[3] == 0xaa)
+                       return true;
+       return false;
+}
+
 static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
                                    struct fpga_image_info *info,
                                    const char *buf, size_t count)
@@ -190,6 +268,13 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
 
        /* don't globally reset PL if we're doing partial reconfig */
        if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
+               if (!zynq_fpga_has_sync(buf, count)) {
+                       dev_err(&mgr->dev,
+                               "Invalid bitstream, could not find a sync word. Bitstream must be a byte swapped .bin file\n");
+                       err = -EINVAL;
+                       goto out_err;
+               }
+
                /* assert AXI interface resets */
                regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET,
                             FPGA_RST_ALL_MASK);
@@ -259,10 +344,11 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
        zynq_fpga_write(priv, CTRL_OFFSET,
                        (CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK | ctrl));
 
-       /* check that we have room in the command queue */
+       /* We expect that the command queue is empty right now. */
        status = zynq_fpga_read(priv, STATUS_OFFSET);
-       if (status & STATUS_DMA_Q_F) {
-               dev_err(&mgr->dev, "DMA command queue full\n");
+       if ((status & STATUS_DMA_Q_F) ||
+           (status & STATUS_DMA_Q_E) != STATUS_DMA_Q_E) {
+               dev_err(&mgr->dev, "DMA command queue not right\n");
                err = -EBUSY;
                goto out_err;
        }
@@ -281,26 +367,36 @@ out_err:
        return err;
 }
 
-static int zynq_fpga_ops_write(struct fpga_manager *mgr,
-                              const char *buf, size_t count)
+static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
 {
        struct zynq_fpga_priv *priv;
+       const char *why;
        int err;
-       char *kbuf;
-       size_t in_count;
-       dma_addr_t dma_addr;
-       u32 transfer_length;
        u32 intr_status;
+       unsigned long timeout;
+       unsigned long flags;
+       struct scatterlist *sg;
+       int i;
 
-       in_count = count;
        priv = mgr->priv;
 
-       kbuf =
-           dma_alloc_coherent(mgr->dev.parent, count, &dma_addr, GFP_KERNEL);
-       if (!kbuf)
-               return -ENOMEM;
+       /* The hardware can only DMA multiples of 4 bytes, and it requires the
+        * starting addresses to be aligned to 64 bits (UG585 pg 212).
+        */
+       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+               if ((sg->offset % 8) || (sg->length % 4)) {
+                       dev_err(&mgr->dev,
+                           "Invalid bitstream, chunks must be aligned\n");
+                       return -EINVAL;
+               }
+       }
 
-       memcpy(kbuf, buf, count);
+       priv->dma_nelms =
+           dma_map_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+       if (priv->dma_nelms == 0) {
+               dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n");
+               return -ENOMEM;
+       }
 
        /* enable clock */
        err = clk_enable(priv->clk);
@@ -308,38 +404,67 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr,
                goto out_free;
 
        zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
-
        reinit_completion(&priv->dma_done);
 
-       /* enable DMA and error IRQs */
-       zynq_fpga_unmask_irqs(priv);
+       /* zynq_step_dma will turn on interrupts */
+       spin_lock_irqsave(&priv->dma_lock, flags);
+       priv->dma_elm = 0;
+       priv->cur_sg = sgt->sgl;
+       zynq_step_dma(priv);
+       spin_unlock_irqrestore(&priv->dma_lock, flags);
 
-       /* the +1 in the src addr is used to hold off on DMA_DONE IRQ
-        * until both AXI and PCAP are done ...
-        */
-       zynq_fpga_write(priv, DMA_SRC_ADDR_OFFSET, (u32)(dma_addr) + 1);
-       zynq_fpga_write(priv, DMA_DST_ADDR_OFFSET, (u32)DMA_INVALID_ADDRESS);
+       timeout = wait_for_completion_timeout(&priv->dma_done,
+                                             msecs_to_jiffies(DMA_TIMEOUT_MS));
 
-       /* convert #bytes to #words */
-       transfer_length = (count + 3) / 4;
+       spin_lock_irqsave(&priv->dma_lock, flags);
+       zynq_fpga_set_irq(priv, 0);
+       priv->cur_sg = NULL;
+       spin_unlock_irqrestore(&priv->dma_lock, flags);
 
-       zynq_fpga_write(priv, DMA_SRC_LEN_OFFSET, transfer_length);
-       zynq_fpga_write(priv, DMA_DEST_LEN_OFFSET, 0);
+       intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
+       zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
 
-       wait_for_completion(&priv->dma_done);
+       /* There doesn't seem to be a way to force-cancel the DMA, so if
+        * something went wrong we are relying on the hardware to have halted
+        * the DMA before we get here; if cancellation were possible we could
+        * also use wait_for_completion_interruptible.
+        */
 
-       intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
-       zynq_fpga_write(priv, INT_STS_OFFSET, intr_status);
+       if (intr_status & IXR_ERROR_FLAGS_MASK) {
+               why = "DMA reported error";
+               err = -EIO;
+               goto out_report;
+       }
 
-       if (!((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) {
-               dev_err(&mgr->dev, "Error configuring FPGA\n");
-               err = -EFAULT;
+       if (priv->cur_sg ||
+           !((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) {
+               if (timeout == 0)
+                       why = "DMA timed out";
+               else
+                       why = "DMA did not complete";
+               err = -EIO;
+               goto out_report;
        }
 
+       err = 0;
+       goto out_clk;
+
+out_report:
+       dev_err(&mgr->dev,
+               "%s: INT_STS:0x%x CTRL:0x%x LOCK:0x%x INT_MASK:0x%x STATUS:0x%x MCTRL:0x%x\n",
+               why,
+               intr_status,
+               zynq_fpga_read(priv, CTRL_OFFSET),
+               zynq_fpga_read(priv, LOCK_OFFSET),
+               zynq_fpga_read(priv, INT_MASK_OFFSET),
+               zynq_fpga_read(priv, STATUS_OFFSET),
+               zynq_fpga_read(priv, MCTRL_OFFSET));
+
+out_clk:
        clk_disable(priv->clk);
 
 out_free:
-       dma_free_coherent(mgr->dev.parent, count, kbuf, dma_addr);
+       dma_unmap_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
        return err;
 }
 
@@ -400,9 +525,10 @@ static enum fpga_mgr_states zynq_fpga_ops_state(struct fpga_manager *mgr)
 }
 
 static const struct fpga_manager_ops zynq_fpga_ops = {
+       .initial_header_size = 128,
        .state = zynq_fpga_ops_state,
        .write_init = zynq_fpga_ops_write_init,
-       .write = zynq_fpga_ops_write,
+       .write_sg = zynq_fpga_ops_write,
        .write_complete = zynq_fpga_ops_write_complete,
 };
 
@@ -416,6 +542,7 @@ static int zynq_fpga_probe(struct platform_device *pdev)
        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
+       spin_lock_init(&priv->dma_lock);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->io_base = devm_ioremap_resource(dev, res);
@@ -452,7 +579,7 @@ static int zynq_fpga_probe(struct platform_device *pdev)
        /* unlock the device */
        zynq_fpga_write(priv, UNLOCK_OFFSET, UNLOCK_MASK);
 
-       zynq_fpga_write(priv, INT_MASK_OFFSET, 0xFFFFFFFF);
+       zynq_fpga_set_irq(priv, 0);
        zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
        err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0, dev_name(dev),
                               priv);
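
zynq_fpga_ops_write_init() now rejects images that do not contain the byte-swapped Xilinx sync word (0x66 0x55 0x99 0xAA) within the 128-byte header it is shown. A small, hypothetical user-space check in the same spirit can flag a wrongly formatted bitstream before it ever reaches the driver:

    #include <stdio.h>
    #include <string.h>

    /* Returns 1 if the byte-swapped sync word appears on a 4-byte boundary. */
    static int has_swapped_sync(const unsigned char *buf, size_t len)
    {
            static const unsigned char sync[4] = { 0x66, 0x55, 0x99, 0xaa };
            size_t i;

            for (i = 0; i + 4 <= len; i += 4)
                    if (!memcmp(buf + i, sync, 4))
                            return 1;
            return 0;
    }

    int main(int argc, char **argv)
    {
            unsigned char hdr[128];
            size_t n;
            FILE *f;

            if (argc != 2 || !(f = fopen(argv[1], "rb")))
                    return 2;
            n = fread(hdr, 1, sizeof(hdr), f);
            fclose(f);

            printf("%s: %s\n", argv[1], has_swapped_sync(hdr, n) ?
                   "looks like a byte-swapped .bin" : "no sync word found");
            return 0;
    }
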
diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
new file mode 100644 (file)
index 0000000..04c1a0e
--- /dev/null
@@ -0,0 +1,12 @@
+#
+# FSI subsystem
+#
+
+menu "FSI support"
+
+config FSI
+       tristate "FSI support"
+       ---help---
+         FSI - the FRU Support Interface - is a simple bus for low-level
+         access to POWER-based hardware.
+endmenu
diff --git a/drivers/fsi/Makefile b/drivers/fsi/Makefile
new file mode 100644 (file)
index 0000000..db0e5e7
--- /dev/null
@@ -0,0 +1,2 @@
+
+obj-$(CONFIG_FSI) += fsi-core.o
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
new file mode 100644 (file)
index 0000000..3d55bd5
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * FSI core driver
+ *
+ * Copyright (C) IBM Corporation 2016
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/fsi.h>
+#include <linux/module.h>
+
+/* FSI core & Linux bus type definitions */
+
+static int fsi_bus_match(struct device *dev, struct device_driver *drv)
+{
+       struct fsi_device *fsi_dev = to_fsi_dev(dev);
+       struct fsi_driver *fsi_drv = to_fsi_drv(drv);
+       const struct fsi_device_id *id;
+
+       if (!fsi_drv->id_table)
+               return 0;
+
+       for (id = fsi_drv->id_table; id->engine_type; id++) {
+               if (id->engine_type != fsi_dev->engine_type)
+                       continue;
+               if (id->version == FSI_VERSION_ANY ||
+                               id->version == fsi_dev->version)
+                       return 1;
+       }
+
+       return 0;
+}
+
+struct bus_type fsi_bus_type = {
+       .name           = "fsi",
+       .match          = fsi_bus_match,
+};
+EXPORT_SYMBOL_GPL(fsi_bus_type);
+
+static int fsi_init(void)
+{
+       return bus_register(&fsi_bus_type);
+}
+
+static void fsi_exit(void)
+{
+       bus_unregister(&fsi_bus_type);
+}
+
+module_init(fsi_init);
+module_exit(fsi_exit);
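
fsi_bus_match() walks the driver's id_table until it reaches an entry with a zero engine_type, accepting either an exact version match or FSI_VERSION_ANY. A hedged sketch of such a table for a hypothetical engine driver (the 0x10 engine type is made up; struct fsi_device_id and FSI_VERSION_ANY come from the linux/fsi.h added by this series):

    #include <linux/fsi.h>

    /* Hypothetical engine: accept any version of engine type 0x10. */
    static const struct fsi_device_id demo_fsi_ids[] = {
            { .engine_type = 0x10, .version = FSI_VERSION_ANY },
            { }     /* zero engine_type terminates the walk in fsi_bus_match() */
    };

    /* This table would be pointed to by a struct fsi_driver's id_table. */
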
index 5fb4c6d9209b753a12dcddb4c441ff00c3e8746a..81a80c82f1bd2b6a55df393a3df55376d709adfd 100644 (file)
@@ -47,12 +47,8 @@ void vmbus_setevent(struct vmbus_channel *channel)
         * For channels marked as in "low latency" mode
         * bypass the monitor page mechanism.
         */
-       if ((channel->offermsg.monitor_allocated) &&
-           (!channel->low_latency)) {
-               /* Each u32 represents 32 channels */
-               sync_set_bit(channel->offermsg.child_relid & 31,
-                       (unsigned long *) vmbus_connection.send_int_page +
-                       (channel->offermsg.child_relid >> 5));
+       if (channel->offermsg.monitor_allocated && !channel->low_latency) {
+               vmbus_send_interrupt(channel->offermsg.child_relid);
 
                /* Get the child to parent monitor page */
                monitorpage = vmbus_connection.monitor_pages[1];
@@ -157,6 +153,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
        }
 
        init_completion(&open_info->waitevent);
+       open_info->waiting_channel = newchannel;
 
        open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
        open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
@@ -181,7 +178,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 
        ret = vmbus_post_msg(open_msg,
-                              sizeof(struct vmbus_channel_open_channel));
+                            sizeof(struct vmbus_channel_open_channel), true);
 
        if (ret != 0) {
                err = ret;
@@ -194,6 +191,11 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
        list_del(&open_info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 
+       if (newchannel->rescind) {
+               err = -ENODEV;
+               goto error_free_gpadl;
+       }
+
        if (open_info->response.open_result.status) {
                err = -EAGAIN;
                goto error_free_gpadl;
@@ -233,7 +235,7 @@ int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
        conn_msg.guest_endpoint_id = *shv_guest_servie_id;
        conn_msg.host_service_id = *shv_host_servie_id;
 
-       return vmbus_post_msg(&conn_msg, sizeof(conn_msg));
+       return vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
 }
 EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
 
@@ -405,6 +407,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
                return ret;
 
        init_completion(&msginfo->waitevent);
+       msginfo->waiting_channel = channel;
 
        gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
        gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
@@ -419,7 +422,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 
        ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
-                              sizeof(*msginfo));
+                            sizeof(*msginfo), true);
        if (ret != 0)
                goto cleanup;
 
@@ -433,14 +436,19 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
                gpadl_body->gpadl = next_gpadl_handle;
 
                ret = vmbus_post_msg(gpadl_body,
-                                    submsginfo->msgsize -
-                                    sizeof(*submsginfo));
+                                    submsginfo->msgsize - sizeof(*submsginfo),
+                                    true);
                if (ret != 0)
                        goto cleanup;
 
        }
        wait_for_completion(&msginfo->waitevent);
 
+       if (channel->rescind) {
+               ret = -ENODEV;
+               goto cleanup;
+       }
+
        /* At this point, we received the gpadl created msg */
        *gpadl_handle = gpadlmsg->gpadl;
 
@@ -474,6 +482,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
                return -ENOMEM;
 
        init_completion(&info->waitevent);
+       info->waiting_channel = channel;
 
        msg = (struct vmbus_channel_gpadl_teardown *)info->msg;
 
@@ -485,14 +494,19 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
        list_add_tail(&info->msglistentry,
                      &vmbus_connection.chn_msg_list);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-       ret = vmbus_post_msg(msg,
-                              sizeof(struct vmbus_channel_gpadl_teardown));
+       ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
+                            true);
 
        if (ret)
                goto post_msg_err;
 
        wait_for_completion(&info->waitevent);
 
+       if (channel->rescind) {
+               ret = -ENODEV;
+               goto post_msg_err;
+       }
+
 post_msg_err:
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&info->msglistentry);
@@ -516,7 +530,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
        int ret;
 
        /*
-        * process_chn_event(), running in the tasklet, can race
+        * vmbus_on_event(), running in the tasklet, can race
         * with vmbus_close_internal() in the case of SMP guest, e.g., when
         * the former is accessing channel->inbound.ring_buffer, the latter
         * could be freeing the ring_buffer pages.
@@ -557,7 +571,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
        msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
        msg->child_relid = channel->offermsg.child_relid;
 
-       ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));
+       ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel),
+                            true);
 
        if (ret) {
                pr_err("Close failed: close post msg return is %d\n", ret);
@@ -628,15 +643,14 @@ void vmbus_close(struct vmbus_channel *channel)
 EXPORT_SYMBOL_GPL(vmbus_close);
 
 int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
-                          u32 bufferlen, u64 requestid,
-                          enum vmbus_packet_type type, u32 flags, bool kick_q)
+                        u32 bufferlen, u64 requestid,
+                        enum vmbus_packet_type type, u32 flags)
 {
        struct vmpacket_descriptor desc;
        u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
        u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
-       bool lock = channel->acquire_ring_lock;
        int num_vecs = ((bufferlen != 0) ? 3 : 1);
 
 
@@ -655,9 +669,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-       return hv_ringbuffer_write(channel, bufferlist, num_vecs,
-                                  lock, kick_q);
-
+       return hv_ringbuffer_write(channel, bufferlist, num_vecs);
 }
 EXPORT_SYMBOL(vmbus_sendpacket_ctl);
 
@@ -680,7 +692,7 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
                           enum vmbus_packet_type type, u32 flags)
 {
        return vmbus_sendpacket_ctl(channel, buffer, bufferlen, requestid,
-                                   type, flags, true);
+                                   type, flags);
 }
 EXPORT_SYMBOL(vmbus_sendpacket);
 
@@ -692,11 +704,9 @@ EXPORT_SYMBOL(vmbus_sendpacket);
  * explicitly.
  */
 int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
-                                    struct hv_page_buffer pagebuffers[],
-                                    u32 pagecount, void *buffer, u32 bufferlen,
-                                    u64 requestid,
-                                    u32 flags,
-                                    bool kick_q)
+                                   struct hv_page_buffer pagebuffers[],
+                                   u32 pagecount, void *buffer, u32 bufferlen,
+                                   u64 requestid, u32 flags)
 {
        int i;
        struct vmbus_channel_packet_page_buffer desc;
@@ -705,12 +715,10 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
        u32 packetlen_aligned;
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
-       bool lock = channel->acquire_ring_lock;
 
        if (pagecount > MAX_PAGE_BUFFER_COUNT)
                return -EINVAL;
 
-
        /*
         * Adjust the size down since vmbus_channel_packet_page_buffer is the
         * largest size we support
@@ -742,8 +750,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-       return hv_ringbuffer_write(channel, bufferlist, 3,
-                                  lock, kick_q);
+       return hv_ringbuffer_write(channel, bufferlist, 3);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);
 
@@ -757,9 +764,10 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
                                     u64 requestid)
 {
        u32 flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
+
        return vmbus_sendpacket_pagebuffer_ctl(channel, pagebuffers, pagecount,
-                                              buffer, bufferlen, requestid,
-                                              flags, true);
+                                              buffer, bufferlen,
+                                              requestid, flags);
 
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
@@ -778,7 +786,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
        u32 packetlen_aligned;
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
-       bool lock = channel->acquire_ring_lock;
 
        packetlen = desc_size + bufferlen;
        packetlen_aligned = ALIGN(packetlen, sizeof(u64));
@@ -798,8 +805,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-       return hv_ringbuffer_write(channel, bufferlist, 3,
-                                  lock, true);
+       return hv_ringbuffer_write(channel, bufferlist, 3);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
 
@@ -817,7 +823,6 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
        u32 packetlen_aligned;
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
-       bool lock = channel->acquire_ring_lock;
        u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
                                         multi_pagebuffer->len);
 
@@ -856,8 +861,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-       return hv_ringbuffer_write(channel, bufferlist, 3,
-                                  lock, true);
+       return hv_ringbuffer_write(channel, bufferlist, 3);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
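
All of the send paths above now build the same descriptor/payload/padding kvec and call hv_ringbuffer_write() without the old lock and kick_q plumbing; the exported API seen by vmbus drivers is unchanged. A minimal, hypothetical in-band send for reference (the demo_request layout is illustrative):

    #include <linux/hyperv.h>

    struct demo_request {
            u32 opcode;
            u32 arg;
    };

    static int demo_send(struct vmbus_channel *channel, u64 request_id)
    {
            struct demo_request req = { .opcode = 1, .arg = 0 };

            /* Request a completion so the channel callback can match
             * the host's response against request_id.
             */
            return vmbus_sendpacket(channel, &req, sizeof(req), request_id,
                                    VM_PKT_DATA_INBAND,
                                    VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
    }
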
 
index 26b419203f16a6eee9f34f22659a3d09230c2aa0..f33465d78a025680d7515978fdbcc432d51a8062 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/completion.h>
 #include <linux/delay.h>
 #include <linux/hyperv.h>
+#include <asm/mshyperv.h>
 
 #include "hyperv_vmbus.h"
 
@@ -147,6 +148,29 @@ static const struct {
        { HV_RDV_GUID   },
 };
 
+/*
+ * The rescinded channel may be blocked waiting for a response from the host;
+ * take care of that.
+ */
+static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
+{
+       struct vmbus_channel_msginfo *msginfo;
+       unsigned long flags;
+
+
+       spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+
+       list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
+                               msglistentry) {
+
+               if (msginfo->waiting_channel == channel) {
+                       complete(&msginfo->waitevent);
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+}
+
 static bool is_unsupported_vmbus_devs(const uuid_le *guid)
 {
        int i;
@@ -180,33 +204,34 @@ static u16 hv_get_dev_type(const struct vmbus_channel *channel)
  * @buf: Raw buffer channel data
  *
  * @icmsghdrp is of type &struct icmsg_hdr.
- * @negop is of type &struct icmsg_negotiate.
  * Set up and fill in default negotiate response message.
  *
- * The fw_version specifies the  framework version that
- * we can support and srv_version specifies the service
- * version we can support.
+ * The fw_version and fw_vercnt specify the framework versions that
+ * we can support.
+ *
+ * The srv_version and srv_vercnt specify the service
+ * versions we can support.
+ *
+ * Versions are given in decreasing order.
+ *
+ * nego_fw_version and nego_srv_version store the selected protocol versions.
  *
  * Mainly used by Hyper-V drivers.
  */
 bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
-                               struct icmsg_negotiate *negop, u8 *buf,
-                               int fw_version, int srv_version)
+                               u8 *buf, const int *fw_version, int fw_vercnt,
+                               const int *srv_version, int srv_vercnt,
+                               int *nego_fw_version, int *nego_srv_version)
 {
        int icframe_major, icframe_minor;
        int icmsg_major, icmsg_minor;
        int fw_major, fw_minor;
        int srv_major, srv_minor;
-       int i;
+       int i, j;
        bool found_match = false;
+       struct icmsg_negotiate *negop;
 
        icmsghdrp->icmsgsize = 0x10;
-       fw_major = (fw_version >> 16);
-       fw_minor = (fw_version & 0xFFFF);
-
-       srv_major = (srv_version >> 16);
-       srv_minor = (srv_version & 0xFFFF);
-
        negop = (struct icmsg_negotiate *)&buf[
                sizeof(struct vmbuspipe_hdr) +
                sizeof(struct icmsg_hdr)];
@@ -222,13 +247,22 @@ bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
         * support.
         */
 
-       for (i = 0; i < negop->icframe_vercnt; i++) {
-               if ((negop->icversion_data[i].major == fw_major) &&
-                  (negop->icversion_data[i].minor == fw_minor)) {
-                       icframe_major = negop->icversion_data[i].major;
-                       icframe_minor = negop->icversion_data[i].minor;
-                       found_match = true;
+       for (i = 0; i < fw_vercnt; i++) {
+               fw_major = (fw_version[i] >> 16);
+               fw_minor = (fw_version[i] & 0xFFFF);
+
+               for (j = 0; j < negop->icframe_vercnt; j++) {
+                       if ((negop->icversion_data[j].major == fw_major) &&
+                           (negop->icversion_data[j].minor == fw_minor)) {
+                               icframe_major = negop->icversion_data[j].major;
+                               icframe_minor = negop->icversion_data[j].minor;
+                               found_match = true;
+                               break;
+                       }
                }
+
+               if (found_match)
+                       break;
        }
 
        if (!found_match)
@@ -236,14 +270,26 @@ bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
 
        found_match = false;
 
-       for (i = negop->icframe_vercnt;
-                (i < negop->icframe_vercnt + negop->icmsg_vercnt); i++) {
-               if ((negop->icversion_data[i].major == srv_major) &&
-                  (negop->icversion_data[i].minor == srv_minor)) {
-                       icmsg_major = negop->icversion_data[i].major;
-                       icmsg_minor = negop->icversion_data[i].minor;
-                       found_match = true;
+       for (i = 0; i < srv_vercnt; i++) {
+               srv_major = (srv_version[i] >> 16);
+               srv_minor = (srv_version[i] & 0xFFFF);
+
+               for (j = negop->icframe_vercnt;
+                       (j < negop->icframe_vercnt + negop->icmsg_vercnt);
+                       j++) {
+
+                       if ((negop->icversion_data[j].major == srv_major) &&
+                               (negop->icversion_data[j].minor == srv_minor)) {
+
+                               icmsg_major = negop->icversion_data[j].major;
+                               icmsg_minor = negop->icversion_data[j].minor;
+                               found_match = true;
+                               break;
+                       }
                }
+
+               if (found_match)
+                       break;
        }
 
        /*
@@ -260,6 +306,12 @@ fw_error:
                negop->icmsg_vercnt = 1;
        }
 
+       if (nego_fw_version)
+               *nego_fw_version = (icframe_major << 16) | icframe_minor;
+
+       if (nego_srv_version)
+               *nego_srv_version = (icmsg_major << 16) | icmsg_minor;
+
        negop->icversion_data[0].major = icframe_major;
        negop->icversion_data[0].minor = icframe_minor;
        negop->icversion_data[1].major = icmsg_major;
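
With the reworked negotiation, an integration-service driver passes every framework and service version it understands, newest first, and can read back which pair the host selected. A hedged sketch of the call as a util-style driver might make it from its channel callback (the version values and buffer handling are illustrative, not taken from a specific hv_util service):

    #include <linux/hyperv.h>
    #include <linux/kernel.h>

    /* Encoded as (major << 16) | minor, newest first; values are made up. */
    static const int demo_fw_versions[] = { (3 << 16), (1 << 16) };
    static const int demo_srv_versions[] = { (4 << 16), (3 << 16) };

    static void demo_negotiate(struct icmsg_hdr *icmsghdrp, u8 *recv_buffer)
    {
            int nego_fw = 0, nego_srv = 0;

            if (vmbus_prep_negotiate_resp(icmsghdrp, recv_buffer,
                                          demo_fw_versions,
                                          ARRAY_SIZE(demo_fw_versions),
                                          demo_srv_versions,
                                          ARRAY_SIZE(demo_srv_versions),
                                          &nego_fw, &nego_srv))
                    pr_info("negotiated fw %d.%d, service %d.%d\n",
                            nego_fw >> 16, nego_fw & 0xFFFF,
                            nego_srv >> 16, nego_srv & 0xFFFF);
    }
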
@@ -280,13 +332,15 @@ static struct vmbus_channel *alloc_channel(void)
        if (!channel)
                return NULL;
 
-       channel->acquire_ring_lock = true;
        spin_lock_init(&channel->inbound_lock);
        spin_lock_init(&channel->lock);
 
        INIT_LIST_HEAD(&channel->sc_list);
        INIT_LIST_HEAD(&channel->percpu_list);
 
+       tasklet_init(&channel->callback_event,
+                    vmbus_on_event, (unsigned long)channel);
+
        return channel;
 }
 
@@ -295,15 +349,17 @@ static struct vmbus_channel *alloc_channel(void)
  */
 static void free_channel(struct vmbus_channel *channel)
 {
+       tasklet_kill(&channel->callback_event);
        kfree(channel);
 }
 
 static void percpu_channel_enq(void *arg)
 {
        struct vmbus_channel *channel = arg;
-       int cpu = smp_processor_id();
+       struct hv_per_cpu_context *hv_cpu
+               = this_cpu_ptr(hv_context.cpu_context);
 
-       list_add_tail(&channel->percpu_list, &hv_context.percpu_list[cpu]);
+       list_add_tail(&channel->percpu_list, &hv_cpu->chan_list);
 }
 
 static void percpu_channel_deq(void *arg)
@@ -321,24 +377,21 @@ static void vmbus_release_relid(u32 relid)
        memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
        msg.child_relid = relid;
        msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
-       vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released));
+       vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
+                      true);
 }
 
 void hv_event_tasklet_disable(struct vmbus_channel *channel)
 {
-       struct tasklet_struct *tasklet;
-       tasklet = hv_context.event_dpc[channel->target_cpu];
-       tasklet_disable(tasklet);
+       tasklet_disable(&channel->callback_event);
 }
 
 void hv_event_tasklet_enable(struct vmbus_channel *channel)
 {
-       struct tasklet_struct *tasklet;
-       tasklet = hv_context.event_dpc[channel->target_cpu];
-       tasklet_enable(tasklet);
+       tasklet_enable(&channel->callback_event);
 
        /* In case there is any pending event */
-       tasklet_schedule(tasklet);
+       tasklet_schedule(&channel->callback_event);
 }
 
 void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
@@ -673,9 +726,12 @@ static void vmbus_wait_for_unload(void)
                        break;
 
                for_each_online_cpu(cpu) {
-                       page_addr = hv_context.synic_message_page[cpu];
-                       msg = (struct hv_message *)page_addr +
-                               VMBUS_MESSAGE_SINT;
+                       struct hv_per_cpu_context *hv_cpu
+                               = per_cpu_ptr(hv_context.cpu_context, cpu);
+
+                       page_addr = hv_cpu->synic_message_page;
+                       msg = (struct hv_message *)page_addr
+                               + VMBUS_MESSAGE_SINT;
 
                        message_type = READ_ONCE(msg->header.message_type);
                        if (message_type == HVMSG_NONE)
@@ -699,7 +755,10 @@ static void vmbus_wait_for_unload(void)
         * messages after we reconnect.
         */
        for_each_online_cpu(cpu) {
-               page_addr = hv_context.synic_message_page[cpu];
+               struct hv_per_cpu_context *hv_cpu
+                       = per_cpu_ptr(hv_context.cpu_context, cpu);
+
+               page_addr = hv_cpu->synic_message_page;
                msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
                msg->header.message_type = HVMSG_NONE;
        }
@@ -728,7 +787,8 @@ void vmbus_initiate_unload(bool crash)
        init_completion(&vmbus_connection.unload_event);
        memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
        hdr.msgtype = CHANNELMSG_UNLOAD;
-       vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));
+       vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
+                      !crash);
 
        /*
         * vmbus_initiate_unload() is also called on crash and the crash can be
@@ -758,13 +818,6 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
                return;
        }
 
-       /*
-        * By default we setup state to enable batched
-        * reading. A specific service can choose to
-        * disable this prior to opening the channel.
-        */
-       newchannel->batched_reading = true;
-
        /*
         * Setup state for signalling the host.
         */
@@ -823,6 +876,8 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
        channel->rescind = true;
        spin_unlock_irqrestore(&channel->lock, flags);
 
+       vmbus_rescind_cleanup(channel);
+
        if (channel->device_obj) {
                if (channel->chn_rescind_callback) {
                        channel->chn_rescind_callback(channel);
@@ -1116,8 +1171,8 @@ int vmbus_request_offers(void)
        msg->msgtype = CHANNELMSG_REQUESTOFFERS;
 
 
-       ret = vmbus_post_msg(msg,
-                              sizeof(struct vmbus_channel_message_header));
+       ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
+                            true);
        if (ret != 0) {
                pr_err("Unable to request offers - %d\n", ret);
 
index 6ce8b874e833dbd7f0b630f97d9bd4e6790e6831..a8366fec14581f795c046dcd47baef7a56f20692 100644 (file)
@@ -93,12 +93,10 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
         * all the CPUs. This is needed for kexec to work correctly where
         * the CPU attempting to connect may not be CPU 0.
         */
-       if (version >= VERSION_WIN8_1) {
-               msg->target_vcpu = hv_context.vp_index[get_cpu()];
-               put_cpu();
-       } else {
+       if (version >= VERSION_WIN8_1)
+               msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
+       else
                msg->target_vcpu = 0;
-       }
 
        /*
         * Add to list before we send the request since we may
@@ -111,7 +109,8 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 
        ret = vmbus_post_msg(msg,
-                              sizeof(struct vmbus_channel_initiate_contact));
+                            sizeof(struct vmbus_channel_initiate_contact),
+                            true);
        if (ret != 0) {
                spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
                list_del(&msginfo->msglistentry);
@@ -220,11 +219,8 @@ int vmbus_connect(void)
                goto cleanup;
 
        vmbus_proto_version = version;
-       pr_info("Hyper-V Host Build:%d-%d.%d-%d-%d.%d; Vmbus version:%d.%d\n",
-                   host_info_eax, host_info_ebx >> 16,
-                   host_info_ebx & 0xFFFF, host_info_ecx,
-                   host_info_edx >> 24, host_info_edx & 0xFFFFFF,
-                   version >> 16, version & 0xFFFF);
+       pr_info("Vmbus version:%d.%d\n",
+               version >> 16, version & 0xFFFF);
 
        kfree(msginfo);
        return 0;
@@ -263,29 +259,6 @@ void vmbus_disconnect(void)
        vmbus_connection.monitor_pages[1] = NULL;
 }
 
-/*
- * Map the given relid to the corresponding channel based on the
- * per-cpu list of channels that have been affinitized to this CPU.
- * This will be used in the channel callback path as we can do this
- * mapping in a lock-free fashion.
- */
-static struct vmbus_channel *pcpu_relid2channel(u32 relid)
-{
-       struct vmbus_channel *channel;
-       struct vmbus_channel *found_channel  = NULL;
-       int cpu = smp_processor_id();
-       struct list_head *pcpu_head = &hv_context.percpu_list[cpu];
-
-       list_for_each_entry(channel, pcpu_head, percpu_list) {
-               if (channel->offermsg.child_relid == relid) {
-                       found_channel = channel;
-                       break;
-               }
-       }
-
-       return found_channel;
-}
-
 /*
  * relid2channel - Get the channel object given its
  * child relative id (ie channel id)
@@ -322,23 +295,12 @@ struct vmbus_channel *relid2channel(u32 relid)
 }
 
 /*
- * process_chn_event - Process a channel event notification
+ * vmbus_on_event - Process a channel event notification
  */
-static void process_chn_event(u32 relid)
+void vmbus_on_event(unsigned long data)
 {
-       struct vmbus_channel *channel;
-       void *arg;
-       bool read_state;
-       u32 bytes_to_read;
-
-       /*
-        * Find the channel based on this relid and invokes the
-        * channel callback to process the event
-        */
-       channel = pcpu_relid2channel(relid);
-
-       if (!channel)
-               return;
+       struct vmbus_channel *channel = (void *) data;
+       void (*callback_fn)(void *);
 
        /*
         * A channel once created is persistent even when there
@@ -348,10 +310,13 @@ static void process_chn_event(u32 relid)
         * Thus, checking and invoking the driver specific callback takes
         * care of orderly unloading of the driver.
         */
+       callback_fn = READ_ONCE(channel->onchannel_callback);
+       if (unlikely(callback_fn == NULL))
+               return;
 
-       if (channel->onchannel_callback != NULL) {
-               arg = channel->channel_callback_context;
-               read_state = channel->batched_reading;
+       (*callback_fn)(channel->channel_callback_context);
+
+       if (channel->callback_mode == HV_CALL_BATCHED) {
                /*
                 * This callback reads the messages sent by the host.
                 * We can optimize host to guest signaling by ensuring:
@@ -363,71 +328,10 @@ static void process_chn_event(u32 relid)
                 *    state is set we check to see if additional packets are
                 *    available to read. In this case we repeat the process.
                 */
+               if (hv_end_read(&channel->inbound) != 0) {
+                       hv_begin_read(&channel->inbound);
 
-               do {
-                       if (read_state)
-                               hv_begin_read(&channel->inbound);
-                       channel->onchannel_callback(arg);
-                       if (read_state)
-                               bytes_to_read = hv_end_read(&channel->inbound);
-                       else
-                               bytes_to_read = 0;
-               } while (read_state && (bytes_to_read != 0));
-       }
-}
-
-/*
- * vmbus_on_event - Handler for events
- */
-void vmbus_on_event(unsigned long data)
-{
-       u32 dword;
-       u32 maxdword;
-       int bit;
-       u32 relid;
-       u32 *recv_int_page = NULL;
-       void *page_addr;
-       int cpu = smp_processor_id();
-       union hv_synic_event_flags *event;
-
-       if (vmbus_proto_version < VERSION_WIN8) {
-               maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
-               recv_int_page = vmbus_connection.recv_int_page;
-       } else {
-               /*
-                * When the host is win8 and beyond, the event page
-                * can be directly checked to get the id of the channel
-                * that has the interrupt pending.
-                */
-               maxdword = HV_EVENT_FLAGS_DWORD_COUNT;
-               page_addr = hv_context.synic_event_page[cpu];
-               event = (union hv_synic_event_flags *)page_addr +
-                                                VMBUS_MESSAGE_SINT;
-               recv_int_page = event->flags32;
-       }
-
-
-
-       /* Check events */
-       if (!recv_int_page)
-               return;
-       for (dword = 0; dword < maxdword; dword++) {
-               if (!recv_int_page[dword])
-                       continue;
-               for (bit = 0; bit < 32; bit++) {
-                       if (sync_test_and_clear_bit(bit,
-                               (unsigned long *)&recv_int_page[dword])) {
-                               relid = (dword << 5) + bit;
-
-                               if (relid == 0)
-                                       /*
-                                        * Special case - vmbus
-                                        * channel protocol msg
-                                        */
-                                       continue;
-
-                               process_chn_event(relid);
-                       }
+                       tasklet_schedule(&channel->callback_event);
                }
        }
 }
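
For HV_CALL_BATCHED channels the rewritten handler runs the callback once and, if hv_end_read() reports data left over after host signalling was re-enabled, masks the ring again with hv_begin_read() and reschedules its own tasklet instead of looping inline. A rough sketch of that drain-then-re-arm shape, with an atomic counter standing in for the real ring-buffer state:

#include <linux/interrupt.h>
#include <linux/atomic.h>

static struct tasklet_struct demo_tasklet;
static atomic_t demo_pending = ATOMIC_INIT(0);  /* stand-in for ring fill level */

static void demo_drain(void)
{
        /* Consume whatever is currently visible (details elided). */
        atomic_set(&demo_pending, 0);
}

static void demo_on_event(unsigned long data)
{
        demo_drain();

        /*
         * New data can race in right after draining.  Instead of looping
         * here, hand the leftover work back to the softirq scheduler so
         * one busy channel cannot monopolise the CPU.
         */
        if (atomic_read(&demo_pending) != 0)
                tasklet_schedule(&demo_tasklet);
}

static void demo_setup(void)
{
        tasklet_init(&demo_tasklet, demo_on_event, 0);
}
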
@@ -435,7 +339,7 @@ void vmbus_on_event(unsigned long data)
 /*
  * vmbus_post_msg - Send a msg on the vmbus's message connection
  */
-int vmbus_post_msg(void *buffer, size_t buflen)
+int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep)
 {
        union hv_connection_id conn_id;
        int ret = 0;
@@ -450,7 +354,7 @@ int vmbus_post_msg(void *buffer, size_t buflen)
         * insufficient resources. Retry the operation a couple of
         * times before giving up.
         */
-       while (retries < 20) {
+       while (retries < 100) {
                ret = hv_post_message(conn_id, 1, buffer, buflen);
 
                switch (ret) {
@@ -473,8 +377,14 @@ int vmbus_post_msg(void *buffer, size_t buflen)
                }
 
                retries++;
-               udelay(usec);
-               if (usec < 2048)
+               if (can_sleep && usec > 1000)
+                       msleep(usec / 1000);
+               else if (usec < MAX_UDELAY_MS * 1000)
+                       udelay(usec);
+               else
+                       mdelay(usec / 1000);
+
+               if (usec < 256000)
                        usec *= 2;
        }
        return ret;
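
The retry loop now takes a can_sleep flag from its callers: process-context callers sleep with msleep() once the delay exceeds a millisecond, atomic callers keep busy-waiting with udelay()/mdelay(), the delay doubles each round up to a cap, and the retry budget grows from 20 to 100. The same policy condensed into a self-contained sketch; demo_post() is a placeholder for the real hypercall:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int demo_post(void)
{
        return -EAGAIN;         /* placeholder for the real hypercall */
}

static int demo_post_with_backoff(bool can_sleep)
{
        int ret = -EAGAIN, retries = 0;
        u32 usec = 1;

        while (retries < 100) {
                ret = demo_post();
                if (ret != -EAGAIN)
                        return ret;

                retries++;
                if (can_sleep && usec > 1000)
                        msleep(usec / 1000);            /* process context: yield */
                else if (usec < MAX_UDELAY_MS * 1000)
                        udelay(usec);                   /* short, atomic-safe busy wait */
                else
                        mdelay(usec / 1000);            /* long busy wait, last resort */

                if (usec < 256000)
                        usec *= 2;                      /* exponential backoff, capped */
        }
        return ret;
}
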
@@ -487,12 +397,8 @@ void vmbus_set_event(struct vmbus_channel *channel)
 {
        u32 child_relid = channel->offermsg.child_relid;
 
-       if (!channel->is_dedicated_interrupt) {
-               /* Each u32 represents 32 channels */
-               sync_set_bit(child_relid & 31,
-                       (unsigned long *)vmbus_connection.send_int_page +
-                       (child_relid >> 5));
-       }
+       if (!channel->is_dedicated_interrupt)
+               vmbus_send_interrupt(child_relid);
 
        hv_do_hypercall(HVCALL_SIGNAL_EVENT, channel->sig_event, NULL);
 }
index b44b32f21e61db00a2db56db81c4e4a1a04053db..665a64f1611e31fe3650f6d030aa373ca1301c03 100644 (file)
 /* The one and only */
 struct hv_context hv_context = {
        .synic_initialized      = false,
-       .hypercall_page         = NULL,
 };
 
 #define HV_TIMER_FREQUENCY (10 * 1000 * 1000) /* 100ns period */
 #define HV_MAX_MAX_DELTA_TICKS 0xffffffff
 #define HV_MIN_DELTA_TICKS 1
 
-/*
- * query_hypervisor_info - Get version info of the windows hypervisor
- */
-unsigned int host_info_eax;
-unsigned int host_info_ebx;
-unsigned int host_info_ecx;
-unsigned int host_info_edx;
-
-static int query_hypervisor_info(void)
-{
-       unsigned int eax;
-       unsigned int ebx;
-       unsigned int ecx;
-       unsigned int edx;
-       unsigned int max_leaf;
-       unsigned int op;
-
-       /*
-       * Its assumed that this is called after confirming that Viridian
-       * is present. Query id and revision.
-       */
-       eax = 0;
-       ebx = 0;
-       ecx = 0;
-       edx = 0;
-       op = HVCPUID_VENDOR_MAXFUNCTION;
-       cpuid(op, &eax, &ebx, &ecx, &edx);
-
-       max_leaf = eax;
-
-       if (max_leaf >= HVCPUID_VERSION) {
-               eax = 0;
-               ebx = 0;
-               ecx = 0;
-               edx = 0;
-               op = HVCPUID_VERSION;
-               cpuid(op, &eax, &ebx, &ecx, &edx);
-               host_info_eax = eax;
-               host_info_ebx = ebx;
-               host_info_ecx = ecx;
-               host_info_edx = edx;
-       }
-       return max_leaf;
-}
-
-/*
- * hv_do_hypercall- Invoke the specified hypercall
- */
-u64 hv_do_hypercall(u64 control, void *input, void *output)
-{
-       u64 input_address = (input) ? virt_to_phys(input) : 0;
-       u64 output_address = (output) ? virt_to_phys(output) : 0;
-       void *hypercall_page = hv_context.hypercall_page;
-#ifdef CONFIG_X86_64
-       u64 hv_status = 0;
-
-       if (!hypercall_page)
-               return (u64)ULLONG_MAX;
-
-       __asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
-       __asm__ __volatile__("call *%3" : "=a" (hv_status) :
-                            "c" (control), "d" (input_address),
-                            "m" (hypercall_page));
-
-       return hv_status;
-
-#else
-
-       u32 control_hi = control >> 32;
-       u32 control_lo = control & 0xFFFFFFFF;
-       u32 hv_status_hi = 1;
-       u32 hv_status_lo = 1;
-       u32 input_address_hi = input_address >> 32;
-       u32 input_address_lo = input_address & 0xFFFFFFFF;
-       u32 output_address_hi = output_address >> 32;
-       u32 output_address_lo = output_address & 0xFFFFFFFF;
-
-       if (!hypercall_page)
-               return (u64)ULLONG_MAX;
-
-       __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
-                             "=a"(hv_status_lo) : "d" (control_hi),
-                             "a" (control_lo), "b" (input_address_hi),
-                             "c" (input_address_lo), "D"(output_address_hi),
-                             "S"(output_address_lo), "m" (hypercall_page));
-
-       return hv_status_lo | ((u64)hv_status_hi << 32);
-#endif /* !x86_64 */
-}
-EXPORT_SYMBOL_GPL(hv_do_hypercall);
-
-#ifdef CONFIG_X86_64
-static u64 read_hv_clock_tsc(struct clocksource *arg)
-{
-       u64 current_tick;
-       struct ms_hyperv_tsc_page *tsc_pg = hv_context.tsc_page;
-
-       if (tsc_pg->tsc_sequence != 0) {
-               /*
-                * Use the tsc page to compute the value.
-                */
-
-               while (1) {
-                       u64 tmp;
-                       u32 sequence = tsc_pg->tsc_sequence;
-                       u64 cur_tsc;
-                       u64 scale = tsc_pg->tsc_scale;
-                       s64 offset = tsc_pg->tsc_offset;
-
-                       rdtscll(cur_tsc);
-                       /* current_tick = ((cur_tsc *scale) >> 64) + offset */
-                       asm("mulq %3"
-                               : "=d" (current_tick), "=a" (tmp)
-                               : "a" (cur_tsc), "r" (scale));
-
-                       current_tick += offset;
-                       if (tsc_pg->tsc_sequence == sequence)
-                               return current_tick;
-
-                       if (tsc_pg->tsc_sequence != 0)
-                               continue;
-                       /*
-                        * Fallback using MSR method.
-                        */
-                       break;
-               }
-       }
-       rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
-       return current_tick;
-}
-
-static struct clocksource hyperv_cs_tsc = {
-               .name           = "hyperv_clocksource_tsc_page",
-               .rating         = 425,
-               .read           = read_hv_clock_tsc,
-               .mask           = CLOCKSOURCE_MASK(64),
-               .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-#endif
-
-
 /*
  * hv_init - Main initialization routine.
  *
@@ -191,129 +49,14 @@ static struct clocksource hyperv_cs_tsc = {
  */
 int hv_init(void)
 {
-       int max_leaf;
-       union hv_x64_msr_hypercall_contents hypercall_msr;
-       void *virtaddr = NULL;
-
-       memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
-       memset(hv_context.synic_message_page, 0,
-              sizeof(void *) * NR_CPUS);
-       memset(hv_context.post_msg_page, 0,
-              sizeof(void *) * NR_CPUS);
-       memset(hv_context.vp_index, 0,
-              sizeof(int) * NR_CPUS);
-       memset(hv_context.event_dpc, 0,
-              sizeof(void *) * NR_CPUS);
-       memset(hv_context.msg_dpc, 0,
-              sizeof(void *) * NR_CPUS);
-       memset(hv_context.clk_evt, 0,
-              sizeof(void *) * NR_CPUS);
-
-       max_leaf = query_hypervisor_info();
+       if (!hv_is_hypercall_page_setup())
+               return -ENOTSUPP;
 
-       /*
-        * Write our OS ID.
-        */
-       hv_context.guestid = generate_guest_id(0, LINUX_VERSION_CODE, 0);
-       wrmsrl(HV_X64_MSR_GUEST_OS_ID, hv_context.guestid);
-
-       /* See if the hypercall page is already set */
-       rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
-
-       virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
-
-       if (!virtaddr)
-               goto cleanup;
-
-       hypercall_msr.enable = 1;
-
-       hypercall_msr.guest_physical_address = vmalloc_to_pfn(virtaddr);
-       wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
-
-       /* Confirm that hypercall page did get setup. */
-       hypercall_msr.as_uint64 = 0;
-       rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
-
-       if (!hypercall_msr.enable)
-               goto cleanup;
-
-       hv_context.hypercall_page = virtaddr;
-
-#ifdef CONFIG_X86_64
-       if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
-               union hv_x64_msr_hypercall_contents tsc_msr;
-               void *va_tsc;
-
-               va_tsc = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
-               if (!va_tsc)
-                       goto cleanup;
-               hv_context.tsc_page = va_tsc;
-
-               rdmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
+       hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
+       if (!hv_context.cpu_context)
+               return -ENOMEM;
 
-               tsc_msr.enable = 1;
-               tsc_msr.guest_physical_address = vmalloc_to_pfn(va_tsc);
-
-               wrmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
-               clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
-       }
-#endif
        return 0;
-
-cleanup:
-       if (virtaddr) {
-               if (hypercall_msr.enable) {
-                       hypercall_msr.as_uint64 = 0;
-                       wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
-               }
-
-               vfree(virtaddr);
-       }
-
-       return -ENOTSUPP;
-}
-
-/*
- * hv_cleanup - Cleanup routine.
- *
- * This routine is called normally during driver unloading or exiting.
- */
-void hv_cleanup(bool crash)
-{
-       union hv_x64_msr_hypercall_contents hypercall_msr;
-
-       /* Reset our OS id */
-       wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
-
-       if (hv_context.hypercall_page) {
-               hypercall_msr.as_uint64 = 0;
-               wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
-               if (!crash)
-                       vfree(hv_context.hypercall_page);
-               hv_context.hypercall_page = NULL;
-       }
-
-#ifdef CONFIG_X86_64
-       /*
-        * Cleanup the TSC page based CS.
-        */
-       if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
-               /*
-                * Crash can happen in an interrupt context and unregistering
-                * a clocksource is impossible and redundant in this case.
-                */
-               if (!oops_in_progress) {
-                       clocksource_change_rating(&hyperv_cs_tsc, 10);
-                       clocksource_unregister(&hyperv_cs_tsc);
-               }
-
-               hypercall_msr.as_uint64 = 0;
-               wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
-               if (!crash)
-                       vfree(hv_context.tsc_page);
-               hv_context.tsc_page = NULL;
-       }
-#endif
 }
 
 /*
@@ -325,25 +68,24 @@ int hv_post_message(union hv_connection_id connection_id,
                  enum hv_message_type message_type,
                  void *payload, size_t payload_size)
 {
-
        struct hv_input_post_message *aligned_msg;
+       struct hv_per_cpu_context *hv_cpu;
        u64 status;
 
        if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
                return -EMSGSIZE;
 
-       aligned_msg = (struct hv_input_post_message *)
-                       hv_context.post_msg_page[get_cpu()];
-
+       hv_cpu = get_cpu_ptr(hv_context.cpu_context);
+       aligned_msg = hv_cpu->post_msg_page;
        aligned_msg->connectionid = connection_id;
        aligned_msg->reserved = 0;
        aligned_msg->message_type = message_type;
        aligned_msg->payload_size = payload_size;
        memcpy((void *)aligned_msg->payload, payload, payload_size);
+       put_cpu_ptr(hv_cpu);
 
        status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);
 
-       put_cpu();
        return status & 0xFFFF;
 }
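
hv_post_message() now reaches its per-CPU scratch page through get_cpu_ptr(), which keeps preemption disabled while the pointer is in use, instead of indexing an NR_CPUS-sized array with get_cpu(). The access pattern in isolation, with a made-up per-CPU context:

#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/string.h>

struct demo_cpu_ctx {
        char scratch[128];              /* per-CPU scratch buffer */
};

static DEFINE_PER_CPU(struct demo_cpu_ctx, demo_ctx);

static void demo_use(const void *payload, size_t len)
{
        /* get_cpu_ptr() disables preemption, pinning us to this CPU's copy. */
        struct demo_cpu_ctx *ctx = get_cpu_ptr(&demo_ctx);

        memcpy(ctx->scratch, payload, min(len, sizeof(ctx->scratch)));
        /* ... hand ctx->scratch to the hypervisor or hardware here ... */

        put_cpu_ptr(&demo_ctx);         /* re-enable preemption */
}
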
 
@@ -354,16 +96,16 @@ static int hv_ce_set_next_event(unsigned long delta,
 
        WARN_ON(!clockevent_state_oneshot(evt));
 
-       rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
+       hv_get_current_tick(current_tick);
        current_tick += delta;
-       wrmsrl(HV_X64_MSR_STIMER0_COUNT, current_tick);
+       hv_init_timer(HV_X64_MSR_STIMER0_COUNT, current_tick);
        return 0;
 }
 
 static int hv_ce_shutdown(struct clock_event_device *evt)
 {
-       wrmsrl(HV_X64_MSR_STIMER0_COUNT, 0);
-       wrmsrl(HV_X64_MSR_STIMER0_CONFIG, 0);
+       hv_init_timer(HV_X64_MSR_STIMER0_COUNT, 0);
+       hv_init_timer_config(HV_X64_MSR_STIMER0_CONFIG, 0);
 
        return 0;
 }
@@ -375,7 +117,7 @@ static int hv_ce_set_oneshot(struct clock_event_device *evt)
        timer_cfg.enable = 1;
        timer_cfg.auto_enable = 1;
        timer_cfg.sintx = VMBUS_MESSAGE_SINT;
-       wrmsrl(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
+       hv_init_timer_config(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
 
        return 0;
 }
@@ -400,8 +142,6 @@ static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
 
 int hv_synic_alloc(void)
 {
-       size_t size = sizeof(struct tasklet_struct);
-       size_t ced_size = sizeof(struct clock_event_device);
        int cpu;
 
        hv_context.hv_numa_map = kzalloc(sizeof(struct cpumask) * nr_node_ids,
@@ -411,52 +151,42 @@ int hv_synic_alloc(void)
                goto err;
        }
 
-       for_each_online_cpu(cpu) {
-               hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
-               if (hv_context.event_dpc[cpu] == NULL) {
-                       pr_err("Unable to allocate event dpc\n");
-                       goto err;
-               }
-               tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);
+       for_each_present_cpu(cpu) {
+               struct hv_per_cpu_context *hv_cpu
+                       = per_cpu_ptr(hv_context.cpu_context, cpu);
 
-               hv_context.msg_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
-               if (hv_context.msg_dpc[cpu] == NULL) {
-                       pr_err("Unable to allocate event dpc\n");
-                       goto err;
-               }
-               tasklet_init(hv_context.msg_dpc[cpu], vmbus_on_msg_dpc, cpu);
+               memset(hv_cpu, 0, sizeof(*hv_cpu));
+               tasklet_init(&hv_cpu->msg_dpc,
+                            vmbus_on_msg_dpc, (unsigned long) hv_cpu);
 
-               hv_context.clk_evt[cpu] = kzalloc(ced_size, GFP_ATOMIC);
-               if (hv_context.clk_evt[cpu] == NULL) {
+               hv_cpu->clk_evt = kzalloc(sizeof(struct clock_event_device),
+                                         GFP_KERNEL);
+               if (hv_cpu->clk_evt == NULL) {
                        pr_err("Unable to allocate clock event device\n");
                        goto err;
                }
+               hv_init_clockevent_device(hv_cpu->clk_evt, cpu);
 
-               hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu);
-
-               hv_context.synic_message_page[cpu] =
+               hv_cpu->synic_message_page =
                        (void *)get_zeroed_page(GFP_ATOMIC);
-
-               if (hv_context.synic_message_page[cpu] == NULL) {
+               if (hv_cpu->synic_message_page == NULL) {
                        pr_err("Unable to allocate SYNIC message page\n");
                        goto err;
                }
 
-               hv_context.synic_event_page[cpu] =
-                       (void *)get_zeroed_page(GFP_ATOMIC);
-
-               if (hv_context.synic_event_page[cpu] == NULL) {
+               hv_cpu->synic_event_page = (void *)get_zeroed_page(GFP_ATOMIC);
+               if (hv_cpu->synic_event_page == NULL) {
                        pr_err("Unable to allocate SYNIC event page\n");
                        goto err;
                }
 
-               hv_context.post_msg_page[cpu] =
-                       (void *)get_zeroed_page(GFP_ATOMIC);
-
-               if (hv_context.post_msg_page[cpu] == NULL) {
+               hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
+               if (hv_cpu->post_msg_page == NULL) {
                        pr_err("Unable to allocate post msg page\n");
                        goto err;
                }
+
+               INIT_LIST_HEAD(&hv_cpu->chan_list);
        }
 
        return 0;
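
hv_synic_alloc() now obtains the whole context with alloc_percpu() and walks every present CPU (not just the online ones) to set up its tasklet, clockevent and zeroed pages. A condensed sketch of that allocate-then-populate pattern, reduced to a single page per CPU and using hypothetical names:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/errno.h>

struct demo_pcpu {
        void *msg_page;                 /* one zeroed page per CPU */
};

static struct demo_pcpu __percpu *demo_pcpu;

static void demo_pcpu_free(void)
{
        int cpu;

        for_each_present_cpu(cpu) {
                struct demo_pcpu *p = per_cpu_ptr(demo_pcpu, cpu);

                if (p->msg_page)
                        free_page((unsigned long)p->msg_page);
        }
        free_percpu(demo_pcpu);
}

static int demo_pcpu_alloc(void)
{
        int cpu;

        demo_pcpu = alloc_percpu(struct demo_pcpu);
        if (!demo_pcpu)
                return -ENOMEM;

        /* Present (not just online) CPUs, so later hotplug finds its state. */
        for_each_present_cpu(cpu) {
                struct demo_pcpu *p = per_cpu_ptr(demo_pcpu, cpu);

                p->msg_page = (void *)get_zeroed_page(GFP_KERNEL);
                if (!p->msg_page) {
                        demo_pcpu_free();
                        return -ENOMEM;
                }
        }
        return 0;
}
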
@@ -464,26 +194,24 @@ err:
        return -ENOMEM;
 }
 
-static void hv_synic_free_cpu(int cpu)
-{
-       kfree(hv_context.event_dpc[cpu]);
-       kfree(hv_context.msg_dpc[cpu]);
-       kfree(hv_context.clk_evt[cpu]);
-       if (hv_context.synic_event_page[cpu])
-               free_page((unsigned long)hv_context.synic_event_page[cpu]);
-       if (hv_context.synic_message_page[cpu])
-               free_page((unsigned long)hv_context.synic_message_page[cpu]);
-       if (hv_context.post_msg_page[cpu])
-               free_page((unsigned long)hv_context.post_msg_page[cpu]);
-}
 
 void hv_synic_free(void)
 {
        int cpu;
 
+       for_each_present_cpu(cpu) {
+               struct hv_per_cpu_context *hv_cpu
+                       = per_cpu_ptr(hv_context.cpu_context, cpu);
+
+               if (hv_cpu->synic_event_page)
+                       free_page((unsigned long)hv_cpu->synic_event_page);
+               if (hv_cpu->synic_message_page)
+                       free_page((unsigned long)hv_cpu->synic_message_page);
+               if (hv_cpu->post_msg_page)
+                       free_page((unsigned long)hv_cpu->post_msg_page);
+       }
+
        kfree(hv_context.hv_numa_map);
-       for_each_online_cpu(cpu)
-               hv_synic_free_cpu(cpu);
 }
 
 /*
@@ -493,54 +221,49 @@ void hv_synic_free(void)
  * retrieve the initialized message and event pages.  Otherwise, we create and
  * initialize the message and event pages.
  */
-void hv_synic_init(void *arg)
+int hv_synic_init(unsigned int cpu)
 {
-       u64 version;
+       struct hv_per_cpu_context *hv_cpu
+               = per_cpu_ptr(hv_context.cpu_context, cpu);
        union hv_synic_simp simp;
        union hv_synic_siefp siefp;
        union hv_synic_sint shared_sint;
        union hv_synic_scontrol sctrl;
        u64 vp_index;
 
-       int cpu = smp_processor_id();
-
-       if (!hv_context.hypercall_page)
-               return;
-
-       /* Check the version */
-       rdmsrl(HV_X64_MSR_SVERSION, version);
-
        /* Setup the Synic's message page */
-       rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
+       hv_get_simp(simp.as_uint64);
        simp.simp_enabled = 1;
-       simp.base_simp_gpa = virt_to_phys(hv_context.synic_message_page[cpu])
+       simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
                >> PAGE_SHIFT;
 
-       wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
+       hv_set_simp(simp.as_uint64);
 
        /* Setup the Synic's event page */
-       rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
+       hv_get_siefp(siefp.as_uint64);
        siefp.siefp_enabled = 1;
-       siefp.base_siefp_gpa = virt_to_phys(hv_context.synic_event_page[cpu])
+       siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
                >> PAGE_SHIFT;
 
-       wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
+       hv_set_siefp(siefp.as_uint64);
 
        /* Setup the shared SINT. */
-       rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+       hv_get_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
+                           shared_sint.as_uint64);
 
        shared_sint.as_uint64 = 0;
        shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
        shared_sint.masked = false;
        shared_sint.auto_eoi = true;
 
-       wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+       hv_set_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
+                           shared_sint.as_uint64);
 
        /* Enable the global synic bit */
-       rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
+       hv_get_synic_state(sctrl.as_uint64);
        sctrl.enable = 1;
 
-       wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
+       hv_set_synic_state(sctrl.as_uint64);
 
        hv_context.synic_initialized = true;
 
@@ -549,20 +272,18 @@ void hv_synic_init(void *arg)
         * of cpuid and Linux' notion of cpuid.
         * This array will be indexed using Linux cpuid.
         */
-       rdmsrl(HV_X64_MSR_VP_INDEX, vp_index);
+       hv_get_vp_index(vp_index);
        hv_context.vp_index[cpu] = (u32)vp_index;
 
-       INIT_LIST_HEAD(&hv_context.percpu_list[cpu]);
-
        /*
         * Register the per-cpu clockevent source.
         */
        if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
-               clockevents_config_and_register(hv_context.clk_evt[cpu],
+               clockevents_config_and_register(hv_cpu->clk_evt,
                                                HV_TIMER_FREQUENCY,
                                                HV_MIN_DELTA_TICKS,
                                                HV_MAX_MAX_DELTA_TICKS);
-       return;
+       return 0;
 }
 
 /*
@@ -575,52 +296,94 @@ void hv_synic_clockevents_cleanup(void)
        if (!(ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE))
                return;
 
-       for_each_present_cpu(cpu)
-               clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
+       for_each_present_cpu(cpu) {
+               struct hv_per_cpu_context *hv_cpu
+                       = per_cpu_ptr(hv_context.cpu_context, cpu);
+
+               clockevents_unbind_device(hv_cpu->clk_evt, cpu);
+       }
 }
 
 /*
  * hv_synic_cleanup - Cleanup routine for hv_synic_init().
  */
-void hv_synic_cleanup(void *arg)
+int hv_synic_cleanup(unsigned int cpu)
 {
        union hv_synic_sint shared_sint;
        union hv_synic_simp simp;
        union hv_synic_siefp siefp;
        union hv_synic_scontrol sctrl;
-       int cpu = smp_processor_id();
+       struct vmbus_channel *channel, *sc;
+       bool channel_found = false;
+       unsigned long flags;
 
        if (!hv_context.synic_initialized)
-               return;
+               return -EFAULT;
+
+       /*
+        * Search for channels which are bound to the CPU we're about to
+        * cleanup. In case we find one and vmbus is still connected we need to
+        * fail, this will effectively prevent CPU offlining. There is no way
+        * we can re-bind channels to different CPUs for now.
+        */
+       mutex_lock(&vmbus_connection.channel_mutex);
+       list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+               if (channel->target_cpu == cpu) {
+                       channel_found = true;
+                       break;
+               }
+               spin_lock_irqsave(&channel->lock, flags);
+               list_for_each_entry(sc, &channel->sc_list, sc_list) {
+                       if (sc->target_cpu == cpu) {
+                               channel_found = true;
+                               break;
+                       }
+               }
+               spin_unlock_irqrestore(&channel->lock, flags);
+               if (channel_found)
+                       break;
+       }
+       mutex_unlock(&vmbus_connection.channel_mutex);
+
+       if (channel_found && vmbus_connection.conn_state == CONNECTED)
+               return -EBUSY;
 
        /* Turn off clockevent device */
        if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE) {
-               clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
-               hv_ce_shutdown(hv_context.clk_evt[cpu]);
+               struct hv_per_cpu_context *hv_cpu
+                       = this_cpu_ptr(hv_context.cpu_context);
+
+               clockevents_unbind_device(hv_cpu->clk_evt, cpu);
+               hv_ce_shutdown(hv_cpu->clk_evt);
+               put_cpu_ptr(hv_cpu);
        }
 
-       rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+       hv_get_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
+                           shared_sint.as_uint64);
 
        shared_sint.masked = 1;
 
        /* Need to correctly cleanup in the case of SMP!!! */
        /* Disable the interrupt */
-       wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+       hv_set_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
+                           shared_sint.as_uint64);
 
-       rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
+       hv_get_simp(simp.as_uint64);
        simp.simp_enabled = 0;
        simp.base_simp_gpa = 0;
 
-       wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
+       hv_set_simp(simp.as_uint64);
 
-       rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
+       hv_get_siefp(siefp.as_uint64);
        siefp.siefp_enabled = 0;
        siefp.base_siefp_gpa = 0;
 
-       wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
+       hv_set_siefp(siefp.as_uint64);
 
        /* Disable the global synic bit */
-       rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
+       hv_get_synic_state(sctrl.as_uint64);
        sctrl.enable = 0;
-       wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
+       hv_set_synic_state(sctrl.as_uint64);
+
+       return 0;
 }
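
hv_synic_init()/hv_synic_cleanup() now take a CPU number and return an int, the signature used by CPU-hotplug state-machine callbacks, and the cleanup path returns -EBUSY when a channel is still bound to the CPU while VMBus is connected, which (once wired up as a hotplug callback elsewhere in the series) vetoes offlining that CPU. A sketch of how such callbacks are typically registered; the state name and helpers are invented for illustration:

#include <linux/cpuhotplug.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/types.h>

static bool demo_cpu_busy(unsigned int cpu)
{
        /* e.g. "a channel is still bound to this CPU" in the vmbus case */
        return false;
}

static int demo_cpu_online(unsigned int cpu)
{
        /* Set up per-CPU pages, timers, interrupt state ... */
        return 0;
}

static int demo_cpu_offline(unsigned int cpu)
{
        /* Returning -EBUSY here makes "echo 0 > .../cpuN/online" fail. */
        if (demo_cpu_busy(cpu))
                return -EBUSY;
        /* Tear down per-CPU state ... */
        return 0;
}

static int __init demo_hotplug_init(void)
{
        int ret;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo/hv:online",
                                demo_cpu_online, demo_cpu_offline);
        return ret < 0 ? ret : 0;
}
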
index 14c3dc4bd23c0b81c1a6cd2df84179af6963ab49..5fd03e59cee563dca301f7ce5c294de298510eab 100644 (file)
@@ -587,6 +587,7 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
                spin_lock_irqsave(&dm_device.ha_lock, flags);
                dm_device.num_pages_onlined += mem->nr_pages;
                spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+               /* Fall through */
        case MEM_CANCEL_ONLINE:
                if (dm_device.ha_waiting) {
                        dm_device.ha_waiting = false;
index 8b2ba98831ec52a5ed60b4a06097db48e781daf9..9aee6014339dffc8627d173446e7bfe0dc06247c 100644 (file)
 #define WIN8_SRV_MINOR         1
 #define WIN8_SRV_VERSION       (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
 
+#define FCOPY_VER_COUNT 1
+static const int fcopy_versions[] = {
+       WIN8_SRV_VERSION
+};
+
+#define FW_VER_COUNT 1
+static const int fw_versions[] = {
+       UTIL_FW_VERSION
+};
+
 /*
  * Global state maintained for transaction that is being processed.
  * For a class of integration services, including the "file copy service",
@@ -61,6 +71,7 @@ static DECLARE_WORK(fcopy_send_work, fcopy_send_data);
 static const char fcopy_devname[] = "vmbus/hv_fcopy";
 static u8 *recv_buffer;
 static struct hvutil_transport *hvt;
+static struct completion release_event;
 /*
  * This state maintains the version number registered by the daemon.
  */
@@ -227,8 +238,6 @@ void hv_fcopy_onchannelcallback(void *context)
        u64 requestid;
        struct hv_fcopy_hdr *fcopy_msg;
        struct icmsg_hdr *icmsghdr;
-       struct icmsg_negotiate *negop = NULL;
-       int util_fw_version;
        int fcopy_srv_version;
 
        if (fcopy_transaction.state > HVUTIL_READY)
@@ -242,10 +251,15 @@ void hv_fcopy_onchannelcallback(void *context)
        icmsghdr = (struct icmsg_hdr *)&recv_buffer[
                        sizeof(struct vmbuspipe_hdr)];
        if (icmsghdr->icmsgtype == ICMSGTYPE_NEGOTIATE) {
-               util_fw_version = UTIL_FW_VERSION;
-               fcopy_srv_version = WIN8_SRV_VERSION;
-               vmbus_prep_negotiate_resp(icmsghdr, negop, recv_buffer,
-                               util_fw_version, fcopy_srv_version);
+               if (vmbus_prep_negotiate_resp(icmsghdr, recv_buffer,
+                               fw_versions, FW_VER_COUNT,
+                               fcopy_versions, FCOPY_VER_COUNT,
+                               NULL, &fcopy_srv_version)) {
+
+                       pr_info("FCopy IC version %d.%d\n",
+                               fcopy_srv_version >> 16,
+                               fcopy_srv_version & 0xFFFF);
+               }
        } else {
                fcopy_msg = (struct hv_fcopy_hdr *)&recv_buffer[
                                sizeof(struct vmbuspipe_hdr) +
@@ -317,6 +331,7 @@ static void fcopy_on_reset(void)
 
        if (cancel_delayed_work_sync(&fcopy_timeout_work))
                fcopy_respond_to_host(HV_E_FAIL);
+       complete(&release_event);
 }
 
 int hv_fcopy_init(struct hv_util_service *srv)
@@ -324,6 +339,7 @@ int hv_fcopy_init(struct hv_util_service *srv)
        recv_buffer = srv->recv_buffer;
        fcopy_transaction.recv_channel = srv->channel;
 
+       init_completion(&release_event);
        /*
         * When this driver loads, the user level daemon that
         * processes the host requests may not yet be running.
@@ -345,4 +361,5 @@ void hv_fcopy_deinit(void)
        fcopy_transaction.state = HVUTIL_DEVICE_DYING;
        cancel_delayed_work_sync(&fcopy_timeout_work);
        hvutil_transport_destroy(hvt);
+       wait_for_completion(&release_event);
 }
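
vmbus_prep_negotiate_resp() now takes arrays of candidate framework and service versions (listed newest first, as in the KVP and TimeSync hunks below) together with their counts, and reports back which service version was agreed so the driver can log it. One plausible shape for that selection step, shown as a standalone example rather than the driver's actual implementation:

#include <stdbool.h>
#include <stdio.h>

/* Pick the first of our preferred versions that the host also offers. */
static bool negotiate(const int *ours, int ours_cnt,
                      const int *host, int host_cnt, int *agreed)
{
        int i, j;

        for (i = 0; i < ours_cnt; i++)
                for (j = 0; j < host_cnt; j++)
                        if (ours[i] == host[j]) {
                                *agreed = ours[i];
                                return true;
                        }
        return false;
}

int main(void)
{
        /* Versions are encoded as major << 16 | minor, as in the driver. */
        const int ours[] = { 3 << 16 | 0, 1 << 16 | 1 };        /* prefer 3.0 */
        const int host[] = { 1 << 16 | 1 };                     /* host offers 1.1 */
        int agreed;

        if (negotiate(ours, 2, host, 1, &agreed))
                printf("negotiated %d.%d\n", agreed >> 16, agreed & 0xFFFF);
        return 0;
}
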
index 5e1fdc8d32ab0c03d25bfcf7d3fbadbcedc6f2eb..de263712e247c2b8c47f450f3d1b7eb387f1e073 100644 (file)
 #define WIN8_SRV_MINOR   0
 #define WIN8_SRV_VERSION     (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
 
+#define KVP_VER_COUNT 3
+static const int kvp_versions[] = {
+       WIN8_SRV_VERSION,
+       WIN7_SRV_VERSION,
+       WS2008_SRV_VERSION
+};
+
+#define FW_VER_COUNT 2
+static const int fw_versions[] = {
+       UTIL_FW_VERSION,
+       UTIL_WS2K8_FW_VERSION
+};
+
 /*
  * Global state maintained for transaction that is being processed. For a class
  * of integration services, including the "KVP service", the specified protocol
@@ -88,6 +101,7 @@ static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
 static const char kvp_devname[] = "vmbus/hv_kvp";
 static u8 *recv_buffer;
 static struct hvutil_transport *hvt;
+static struct completion release_event;
 /*
  * Register the kernel component with the user-level daemon.
  * As part of this registration, pass the LIC version number.
@@ -609,8 +623,6 @@ void hv_kvp_onchannelcallback(void *context)
        struct hv_kvp_msg *kvp_msg;
 
        struct icmsg_hdr *icmsghdrp;
-       struct icmsg_negotiate *negop = NULL;
-       int util_fw_version;
        int kvp_srv_version;
        static enum {NEGO_NOT_STARTED,
                     NEGO_IN_PROGRESS,
@@ -639,28 +651,14 @@ void hv_kvp_onchannelcallback(void *context)
                        sizeof(struct vmbuspipe_hdr)];
 
                if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
-                       /*
-                        * Based on the host, select appropriate
-                        * framework and service versions we will
-                        * negotiate.
-                        */
-                       switch (vmbus_proto_version) {
-                       case (VERSION_WS2008):
-                               util_fw_version = UTIL_WS2K8_FW_VERSION;
-                               kvp_srv_version = WS2008_SRV_VERSION;
-                               break;
-                       case (VERSION_WIN7):
-                               util_fw_version = UTIL_FW_VERSION;
-                               kvp_srv_version = WIN7_SRV_VERSION;
-                               break;
-                       default:
-                               util_fw_version = UTIL_FW_VERSION;
-                               kvp_srv_version = WIN8_SRV_VERSION;
+                       if (vmbus_prep_negotiate_resp(icmsghdrp,
+                                recv_buffer, fw_versions, FW_VER_COUNT,
+                                kvp_versions, KVP_VER_COUNT,
+                                NULL, &kvp_srv_version)) {
+                               pr_info("KVP IC version %d.%d\n",
+                                       kvp_srv_version >> 16,
+                                       kvp_srv_version & 0xFFFF);
                        }
-                       vmbus_prep_negotiate_resp(icmsghdrp, negop,
-                                recv_buffer, util_fw_version,
-                                kvp_srv_version);
-
                } else {
                        kvp_msg = (struct hv_kvp_msg *)&recv_buffer[
                                sizeof(struct vmbuspipe_hdr) +
@@ -716,6 +714,7 @@ static void kvp_on_reset(void)
        if (cancel_delayed_work_sync(&kvp_timeout_work))
                kvp_respond_to_host(NULL, HV_E_FAIL);
        kvp_transaction.state = HVUTIL_DEVICE_INIT;
+       complete(&release_event);
 }
 
 int
@@ -724,6 +723,7 @@ hv_kvp_init(struct hv_util_service *srv)
        recv_buffer = srv->recv_buffer;
        kvp_transaction.recv_channel = srv->channel;
 
+       init_completion(&release_event);
        /*
         * When this driver loads, the user level daemon that
         * processes the host requests may not yet be running.
@@ -747,4 +747,5 @@ void hv_kvp_deinit(void)
        cancel_delayed_work_sync(&kvp_timeout_work);
        cancel_work_sync(&kvp_sendkey_work);
        hvutil_transport_destroy(hvt);
+       wait_for_completion(&release_event);
 }
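
Each util service now pairs its hvutil transport with a completion: the reset/release path signals release_event and the deinit path blocks on it, so the module cannot finish unloading while the user-space daemon still holds a reference. The handshake in isolation, with illustrative names:

#include <linux/completion.h>
#include <linux/module.h>

static struct completion release_done;

static int __init demo_init(void)
{
        init_completion(&release_done);
        /* ... register the transport / char device here ... */
        return 0;
}

/* Called from the transport's release path, e.g. when the daemon exits. */
static void demo_on_release(void)
{
        /* ... cancel pending work, fail outstanding requests ... */
        complete(&release_done);
}

static void __exit demo_exit(void)
{
        /* ... tear down the transport so that demo_on_release() fires ... */
        wait_for_completion(&release_done);     /* block until release ran */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
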
index eee238cc60bd09da1e260863f5d0532a893cbd7d..bcc03f0748d61cd5a8c9f31447b77fd8b2e7e13b 100644 (file)
 #define VSS_MINOR  0
 #define VSS_VERSION    (VSS_MAJOR << 16 | VSS_MINOR)
 
+#define VSS_VER_COUNT 1
+static const int vss_versions[] = {
+       VSS_VERSION
+};
+
+#define FW_VER_COUNT 1
+static const int fw_versions[] = {
+       UTIL_FW_VERSION
+};
+
 /*
  * Timeout values are based on expectations from host

  */
@@ -69,6 +79,7 @@ static int dm_reg_value;
 static const char vss_devname[] = "vmbus/hv_vss";
 static __u8 *recv_buffer;
 static struct hvutil_transport *hvt;
+static struct completion release_event;
 
 static void vss_timeout_func(struct work_struct *dummy);
 static void vss_handle_request(struct work_struct *dummy);
@@ -293,10 +304,9 @@ void hv_vss_onchannelcallback(void *context)
        u32 recvlen;
        u64 requestid;
        struct hv_vss_msg *vss_msg;
-
+       int vss_srv_version;
 
        struct icmsg_hdr *icmsghdrp;
-       struct icmsg_negotiate *negop = NULL;
 
        if (vss_transaction.state > HVUTIL_READY)
                return;
@@ -309,9 +319,15 @@ void hv_vss_onchannelcallback(void *context)
                        sizeof(struct vmbuspipe_hdr)];
 
                if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
-                       vmbus_prep_negotiate_resp(icmsghdrp, negop,
-                                recv_buffer, UTIL_FW_VERSION,
-                                VSS_VERSION);
+                       if (vmbus_prep_negotiate_resp(icmsghdrp,
+                                recv_buffer, fw_versions, FW_VER_COUNT,
+                                vss_versions, VSS_VER_COUNT,
+                                NULL, &vss_srv_version)) {
+
+                               pr_info("VSS IC version %d.%d\n",
+                                       vss_srv_version >> 16,
+                                       vss_srv_version & 0xFFFF);
+                       }
                } else {
                        vss_msg = (struct hv_vss_msg *)&recv_buffer[
                                sizeof(struct vmbuspipe_hdr) +
@@ -345,11 +361,13 @@ static void vss_on_reset(void)
        if (cancel_delayed_work_sync(&vss_timeout_work))
                vss_respond_to_host(HV_E_FAIL);
        vss_transaction.state = HVUTIL_DEVICE_INIT;
+       complete(&release_event);
 }
 
 int
 hv_vss_init(struct hv_util_service *srv)
 {
+       init_completion(&release_event);
        if (vmbus_proto_version < VERSION_WIN8_1) {
                pr_warn("Integration service 'Backup (volume snapshot)'"
                        " not supported on this host version.\n");
@@ -382,4 +400,5 @@ void hv_vss_deinit(void)
        cancel_delayed_work_sync(&vss_timeout_work);
        cancel_work_sync(&vss_handle_request_work);
        hvutil_transport_destroy(hvt);
+       wait_for_completion(&release_event);
 }
index e7707747f56df05208fc2a0607007206c224290e..3042eaa13062bbdfbdba853521b7632d35e619e8 100644 (file)
@@ -27,6 +27,9 @@
 #include <linux/sysctl.h>
 #include <linux/reboot.h>
 #include <linux/hyperv.h>
+#include <linux/clockchips.h>
+#include <linux/ptp_clock_kernel.h>
+#include <asm/mshyperv.h>
 
 #include "hyperv_vmbus.h"
 
 static int sd_srv_version;
 static int ts_srv_version;
 static int hb_srv_version;
-static int util_fw_version;
+
+#define SD_VER_COUNT 2
+static const int sd_versions[] = {
+       SD_VERSION,
+       SD_VERSION_1
+};
+
+#define TS_VER_COUNT 3
+static const int ts_versions[] = {
+       TS_VERSION,
+       TS_VERSION_3,
+       TS_VERSION_1
+};
+
+#define HB_VER_COUNT 2
+static const int hb_versions[] = {
+       HB_VERSION,
+       HB_VERSION_1
+};
+
+#define FW_VER_COUNT 2
+static const int fw_versions[] = {
+       UTIL_FW_VERSION,
+       UTIL_WS2K8_FW_VERSION
+};
 
 static void shutdown_onchannelcallback(void *context);
 static struct hv_util_service util_shutdown = {
@@ -118,7 +145,6 @@ static void shutdown_onchannelcallback(void *context)
        struct shutdown_msg_data *shutdown_msg;
 
        struct icmsg_hdr *icmsghdrp;
-       struct icmsg_negotiate *negop = NULL;
 
        vmbus_recvpacket(channel, shut_txf_buf,
                         PAGE_SIZE, &recvlen, &requestid);
@@ -128,9 +154,14 @@ static void shutdown_onchannelcallback(void *context)
                        sizeof(struct vmbuspipe_hdr)];
 
                if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
-                       vmbus_prep_negotiate_resp(icmsghdrp, negop,
-                                       shut_txf_buf, util_fw_version,
-                                       sd_srv_version);
+                       if (vmbus_prep_negotiate_resp(icmsghdrp, shut_txf_buf,
+                                       fw_versions, FW_VER_COUNT,
+                                       sd_versions, SD_VER_COUNT,
+                                       NULL, &sd_srv_version)) {
+                               pr_info("Shutdown IC version %d.%d\n",
+                                       sd_srv_version >> 16,
+                                       sd_srv_version & 0xFFFF);
+                       }
                } else {
                        shutdown_msg =
                                (struct shutdown_msg_data *)&shut_txf_buf[
@@ -181,31 +212,17 @@ struct adj_time_work {
 
 static void hv_set_host_time(struct work_struct *work)
 {
-       struct adj_time_work    *wrk;
-       s64 host_tns;
-       u64 newtime;
-       struct timespec host_ts;
+       struct adj_time_work *wrk;
+       struct timespec64 host_ts;
+       u64 reftime, newtime;
 
        wrk = container_of(work, struct adj_time_work, work);
 
-       newtime = wrk->host_time;
-       if (ts_srv_version > TS_VERSION_3) {
-               /*
-                * Some latency has been introduced since Hyper-V generated
-                * its time sample. Take that latency into account before
-                * using TSC reference time sample from Hyper-V.
-                *
-                * This sample is given by TimeSync v4 and above hosts.
-                */
-               u64 current_tick;
-
-               rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
-               newtime += (current_tick - wrk->ref_time);
-       }
-       host_tns = (newtime - WLTIMEDELTA) * 100;
-       host_ts = ns_to_timespec(host_tns);
+       reftime = hyperv_cs->read(hyperv_cs);
+       newtime = wrk->host_time + (reftime - wrk->ref_time);
+       host_ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
 
-       do_settimeofday(&host_ts);
+       do_settimeofday64(&host_ts);
 }
 
 /*
@@ -222,22 +239,60 @@ static void hv_set_host_time(struct work_struct *work)
  * to discipline the clock.
  */
 static struct adj_time_work  wrk;
-static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 flags)
+
+/*
+ * The last time sample, received from the host. PTP device responds to
+ * requests by using this data and the current partition-wide time reference
+ * count.
+ */
+static struct {
+       u64                             host_time;
+       u64                             ref_time;
+       struct system_time_snapshot     snap;
+       spinlock_t                      lock;
+} host_ts;
+
+static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
 {
+       unsigned long flags;
+       u64 cur_reftime;
 
        /*
         * This check is safe since we are executing in the
-        * interrupt context and time synch messages arre always
+        * interrupt context and time synch messages are always
         * delivered on the same CPU.
         */
-       if (work_pending(&wrk.work))
-               return;
-
-       wrk.host_time = hosttime;
-       wrk.ref_time = reftime;
-       wrk.flags = flags;
-       if ((flags & (ICTIMESYNCFLAG_SYNC | ICTIMESYNCFLAG_SAMPLE)) != 0) {
+       if (adj_flags & ICTIMESYNCFLAG_SYNC) {
+               /* Queue a job to do do_settimeofday64() */
+               if (work_pending(&wrk.work))
+                       return;
+
+               wrk.host_time = hosttime;
+               wrk.ref_time = reftime;
+               wrk.flags = adj_flags;
                schedule_work(&wrk.work);
+       } else {
+               /*
+                * Save the adjusted time sample from the host and the snapshot
+                * of the current system time for PTP device.
+                */
+               spin_lock_irqsave(&host_ts.lock, flags);
+
+               cur_reftime = hyperv_cs->read(hyperv_cs);
+               host_ts.host_time = hosttime;
+               host_ts.ref_time = cur_reftime;
+               ktime_get_snapshot(&host_ts.snap);
+
+               /*
+                * TimeSync v4 messages contain reference time (guest's Hyper-V
+                * clocksource read when the time sample was generated), we can
+                * improve the precision by adding the delta between now and the
+                * time of generation.
+                */
+               if (ts_srv_version > TS_VERSION_3)
+                       host_ts.host_time += (cur_reftime - reftime);
+
+               spin_unlock_irqrestore(&host_ts.lock, flags);
        }
 }
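
Both hv_set_host_time() and the saved host_ts sample apply the same arithmetic: the host's timestamp is in 100 ns units since the 1601 Windows epoch, the guest ages it by the reference-counter delta accumulated since the sample was taken, subtracts WLTIMEDELTA and multiplies by 100 to get Unix nanoseconds. A standalone sketch of that conversion, assuming the conventional value of WLTIMEDELTA:

#include <stdint.h>
#include <stdio.h>

/* 100 ns ticks between 1601-01-01 and 1970-01-01 (the value of WLTIMEDELTA). */
#define WLTIMEDELTA 116444736000000000ULL

/*
 * host_time: host sample, 100 ns units since 1601.
 * ref_then:  guest reference counter when the sample was produced.
 * ref_now:   guest reference counter now (same 100 ns units).
 */
static uint64_t hv_sample_to_unix_ns(uint64_t host_time,
                                     uint64_t ref_then, uint64_t ref_now)
{
        uint64_t aged = host_time + (ref_now - ref_then);       /* age the sample */

        return (aged - WLTIMEDELTA) * 100;                      /* ns since 1970 */
}

int main(void)
{
        /* A made-up sample that is 50000 ticks (5 ms) old. */
        uint64_t ns = hv_sample_to_unix_ns(131300000000000000ULL,
                                           1000000, 1050000);

        printf("%llu ns since the Unix epoch\n", (unsigned long long)ns);
        return 0;
}
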
 
@@ -253,7 +308,6 @@ static void timesync_onchannelcallback(void *context)
        struct ictimesync_data *timedatap;
        struct ictimesync_ref_data *refdata;
        u8 *time_txf_buf = util_timesynch.recv_buffer;
-       struct icmsg_negotiate *negop = NULL;
 
        vmbus_recvpacket(channel, time_txf_buf,
                         PAGE_SIZE, &recvlen, &requestid);
@@ -263,12 +317,14 @@ static void timesync_onchannelcallback(void *context)
                                sizeof(struct vmbuspipe_hdr)];
 
                if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
-                       vmbus_prep_negotiate_resp(icmsghdrp, negop,
-                                               time_txf_buf,
-                                               util_fw_version,
-                                               ts_srv_version);
-                       pr_info("Using TimeSync version %d.%d\n",
-                               ts_srv_version >> 16, ts_srv_version & 0xFFFF);
+                       if (vmbus_prep_negotiate_resp(icmsghdrp, time_txf_buf,
+                                               fw_versions, FW_VER_COUNT,
+                                               ts_versions, TS_VER_COUNT,
+                                               NULL, &ts_srv_version)) {
+                               pr_info("TimeSync IC version %d.%d\n",
+                                       ts_srv_version >> 16,
+                                       ts_srv_version & 0xFFFF);
+                       }
                } else {
                        if (ts_srv_version > TS_VERSION_3) {
                                refdata = (struct ictimesync_ref_data *)
@@ -312,7 +368,6 @@ static void heartbeat_onchannelcallback(void *context)
        struct icmsg_hdr *icmsghdrp;
        struct heartbeat_msg_data *heartbeat_msg;
        u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
-       struct icmsg_negotiate *negop = NULL;
 
        while (1) {
 
@@ -326,9 +381,16 @@ static void heartbeat_onchannelcallback(void *context)
                                sizeof(struct vmbuspipe_hdr)];
 
                if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
-                       vmbus_prep_negotiate_resp(icmsghdrp, negop,
-                               hbeat_txf_buf, util_fw_version,
-                               hb_srv_version);
+                       if (vmbus_prep_negotiate_resp(icmsghdrp,
+                                       hbeat_txf_buf,
+                                       fw_versions, FW_VER_COUNT,
+                                       hb_versions, HB_VER_COUNT,
+                                       NULL, &hb_srv_version)) {
+
+                               pr_info("Heartbeat IC version %d.%d\n",
+                                       hb_srv_version >> 16,
+                                       hb_srv_version & 0xFFFF);
+                       }
                } else {
                        heartbeat_msg =
                                (struct heartbeat_msg_data *)&hbeat_txf_buf[
@@ -373,38 +435,10 @@ static int util_probe(struct hv_device *dev,
         * Turn off batched reading for all util drivers before we open the
         * channel.
         */
-
-       set_channel_read_state(dev->channel, false);
+       set_channel_read_mode(dev->channel, HV_CALL_DIRECT);
 
        hv_set_drvdata(dev, srv);
 
-       /*
-        * Based on the host; initialize the framework and
-        * service version numbers we will negotiate.
-        */
-       switch (vmbus_proto_version) {
-       case (VERSION_WS2008):
-               util_fw_version = UTIL_WS2K8_FW_VERSION;
-               sd_srv_version = SD_VERSION_1;
-               ts_srv_version = TS_VERSION_1;
-               hb_srv_version = HB_VERSION_1;
-               break;
-       case VERSION_WIN7:
-       case VERSION_WIN8:
-       case VERSION_WIN8_1:
-               util_fw_version = UTIL_FW_VERSION;
-               sd_srv_version = SD_VERSION;
-               ts_srv_version = TS_VERSION_3;
-               hb_srv_version = HB_VERSION;
-               break;
-       case VERSION_WIN10:
-       default:
-               util_fw_version = UTIL_FW_VERSION;
-               sd_srv_version = SD_VERSION;
-               ts_srv_version = TS_VERSION;
-               hb_srv_version = HB_VERSION;
-       }
-
        ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
                        srv->util_cb, dev->channel);
        if (ret)
@@ -470,14 +504,113 @@ static  struct hv_driver util_drv = {
        .remove =  util_remove,
 };
 
+static int hv_ptp_enable(struct ptp_clock_info *info,
+                        struct ptp_clock_request *request, int on)
+{
+       return -EOPNOTSUPP;
+}
+
+static int hv_ptp_settime(struct ptp_clock_info *p, const struct timespec64 *ts)
+{
+       return -EOPNOTSUPP;
+}
+
+static int hv_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+{
+       return -EOPNOTSUPP;
+}
+static int hv_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+       return -EOPNOTSUPP;
+}
+
+static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
+{
+       unsigned long flags;
+       u64 newtime, reftime;
+
+       spin_lock_irqsave(&host_ts.lock, flags);
+       reftime = hyperv_cs->read(hyperv_cs);
+       newtime = host_ts.host_time + (reftime - host_ts.ref_time);
+       *ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
+       spin_unlock_irqrestore(&host_ts.lock, flags);
+
+       return 0;
+}
+
+static int hv_ptp_get_syncdevicetime(ktime_t *device,
+                                    struct system_counterval_t *system,
+                                    void *ctx)
+{
+       system->cs = hyperv_cs;
+       system->cycles = host_ts.ref_time;
+       *device = ns_to_ktime((host_ts.host_time - WLTIMEDELTA) * 100);
+
+       return 0;
+}
+
+static int hv_ptp_getcrosststamp(struct ptp_clock_info *ptp,
+                                struct system_device_crosststamp *xtstamp)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&host_ts.lock, flags);
+
+       /*
+        * host_ts contains the last time sample from the host and the snapshot
+        * of system time. We don't need to calculate the time delta between
+        * the reception and now as get_device_system_crosststamp() does the
+        * required interpolation.
+        */
+       ret = get_device_system_crosststamp(hv_ptp_get_syncdevicetime,
+                                           NULL, &host_ts.snap, xtstamp);
+
+       spin_unlock_irqrestore(&host_ts.lock, flags);
+
+       return ret;
+}
+
+static struct ptp_clock_info ptp_hyperv_info = {
+       .name           = "hyperv",
+       .enable         = hv_ptp_enable,
+       .adjtime        = hv_ptp_adjtime,
+       .adjfreq        = hv_ptp_adjfreq,
+       .gettime64      = hv_ptp_gettime,
+       .getcrosststamp = hv_ptp_getcrosststamp,
+       .settime64      = hv_ptp_settime,
+       .owner          = THIS_MODULE,
+};
+
+static struct ptp_clock *hv_ptp_clock;
+
 static int hv_timesync_init(struct hv_util_service *srv)
 {
+       /* TimeSync requires Hyper-V clocksource. */
+       if (!hyperv_cs)
+               return -ENODEV;
+
        INIT_WORK(&wrk.work, hv_set_host_time);
+
+       /*
+        * ptp_clock_register() returns NULL when CONFIG_PTP_1588_CLOCK is
+        * disabled but the driver is still useful without the PTP device
+        * as it still handles the ICTIMESYNCFLAG_SYNC case.
+        */
+       hv_ptp_clock = ptp_clock_register(&ptp_hyperv_info, NULL);
+       if (IS_ERR_OR_NULL(hv_ptp_clock)) {
+               pr_err("cannot register PTP clock: %ld\n",
+                      PTR_ERR(hv_ptp_clock));
+               hv_ptp_clock = NULL;
+       }
+
        return 0;
 }
 
 static void hv_timesync_deinit(void)
 {
+       if (hv_ptp_clock)
+               ptp_clock_unregister(hv_ptp_clock);
        cancel_work_sync(&wrk.work);
 }
 
index 0675b395ce5c47f284dec3c86b02133086e86cf9..884f83bba1ab720f15e4d15780abf78779696aba 100644 (file)
@@ -29,6 +29,7 @@
 #include <asm/sync_bitops.h>
 #include <linux/atomic.h>
 #include <linux/hyperv.h>
+#include <linux/interrupt.h>
 
 /*
  * Timeout for services such as KVP and fcopy.
  */
 #define HV_UTIL_NEGO_TIMEOUT 55
 
-/*
- * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
- * is set by CPUID(HVCPUID_VERSION_FEATURES).
- */
-enum hv_cpuid_function {
-       HVCPUID_VERSION_FEATURES                = 0x00000001,
-       HVCPUID_VENDOR_MAXFUNCTION              = 0x40000000,
-       HVCPUID_INTERFACE                       = 0x40000001,
-
-       /*
-        * The remaining functions depend on the value of
-        * HVCPUID_INTERFACE
-        */
-       HVCPUID_VERSION                 = 0x40000002,
-       HVCPUID_FEATURES                        = 0x40000003,
-       HVCPUID_ENLIGHTENMENT_INFO      = 0x40000004,
-       HVCPUID_IMPLEMENTATION_LIMITS           = 0x40000005,
-};
-
-#define  HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE   0x400
-
-#define HV_X64_MSR_CRASH_P0   0x40000100
-#define HV_X64_MSR_CRASH_P1   0x40000101
-#define HV_X64_MSR_CRASH_P2   0x40000102
-#define HV_X64_MSR_CRASH_P3   0x40000103
-#define HV_X64_MSR_CRASH_P4   0x40000104
-#define HV_X64_MSR_CRASH_CTL  0x40000105
-
-#define HV_CRASH_CTL_CRASH_NOTIFY (1ULL << 63)
-
-/* Define version of the synthetic interrupt controller. */
-#define HV_SYNIC_VERSION               (1)
-
-#define HV_ANY_VP                      (0xFFFFFFFF)
-
 /* Define synthetic interrupt controller flag constants. */
 #define HV_EVENT_FLAGS_COUNT           (256 * 8)
-#define HV_EVENT_FLAGS_BYTE_COUNT      (256)
-#define HV_EVENT_FLAGS_DWORD_COUNT     (256 / sizeof(u32))
-
-/* Define invalid partition identifier. */
-#define HV_PARTITION_ID_INVALID                ((u64)0x0)
-
-/* Define port type. */
-enum hv_port_type {
-       HVPORT_MSG      = 1,
-       HVPORT_EVENT            = 2,
-       HVPORT_MONITOR  = 3
-};
-
-/* Define port information structure. */
-struct hv_port_info {
-       enum hv_port_type port_type;
-       u32 padding;
-       union {
-               struct {
-                       u32 target_sint;
-                       u32 target_vp;
-                       u64 rsvdz;
-               } message_port_info;
-               struct {
-                       u32 target_sint;
-                       u32 target_vp;
-                       u16 base_flag_number;
-                       u16 flag_count;
-                       u32 rsvdz;
-               } event_port_info;
-               struct {
-                       u64 monitor_address;
-                       u64 rsvdz;
-               } monitor_port_info;
-       };
-};
-
-struct hv_connection_info {
-       enum hv_port_type port_type;
-       u32 padding;
-       union {
-               struct {
-                       u64 rsvdz;
-               } message_connection_info;
-               struct {
-                       u64 rsvdz;
-               } event_connection_info;
-               struct {
-                       u64 monitor_address;
-               } monitor_connection_info;
-       };
-};
+#define HV_EVENT_FLAGS_LONG_COUNT      (256 / sizeof(unsigned long))
 
 /*
  * Timer configuration register.
@@ -146,18 +61,10 @@ union hv_timer_config {
        };
 };
 
-/* Define the number of message buffers associated with each port. */
-#define HV_PORT_MESSAGE_BUFFER_COUNT   (16)
 
 /* Define the synthetic interrupt controller event flags format. */
 union hv_synic_event_flags {
-       u8 flags8[HV_EVENT_FLAGS_BYTE_COUNT];
-       u32 flags32[HV_EVENT_FLAGS_DWORD_COUNT];
-};
-
-/* Define the synthetic interrupt flags page layout. */
-struct hv_synic_event_flags_page {
-       union hv_synic_event_flags sintevent_flags[HV_SYNIC_SINT_COUNT];
+       unsigned long flags[HV_EVENT_FLAGS_LONG_COUNT];
 };
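The event flags area is now viewed as an array of unsigned long so the generic bitops used later in the series (sync_test_and_clear_bit(), for_each_set_bit()) can walk it directly. A hypothetical compile-time sanity check of the sizing, not part of the patch, assuming the kernel's BUILD_BUG_ON():

        static inline void hv_event_flags_size_check(void)
        {
                /* 256 bytes per SINT slot either way:
                 * 256 * 8 bits == (256 / sizeof(unsigned long)) * BITS_PER_LONG bits,
                 * i.e. 2048 bits, which is the limit passed to for_each_set_bit()
                 * in vmbus_chan_sched() further down.
                 */
                BUILD_BUG_ON(HV_EVENT_FLAGS_LONG_COUNT * sizeof(unsigned long) * 8 !=
                             HV_EVENT_FLAGS_COUNT);
        }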
 
 /* Define SynIC control register. */
@@ -261,6 +168,8 @@ struct hv_monitor_page {
        u8 rsvdz4[1984];
 };
 
+#define HV_HYPERCALL_PARAM_ALIGN       sizeof(u64)
+
 /* Definition of the hv_post_message hypercall input structure. */
 struct hv_input_post_message {
        union hv_connection_id connectionid;
@@ -270,56 +179,6 @@ struct hv_input_post_message {
        u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
 };
 
-/*
- * Versioning definitions used for guests reporting themselves to the
- * hypervisor, and visa versa.
- */
-
-/* Version info reported by guest OS's */
-enum hv_guest_os_vendor {
-       HVGUESTOS_VENDOR_MICROSOFT      = 0x0001
-};
-
-enum hv_guest_os_microsoft_ids {
-       HVGUESTOS_MICROSOFT_UNDEFINED   = 0x00,
-       HVGUESTOS_MICROSOFT_MSDOS               = 0x01,
-       HVGUESTOS_MICROSOFT_WINDOWS3X   = 0x02,
-       HVGUESTOS_MICROSOFT_WINDOWS9X   = 0x03,
-       HVGUESTOS_MICROSOFT_WINDOWSNT   = 0x04,
-       HVGUESTOS_MICROSOFT_WINDOWSCE   = 0x05
-};
-
-/*
- * Declare the MSR used to identify the guest OS.
- */
-#define HV_X64_MSR_GUEST_OS_ID 0x40000000
-
-union hv_x64_msr_guest_os_id_contents {
-       u64 as_uint64;
-       struct {
-               u64 build_number:16;
-               u64 service_version:8; /* Service Pack, etc. */
-               u64 minor_version:8;
-               u64 major_version:8;
-               u64 os_id:8; /* enum hv_guest_os_microsoft_ids (if Vendor=MS) */
-               u64 vendor_id:16; /* enum hv_guest_os_vendor */
-       };
-};
-
-/*
- * Declare the MSR used to setup pages used to communicate with the hypervisor.
- */
-#define HV_X64_MSR_HYPERCALL   0x40000001
-
-union hv_x64_msr_hypercall_contents {
-       u64 as_uint64;
-       struct {
-               u64 enable:1;
-               u64 reserved:11;
-               u64 guest_physical_address:52;
-       };
-};
-
 
 enum {
        VMBUS_MESSAGE_CONNECTION_ID     = 1,
@@ -331,111 +190,44 @@ enum {
        VMBUS_MESSAGE_SINT              = 2,
 };
 
-/* #defines */
-
-#define HV_PRESENT_BIT                 0x80000000
-
-/*
- * The guest OS needs to register the guest ID with the hypervisor.
- * The guest ID is a 64 bit entity and the structure of this ID is
- * specified in the Hyper-V specification:
- *
- * http://msdn.microsoft.com/en-us/library/windows/hardware/ff542653%28v=vs.85%29.aspx
- *
- * While the current guideline does not specify how Linux guest ID(s)
- * need to be generated, our plan is to publish the guidelines for
- * Linux and other guest operating systems that currently are hosted
- * on Hyper-V. The implementation here conforms to this yet
- * unpublished guidelines.
- *
- *
- * Bit(s)
- * 63 - Indicates if the OS is Open Source or not; 1 is Open Source
- * 62:56 - Os Type; Linux is 0x100
- * 55:48 - Distro specific identification
- * 47:16 - Linux kernel version number
- * 15:0  - Distro specific identification
- *
- *
- */
-
-#define HV_LINUX_VENDOR_ID             0x8100
-
 /*
- * Generate the guest ID based on the guideline described above.
+ * Per cpu state for channel handling
  */
+struct hv_per_cpu_context {
+       void *synic_message_page;
+       void *synic_event_page;
+       /*
+        * buffer to post messages to the host.
+        */
+       void *post_msg_page;
 
-static inline  __u64 generate_guest_id(__u8 d_info1, __u32 kernel_version,
-                                       __u16 d_info2)
-{
-       __u64 guest_id = 0;
-
-       guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
-       guest_id |= (((__u64)(d_info1)) << 48);
-       guest_id |= (((__u64)(kernel_version)) << 16);
-       guest_id |= ((__u64)(d_info2));
-
-       return guest_id;
-}
-
-
-#define HV_CPU_POWER_MANAGEMENT                (1 << 0)
-#define HV_RECOMMENDATIONS_MAX         4
-
-#define HV_X64_MAX                     5
-#define HV_CAPS_MAX                    8
-
-
-#define HV_HYPERCALL_PARAM_ALIGN       sizeof(u64)
-
-
-/* Service definitions */
-
-#define HV_SERVICE_PARENT_PORT                         (0)
-#define HV_SERVICE_PARENT_CONNECTION                   (0)
-
-#define HV_SERVICE_CONNECT_RESPONSE_SUCCESS            (0)
-#define HV_SERVICE_CONNECT_RESPONSE_INVALID_PARAMETER  (1)
-#define HV_SERVICE_CONNECT_RESPONSE_UNKNOWN_SERVICE    (2)
-#define HV_SERVICE_CONNECT_RESPONSE_CONNECTION_REJECTED        (3)
-
-#define HV_SERVICE_CONNECT_REQUEST_MESSAGE_ID          (1)
-#define HV_SERVICE_CONNECT_RESPONSE_MESSAGE_ID         (2)
-#define HV_SERVICE_DISCONNECT_REQUEST_MESSAGE_ID       (3)
-#define HV_SERVICE_DISCONNECT_RESPONSE_MESSAGE_ID      (4)
-#define HV_SERVICE_MAX_MESSAGE_ID                              (4)
-
-#define HV_SERVICE_PROTOCOL_VERSION (0x0010)
-#define HV_CONNECT_PAYLOAD_BYTE_COUNT 64
-
-/* #define VMBUS_REVISION_NUMBER       6 */
-
-/* Our local vmbus's port and connection id. Anything >0 is fine */
-/* #define VMBUS_PORT_ID               11 */
+       /*
+        * Starting with win8, we can take channel interrupts on any CPU;
+        * we will manage the tasklet that handles event messages on a per-CPU
+        * basis.
+        */
+       struct tasklet_struct msg_dpc;
 
-/* 628180B8-308D-4c5e-B7DB-1BEB62E62EF4 */
-static const uuid_le VMBUS_SERVICE_ID = {
-       .b = {
-               0xb8, 0x80, 0x81, 0x62, 0x8d, 0x30, 0x5e, 0x4c,
-               0xb7, 0xdb, 0x1b, 0xeb, 0x62, 0xe6, 0x2e, 0xf4
-       },
+       /*
+        * To optimize the mapping of relid to channel, maintain a
+        * per-cpu list of the channels based on their CPU affinity.
+        */
+       struct list_head chan_list;
+       struct clock_event_device *clk_evt;
 };
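This per-cpu structure replaces the NR_CPUS-sized arrays removed from struct hv_context below. A rough sketch of the usual __percpu access pattern, assuming the matching allocation lives in the hv.c part of the series (not shown in this hunk):

        /* Hypothetical illustration only; field names mirror the struct above. */
        static int hv_percpu_sketch(void)
        {
                struct hv_per_cpu_context __percpu *cpu_ctx;
                struct hv_per_cpu_context *hv_cpu;
                int cpu;

                cpu_ctx = alloc_percpu(struct hv_per_cpu_context);
                if (!cpu_ctx)
                        return -ENOMEM;

                for_each_present_cpu(cpu) {
                        hv_cpu = per_cpu_ptr(cpu_ctx, cpu); /* a given CPU's instance */
                        INIT_LIST_HEAD(&hv_cpu->chan_list);
                        tasklet_init(&hv_cpu->msg_dpc, vmbus_on_msg_dpc,
                                     (unsigned long)hv_cpu);
                }

                /* On the hot path the running CPU's copy is reached without locks: */
                hv_cpu = this_cpu_ptr(cpu_ctx);

                return 0;
        }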
 
-
-
 struct hv_context {
        /* We only support running on top of Hyper-V
        * So at this point this really can only contain the Hyper-V ID
        */
        u64 guestid;
 
-       void *hypercall_page;
        void *tsc_page;
 
        bool synic_initialized;
 
-       void *synic_message_page[NR_CPUS];
-       void *synic_event_page[NR_CPUS];
+       struct hv_per_cpu_context __percpu *cpu_context;
+
        /*
         * Hypervisor's notion of virtual processor ID is different from
         * Linux' notion of CPU ID. This information can only be retrieved
@@ -446,26 +238,7 @@ struct hv_context {
         * Linux cpuid 'a'.
         */
        u32 vp_index[NR_CPUS];
-       /*
-        * Starting with win8, we can take channel interrupts on any CPU;
-        * we will manage the tasklet that handles events messages on a per CPU
-        * basis.
-        */
-       struct tasklet_struct *event_dpc[NR_CPUS];
-       struct tasklet_struct *msg_dpc[NR_CPUS];
-       /*
-        * To optimize the mapping of relid to channel, maintain
-        * per-cpu list of the channels based on their CPU affinity.
-        */
-       struct list_head percpu_list[NR_CPUS];
-       /*
-        * buffer to post messages to the host.
-        */
-       void *post_msg_page[NR_CPUS];
-       /*
-        * Support PV clockevent device.
-        */
-       struct clock_event_device *clk_evt[NR_CPUS];
+
        /*
         * To manage allocations in a NUMA node.
         * Array indexed by numa node ID.
@@ -475,14 +248,6 @@ struct hv_context {
 
 extern struct hv_context hv_context;
 
-struct ms_hyperv_tsc_page {
-       volatile u32 tsc_sequence;
-       u32 reserved1;
-       volatile u64 tsc_scale;
-       volatile s64 tsc_offset;
-       u64 reserved2[509];
-};
-
 struct hv_ring_buffer_debug_info {
        u32 current_interrupt_mask;
        u32 current_read_index;
@@ -495,8 +260,6 @@ struct hv_ring_buffer_debug_info {
 
 extern int hv_init(void);
 
-extern void hv_cleanup(bool crash);
-
 extern int hv_post_message(union hv_connection_id connection_id,
                         enum hv_message_type message_type,
                         void *payload, size_t payload_size);
@@ -505,20 +268,12 @@ extern int hv_synic_alloc(void);
 
 extern void hv_synic_free(void);
 
-extern void hv_synic_init(void *irqarg);
+extern int hv_synic_init(unsigned int cpu);
 
-extern void hv_synic_cleanup(void *arg);
+extern int hv_synic_cleanup(unsigned int cpu);
 
 extern void hv_synic_clockevents_cleanup(void);
 
-/*
- * Host version information.
- */
-extern unsigned int host_info_eax;
-extern unsigned int host_info_ebx;
-extern unsigned int host_info_ecx;
-extern unsigned int host_info_edx;
-
 /* Interface */
 
 
@@ -528,20 +283,14 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
 void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
 
 int hv_ringbuffer_write(struct vmbus_channel *channel,
-                   struct kvec *kv_list,
-                   u32 kv_count, bool lock,
-                   bool kick_q);
+                       const struct kvec *kv_list, u32 kv_count);
 
 int hv_ringbuffer_read(struct vmbus_channel *channel,
                       void *buffer, u32 buflen, u32 *buffer_actual_len,
                       u64 *requestid, bool raw);
 
-void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
-                           struct hv_ring_buffer_debug_info *debug_info);
-
-void hv_begin_read(struct hv_ring_buffer_info *rbi);
-
-u32 hv_end_read(struct hv_ring_buffer_info *rbi);
+void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+                                struct hv_ring_buffer_debug_info *debug_info);
 
 /*
  * Maximum channels is determined by the size of the interrupt page
@@ -608,6 +357,11 @@ struct vmbus_msginfo {
 
 extern struct vmbus_connection vmbus_connection;
 
+static inline void vmbus_send_interrupt(u32 relid)
+{
+       sync_set_bit(relid, vmbus_connection.send_int_page);
+}
+
 enum vmbus_message_handler_type {
        /* The related handler can sleep. */
        VMHT_BLOCKING = 0,
@@ -625,41 +379,6 @@ struct vmbus_channel_message_table_entry {
 extern struct vmbus_channel_message_table_entry
        channel_message_table[CHANNELMSG_COUNT];
 
-/* Free the message slot and signal end-of-message if required */
-static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
-{
-       /*
-        * On crash we're reading some other CPU's message page and we need
-        * to be careful: this other CPU may already had cleared the header
-        * and the host may already had delivered some other message there.
-        * In case we blindly write msg->header.message_type we're going
-        * to lose it. We can still lose a message of the same type but
-        * we count on the fact that there can only be one
-        * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
-        * on crash.
-        */
-       if (cmpxchg(&msg->header.message_type, old_msg_type,
-                   HVMSG_NONE) != old_msg_type)
-               return;
-
-       /*
-        * Make sure the write to MessageType (ie set to
-        * HVMSG_NONE) happens before we read the
-        * MessagePending and EOMing. Otherwise, the EOMing
-        * will not deliver any more messages since there is
-        * no empty slot
-        */
-       mb();
-
-       if (msg->header.message_flags.msg_pending) {
-               /*
-                * This will cause message queue rescan to
-                * possibly deliver another msg from the
-                * hypervisor
-                */
-               wrmsrl(HV_X64_MSR_EOM, 0);
-       }
-}
 
 /* General vmbus interface */
 
@@ -670,10 +389,6 @@ struct hv_device *vmbus_device_create(const uuid_le *type,
 int vmbus_device_register(struct hv_device *child_device_obj);
 void vmbus_device_unregister(struct hv_device *device_obj);
 
-/* static void */
-/* VmbusChildDeviceDestroy( */
-/* struct hv_device *); */
-
 struct vmbus_channel *relid2channel(u32 relid);
 
 void vmbus_free_channels(void);
@@ -683,7 +398,7 @@ void vmbus_free_channels(void);
 int vmbus_connect(void);
 void vmbus_disconnect(void);
 
-int vmbus_post_msg(void *buffer, size_t buflen);
+int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep);
 
 void vmbus_on_event(unsigned long data);
 void vmbus_on_msg_dpc(unsigned long data);
index 308dbda700ebdaeb02f222aa46dc7bb79c24c0da..87799e81af97697cb4879acb227a30ea0bf792bd 100644 (file)
 
 #include "hyperv_vmbus.h"
 
-void hv_begin_read(struct hv_ring_buffer_info *rbi)
-{
-       rbi->ring_buffer->interrupt_mask = 1;
-       virt_mb();
-}
-
-u32 hv_end_read(struct hv_ring_buffer_info *rbi)
-{
-
-       rbi->ring_buffer->interrupt_mask = 0;
-       virt_mb();
-
-       /*
-        * Now check to see if the ring buffer is still empty.
-        * If it is not, we raced and we need to process new
-        * incoming messages.
-        */
-       return hv_get_bytes_to_read(rbi);
-}
-
 /*
  * When we write to the ring buffer, check if the host needs to
  * be signaled. Here is the details of this protocol:
@@ -77,8 +57,7 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
  * host logic is fixed.
  */
 
-static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel,
-                              bool kick_q)
+static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
 {
        struct hv_ring_buffer_info *rbi = &channel->outbound;
 
@@ -117,11 +96,9 @@ hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
 
 /* Get the next read location for the specified ring buffer. */
 static inline u32
-hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
+hv_get_next_read_location(const struct hv_ring_buffer_info *ring_info)
 {
-       u32 next = ring_info->ring_buffer->read_index;
-
-       return next;
+       return ring_info->ring_buffer->read_index;
 }
 
 /*
@@ -129,13 +106,14 @@ hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
  * This allows the caller to skip.
  */
 static inline u32
-hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
-                                u32 offset)
+hv_get_next_readlocation_withoffset(const struct hv_ring_buffer_info *ring_info,
+                                   u32 offset)
 {
        u32 next = ring_info->ring_buffer->read_index;
 
        next += offset;
-       next %= ring_info->ring_datasize;
+       if (next >= ring_info->ring_datasize)
+               next -= ring_info->ring_datasize;
 
        return next;
 }
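This is the "replace modulus operation with subtraction" change from the merge shortlog: the read index and the offset are each bounded by ring_datasize, so their sum can wrap at most once and one conditional subtraction gives the same result as "%" while avoiding an integer division on the hot path. A small sketch of the equivalence under that assumption:

        /* Assumes 0 <= index < size and 0 <= offset <= size, so index + offset < 2 * size. */
        static inline u32 ring_advance(u32 index, u32 offset, u32 size)
        {
                u32 next = index + offset;

                if (next >= size)
                        next -= size;   /* identical to next % size in this range */
                return next;
        }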
@@ -151,7 +129,7 @@ hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
 
 /* Get the size of the ring buffer. */
 static inline u32
-hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
+hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
 {
        return ring_info->ring_datasize;
 }
@@ -168,7 +146,7 @@ hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
  * Assume there is enough room. Handles wrap-around in src case only!!
  */
 static u32 hv_copyfrom_ringbuffer(
-       struct hv_ring_buffer_info      *ring_info,
+       const struct hv_ring_buffer_info *ring_info,
        void                            *dest,
        u32                             destlen,
        u32                             start_read_offset)
@@ -179,7 +157,8 @@ static u32 hv_copyfrom_ringbuffer(
        memcpy(dest, ring_buffer + start_read_offset, destlen);
 
        start_read_offset += destlen;
-       start_read_offset %= ring_buffer_size;
+       if (start_read_offset >= ring_buffer_size)
+               start_read_offset -= ring_buffer_size;
 
        return start_read_offset;
 }
@@ -192,7 +171,7 @@ static u32 hv_copyfrom_ringbuffer(
 static u32 hv_copyto_ringbuffer(
        struct hv_ring_buffer_info      *ring_info,
        u32                             start_write_offset,
-       void                            *src,
+       const void                      *src,
        u32                             srclen)
 {
        void *ring_buffer = hv_get_ring_buffer(ring_info);
@@ -201,14 +180,15 @@ static u32 hv_copyto_ringbuffer(
        memcpy(ring_buffer + start_write_offset, src, srclen);
 
        start_write_offset += srclen;
-       start_write_offset %= ring_buffer_size;
+       if (start_write_offset >= ring_buffer_size)
+               start_write_offset -= ring_buffer_size;
 
        return start_write_offset;
 }
 
 /* Get various debug metrics for the specified ring buffer. */
-void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
-                           struct hv_ring_buffer_debug_info *debug_info)
+void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+                                struct hv_ring_buffer_debug_info *debug_info)
 {
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
@@ -285,8 +265,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
 
 /* Write to the ring buffer. */
 int hv_ringbuffer_write(struct vmbus_channel *channel,
-                   struct kvec *kv_list, u32 kv_count, bool lock,
-                   bool kick_q)
+                       const struct kvec *kv_list, u32 kv_count)
 {
        int i = 0;
        u32 bytes_avail_towrite;
@@ -298,13 +277,15 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
        unsigned long flags = 0;
        struct hv_ring_buffer_info *outring_info = &channel->outbound;
 
+       if (channel->rescind)
+               return -ENODEV;
+
        for (i = 0; i < kv_count; i++)
                totalbytes_towrite += kv_list[i].iov_len;
 
        totalbytes_towrite += sizeof(u64);
 
-       if (lock)
-               spin_lock_irqsave(&outring_info->ring_lock, flags);
+       spin_lock_irqsave(&outring_info->ring_lock, flags);
 
        bytes_avail_towrite = hv_get_bytes_to_write(outring_info);
 
@@ -314,8 +295,7 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
         * is empty since the read index == write index.
         */
        if (bytes_avail_towrite <= totalbytes_towrite) {
-               if (lock)
-                       spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+               spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }
 
@@ -346,10 +326,13 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
        hv_set_next_write_location(outring_info, next_write_location);
 
 
-       if (lock)
-               spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+       spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+
+       hv_signal_on_write(old_write, channel);
+
+       if (channel->rescind)
+               return -ENODEV;
 
-       hv_signal_on_write(old_write, channel, kick_q);
        return 0;
 }
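With the lock and kick_q parameters gone, writers always take the ring lock internally, and the rescind checks bound the call on both sides (-ENODEV if the channel went away, -EAGAIN if the ring is full). A hypothetical caller sketch; the descriptor and payload here are placeholders, not the real vmbus_sendpacket() layout:

        static int send_sketch(struct vmbus_channel *channel,
                               void *desc, u32 desclen, void *payload, u32 paylen)
        {
                struct kvec bufferlist[2] = {
                        { .iov_base = desc,    .iov_len = desclen },
                        { .iov_base = payload, .iov_len = paylen  },
                };

                return hv_ringbuffer_write(channel, bufferlist, 2);
        }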
 
index 230c62e7f567521fc442154a448f1fd457f7f48a..f7f6b9144b07c012c9987013a833823cc03934cb 100644 (file)
@@ -54,31 +54,7 @@ static struct acpi_device  *hv_acpi_dev;
 
 static struct completion probe_event;
 
-
-static void hyperv_report_panic(struct pt_regs *regs)
-{
-       static bool panic_reported;
-
-       /*
-        * We prefer to report panic on 'die' chain as we have proper
-        * registers to report, but if we miss it (e.g. on BUG()) we need
-        * to report it on 'panic'.
-        */
-       if (panic_reported)
-               return;
-       panic_reported = true;
-
-       wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
-       wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
-       wrmsrl(HV_X64_MSR_CRASH_P2, regs->bx);
-       wrmsrl(HV_X64_MSR_CRASH_P3, regs->cx);
-       wrmsrl(HV_X64_MSR_CRASH_P4, regs->dx);
-
-       /*
-        * Let Hyper-V know there is crash data available
-        */
-       wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
-}
+static int hyperv_cpuhp_online;
 
 static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
                              void *args)
@@ -859,9 +835,10 @@ static void vmbus_onmessage_work(struct work_struct *work)
        kfree(ctx);
 }
 
-static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
+static void hv_process_timer_expiration(struct hv_message *msg,
+                                       struct hv_per_cpu_context *hv_cpu)
 {
-       struct clock_event_device *dev = hv_context.clk_evt[cpu];
+       struct clock_event_device *dev = hv_cpu->clk_evt;
 
        if (dev->event_handler)
                dev->event_handler(dev);
@@ -871,8 +848,8 @@ static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
 
 void vmbus_on_msg_dpc(unsigned long data)
 {
-       int cpu = smp_processor_id();
-       void *page_addr = hv_context.synic_message_page[cpu];
+       struct hv_per_cpu_context *hv_cpu = (void *)data;
+       void *page_addr = hv_cpu->synic_message_page;
        struct hv_message *msg = (struct hv_message *)page_addr +
                                  VMBUS_MESSAGE_SINT;
        struct vmbus_channel_message_header *hdr;
@@ -908,16 +885,88 @@ msg_handled:
        vmbus_signal_eom(msg, message_type);
 }
 
+
+/*
+ * Direct callback for channels using other deferred processing
+ */
+static void vmbus_channel_isr(struct vmbus_channel *channel)
+{
+       void (*callback_fn)(void *);
+
+       callback_fn = READ_ONCE(channel->onchannel_callback);
+       if (likely(callback_fn != NULL))
+               (*callback_fn)(channel->channel_callback_context);
+}
+
+/*
+ * Schedule all channels with events pending
+ */
+static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
+{
+       unsigned long *recv_int_page;
+       u32 maxbits, relid;
+
+       if (vmbus_proto_version < VERSION_WIN8) {
+               maxbits = MAX_NUM_CHANNELS_SUPPORTED;
+               recv_int_page = vmbus_connection.recv_int_page;
+       } else {
+               /*
+                * When the host is win8 and beyond, the event page
+                * can be directly checked to get the id of the channel
+                * that has the interrupt pending.
+                */
+               void *page_addr = hv_cpu->synic_event_page;
+               union hv_synic_event_flags *event
+                       = (union hv_synic_event_flags *)page_addr +
+                                                VMBUS_MESSAGE_SINT;
+
+               maxbits = HV_EVENT_FLAGS_COUNT;
+               recv_int_page = event->flags;
+       }
+
+       if (unlikely(!recv_int_page))
+               return;
+
+       for_each_set_bit(relid, recv_int_page, maxbits) {
+               struct vmbus_channel *channel;
+
+               if (!sync_test_and_clear_bit(relid, recv_int_page))
+                       continue;
+
+               /* Special case - vmbus channel protocol msg */
+               if (relid == 0)
+                       continue;
+
+               /* Find channel based on relid */
+               list_for_each_entry(channel, &hv_cpu->chan_list, percpu_list) {
+                       if (channel->offermsg.child_relid != relid)
+                               continue;
+
+                       switch (channel->callback_mode) {
+                       case HV_CALL_ISR:
+                               vmbus_channel_isr(channel);
+                               break;
+
+                       case HV_CALL_BATCHED:
+                               hv_begin_read(&channel->inbound);
+                               /* fallthrough */
+                       case HV_CALL_DIRECT:
+                               tasklet_schedule(&channel->callback_event);
+                       }
+               }
+       }
+}
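vmbus_chan_sched() dispatches according to the new per-channel modes: HV_CALL_ISR invokes the callback straight from interrupt context, HV_CALL_BATCHED masks further host interrupts via hv_begin_read() before deferring, and HV_CALL_DIRECT simply schedules the channel's own tasklet. A sketch of how a latency-sensitive client might opt in, under the assumption that setting channel->callback_mode before vmbus_open() is the intended interface (the full series may provide a setter helper instead):

        static void my_channel_isr(void *context)
        {
                struct vmbus_channel *chan = context;

                /* Runs from vmbus_isr(): must be short and must not sleep. */
                my_drain_ring(chan);            /* placeholder helper */
        }

        static int my_open(struct vmbus_channel *chan)
        {
                chan->callback_mode = HV_CALL_ISR;
                return vmbus_open(chan, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
                                  my_channel_isr, chan);
        }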
+
 static void vmbus_isr(void)
 {
-       int cpu = smp_processor_id();
-       void *page_addr;
+       struct hv_per_cpu_context *hv_cpu
+               = this_cpu_ptr(hv_context.cpu_context);
+       void *page_addr = hv_cpu->synic_event_page;
        struct hv_message *msg;
        union hv_synic_event_flags *event;
        bool handled = false;
 
-       page_addr = hv_context.synic_event_page[cpu];
-       if (page_addr == NULL)
+       if (unlikely(page_addr == NULL))
                return;
 
        event = (union hv_synic_event_flags *)page_addr +
@@ -932,10 +981,8 @@ static void vmbus_isr(void)
                (vmbus_proto_version == VERSION_WIN7)) {
 
                /* Since we are a child, we only need to check bit 0 */
-               if (sync_test_and_clear_bit(0,
-                       (unsigned long *) &event->flags32[0])) {
+               if (sync_test_and_clear_bit(0, event->flags))
                        handled = true;
-               }
        } else {
                /*
                 * Our host is win8 or above. The signaling mechanism
@@ -947,18 +994,17 @@ static void vmbus_isr(void)
        }
 
        if (handled)
-               tasklet_schedule(hv_context.event_dpc[cpu]);
-
+               vmbus_chan_sched(hv_cpu);
 
-       page_addr = hv_context.synic_message_page[cpu];
+       page_addr = hv_cpu->synic_message_page;
        msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
 
        /* Check if there are actual msgs to be processed */
        if (msg->header.message_type != HVMSG_NONE) {
                if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
-                       hv_process_timer_expiration(msg, cpu);
+                       hv_process_timer_expiration(msg, hv_cpu);
                else
-                       tasklet_schedule(hv_context.msg_dpc[cpu]);
+                       tasklet_schedule(&hv_cpu->msg_dpc);
        }
 
        add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
@@ -986,7 +1032,7 @@ static int vmbus_bus_init(void)
 
        ret = bus_register(&hv_bus);
        if (ret)
-               goto err_cleanup;
+               return ret;
 
        hv_setup_vmbus_irq(vmbus_isr);
 
@@ -997,14 +1043,16 @@ static int vmbus_bus_init(void)
         * Initialize the per-cpu interrupt state and
         * connect to the host.
         */
-       on_each_cpu(hv_synic_init, NULL, 1);
+       ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/hyperv:online",
+                               hv_synic_init, hv_synic_cleanup);
+       if (ret < 0)
+               goto err_alloc;
+       hyperv_cpuhp_online = ret;
+
        ret = vmbus_connect();
        if (ret)
                goto err_connect;
 
-       if (vmbus_proto_version > VERSION_WIN7)
-               cpu_hotplug_disable();
-
        /*
         * Only register if the crash MSRs are available
         */
@@ -1019,16 +1067,13 @@ static int vmbus_bus_init(void)
        return 0;
 
 err_connect:
-       on_each_cpu(hv_synic_cleanup, NULL, 1);
+       cpuhp_remove_state(hyperv_cpuhp_online);
 err_alloc:
        hv_synic_free();
        hv_remove_vmbus_irq();
 
        bus_unregister(&hv_bus);
 
-err_cleanup:
-       hv_cleanup(false);
-
        return ret;
 }
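Note the dynamic hotplug state pattern above: cpuhp_setup_state() with CPUHP_AP_ONLINE_DYN runs hv_synic_init() on every online CPU and, on success, returns the allocated state number (> 0), which is saved in hyperv_cpuhp_online so the teardown paths can call cpuhp_remove_state() symmetrically; this replaces the old on_each_cpu() calls and the cpu_hotplug_disable() workaround. A generic sketch of the same pattern with hypothetical callbacks:

        static int my_hp_state;

        static int my_cpu_online(unsigned int cpu)
        {
                /* per-CPU setup, also run for CPUs that come online later */
                return 0;
        }

        static int my_cpu_down(unsigned int cpu)
        {
                /* per-CPU teardown, run on hot-unplug and on state removal */
                return 0;
        }

        static int my_init(void)
        {
                int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "drv:online",
                                            my_cpu_online, my_cpu_down);
                if (ret < 0)
                        return ret;
                my_hp_state = ret;      /* remember the dynamically allocated state */
                return 0;
        }

        static void my_exit(void)
        {
                cpuhp_remove_state(my_hp_state);
        }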
 
@@ -1478,13 +1523,13 @@ static struct acpi_driver vmbus_acpi_driver = {
 
 static void hv_kexec_handler(void)
 {
-       int cpu;
-
        hv_synic_clockevents_cleanup();
        vmbus_initiate_unload(false);
-       for_each_online_cpu(cpu)
-               smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
-       hv_cleanup(false);
+       vmbus_connection.conn_state = DISCONNECTED;
+       /* Make sure conn_state is set as hv_synic_cleanup checks for it */
+       mb();
+       cpuhp_remove_state(hyperv_cpuhp_online);
+       hyperv_cleanup();
 };
 
 static void hv_crash_handler(struct pt_regs *regs)
@@ -1495,8 +1540,9 @@ static void hv_crash_handler(struct pt_regs *regs)
         * doing the cleanup for current CPU only. This should be sufficient
         * for kdump.
         */
-       hv_synic_cleanup(NULL);
-       hv_cleanup(true);
+       vmbus_connection.conn_state = DISCONNECTED;
+       hv_synic_cleanup(smp_processor_id());
+       hyperv_cleanup();
 };
 
 static int __init hv_acpi_init(void)
@@ -1547,24 +1593,24 @@ static void __exit vmbus_exit(void)
        hv_synic_clockevents_cleanup();
        vmbus_disconnect();
        hv_remove_vmbus_irq();
-       for_each_online_cpu(cpu)
-               tasklet_kill(hv_context.msg_dpc[cpu]);
+       for_each_online_cpu(cpu) {
+               struct hv_per_cpu_context *hv_cpu
+                       = per_cpu_ptr(hv_context.cpu_context, cpu);
+
+               tasklet_kill(&hv_cpu->msg_dpc);
+       }
        vmbus_free_channels();
+
        if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
                unregister_die_notifier(&hyperv_die_block);
                atomic_notifier_chain_unregister(&panic_notifier_list,
                                                 &hyperv_panic_block);
        }
        bus_unregister(&hv_bus);
-       hv_cleanup(false);
-       for_each_online_cpu(cpu) {
-               tasklet_kill(hv_context.event_dpc[cpu]);
-               smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
-       }
+
+       cpuhp_remove_state(hyperv_cpuhp_online);
        hv_synic_free();
        acpi_bus_unregister_driver(&vmbus_acpi_driver);
-       if (vmbus_proto_version > VERSION_WIN7)
-               cpu_hotplug_enable();
 }
 
 
index 17741969026e0e23391763d8ce1907729c1184e1..26cfac3e6de7be45d6de4e7d2ffa1e8c52a3f39d 100644 (file)
@@ -242,6 +242,7 @@ static void *etm_setup_aux(int event_cpu, void **pages,
        if (!sink_ops(sink)->alloc_buffer)
                goto err;
 
+       cpu = cpumask_first(mask);
        /* Get the AUX specific data from the sink buffer */
        event_data->snk_config =
                        sink_ops(sink)->alloc_buffer(sink, cpu, pages,
index 031480f2c34d0f68166e924b180167a7c943d4d9..d1340fb4e457b5093a136c2eaab63f9f865e65a7 100644 (file)
@@ -216,10 +216,14 @@ static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
                goto out;
 
        /* Go from generic option to ETMv4 specifics */
-       if (attr->config & BIT(ETM_OPT_CYCACC))
-               config->cfg |= ETMv4_MODE_CYCACC;
+       if (attr->config & BIT(ETM_OPT_CYCACC)) {
+               config->cfg |= BIT(4);
+               /* TRM: Must program this for cycacc to work */
+               config->ccctlr = ETM_CYC_THRESHOLD_DEFAULT;
+       }
        if (attr->config & BIT(ETM_OPT_TS))
-               config->cfg |= ETMv4_MODE_TIMESTAMP;
+               /* bit[11], Global timestamp tracing bit */
+               config->cfg |= BIT(11);
 
 out:
        return ret;
index ba8d3f86de2129dd869f156ade0d4a286ff158eb..b3b5ea7b7fb3b25fb2adc7fb077f01fabade613e 100644 (file)
 #define ETM_ARCH_V4                    0x40
 #define ETMv4_SYNC_MASK                        0x1F
 #define ETM_CYC_THRESHOLD_MASK         0xFFF
+#define ETM_CYC_THRESHOLD_DEFAULT       0x100
 #define ETMv4_EVENT_MASK               0xFF
 #define ETM_CNTR_MAX_VAL               0xFFFF
 #define ETM_TRACEID_MASK               0x3f
index e4c55c5f998884f24b216861a2bc5c624b2376c8..93fc26f01bab61e59379681598d64320b7625651 100644 (file)
@@ -356,7 +356,7 @@ static void stm_generic_unlink(struct stm_data *stm_data,
        if (!drvdata || !drvdata->csdev)
                return;
 
-       stm_disable(drvdata->csdev, NULL);
+       coresight_disable(drvdata->csdev);
 }
 
 static phys_addr_t
index a579a0f258402c0096bc8bdb562d4e1b501fe3fb..22c1aeeb64215d855613422b7a075c2e8a5458bb 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
+#include <linux/platform_data/ti-aemif.h>
 
 #define TA_SHIFT       2
 #define RHOLD_SHIFT    4
@@ -335,6 +336,8 @@ static int aemif_probe(struct platform_device *pdev)
        struct device_node *np = dev->of_node;
        struct device_node *child_np;
        struct aemif_device *aemif;
+       struct aemif_platform_data *pdata;
+       struct of_dev_auxdata *dev_lookup;
 
        if (np == NULL)
                return 0;
@@ -343,6 +346,9 @@ static int aemif_probe(struct platform_device *pdev)
        if (!aemif)
                return -ENOMEM;
 
+       pdata = dev_get_platdata(&pdev->dev);
+       dev_lookup = pdata ? pdata->dev_lookup : NULL;
+
        platform_set_drvdata(pdev, aemif);
 
        aemif->clk = devm_clk_get(dev, NULL);
@@ -390,7 +396,7 @@ static int aemif_probe(struct platform_device *pdev)
         * parameters are set.
         */
        for_each_available_child_of_node(np, child_np) {
-               ret = of_platform_populate(child_np, NULL, NULL, dev);
+               ret = of_platform_populate(child_np, NULL, dev_lookup, dev);
                if (ret < 0)
                        goto error;
        }
index 64971baf11faebaff879c58067d23fd9bf29895a..c290990d73edf87ece9b179ac3d845eef27531e9 100644 (file)
@@ -474,11 +474,15 @@ config SRAM
        bool "Generic on-chip SRAM driver"
        depends on HAS_IOMEM
        select GENERIC_ALLOCATOR
+       select SRAM_EXEC if ARM
        help
          This driver allows you to declare a memory region to be managed by
          the genalloc API. It is supposed to be used for small on-chip SRAM
          areas found on many SoCs.
 
+config SRAM_EXEC
+       bool
+
 config VEXPRESS_SYSCFG
        bool "Versatile Express System Configuration driver"
        depends on VEXPRESS_CONFIG
@@ -487,6 +491,7 @@ config VEXPRESS_SYSCFG
          ARM Ltd. Versatile Express uses specialised platform configuration
          bus. System Configuration interface is one of the possible means
          of generating transactions on this bus.
+
 config PANEL
        tristate "Parallel port LCD/Keypad Panel support"
        depends on PARPORT
@@ -494,14 +499,14 @@ config PANEL
          Say Y here if you have an HD44780 or KS-0074 LCD connected to your
          parallel port. This driver also features 4 and 6-key keypads. The LCD
          is accessible through the /dev/lcd char device (10, 156), and the
-         keypad through /dev/keypad (10, 185). Both require misc device to be
-         enabled. This code can either be compiled as a module, or linked into
-         the kernel and started at boot. If you don't understand what all this
-         is about, say N.
+         keypad through /dev/keypad (10, 185). This code can either be
+         compiled as a module, or linked into the kernel and started at boot.
+         If you don't understand what all this is about, say N.
+
+if PANEL
 
 config PANEL_PARPORT
        int "Default parallel port number (0=LPT1)"
-       depends on PANEL
        range 0 255
        default "0"
        ---help---
@@ -513,7 +518,6 @@ config PANEL_PARPORT
 
 config PANEL_PROFILE
        int "Default panel profile (0-5, 0=custom)"
-       depends on PANEL
        range 0 5
        default "5"
        ---help---
@@ -534,7 +538,7 @@ config PANEL_PROFILE
          for experts.
 
 config PANEL_KEYPAD
-       depends on PANEL && PANEL_PROFILE="0"
+       depends on PANEL_PROFILE="0"
        int "Keypad type (0=none, 1=old 6 keys, 2=new 6 keys, 3=Nexcom 4 keys)"
        range 0 3
        default 0
@@ -551,7 +555,7 @@ config PANEL_KEYPAD
          supports simultaneous keys pressed when the keypad supports them.
 
 config PANEL_LCD
-       depends on PANEL && PANEL_PROFILE="0"
+       depends on PANEL_PROFILE="0"
        int "LCD type (0=none, 1=custom, 2=old //, 3=ks0074, 4=hantronix, 5=Nexcom)"
        range 0 5
        default 0
@@ -574,7 +578,7 @@ config PANEL_LCD
           that those values changed from the 2.4 driver for better consistency.
 
 config PANEL_LCD_HEIGHT
-       depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+       depends on PANEL_PROFILE="0" && PANEL_LCD="1"
        int "Number of lines on the LCD (1-2)"
        range 1 2
        default 2
@@ -583,7 +587,7 @@ config PANEL_LCD_HEIGHT
          It can either be 1 or 2.
 
 config PANEL_LCD_WIDTH
-       depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+       depends on PANEL_PROFILE="0" && PANEL_LCD="1"
        int "Number of characters per line on the LCD (1-40)"
        range 1 40
        default 40
@@ -592,7 +596,7 @@ config PANEL_LCD_WIDTH
          Common values are 16,20,24,40.
 
 config PANEL_LCD_BWIDTH
-       depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+       depends on PANEL_PROFILE="0" && PANEL_LCD="1"
        int "Internal LCD line width (1-40, 40 by default)"
        range 1 40
        default 40
@@ -608,7 +612,7 @@ config PANEL_LCD_BWIDTH
          If you don't know, put '40' here.
 
 config PANEL_LCD_HWIDTH
-       depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+       depends on PANEL_PROFILE="0" && PANEL_LCD="1"
        int "Hardware LCD line width (1-64, 64 by default)"
        range 1 64
        default 64
@@ -622,7 +626,7 @@ config PANEL_LCD_HWIDTH
          64 here for a 2x40.
 
 config PANEL_LCD_CHARSET
-       depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+       depends on PANEL_PROFILE="0" && PANEL_LCD="1"
        int "LCD character set (0=normal, 1=KS0074)"
        range 0 1
        default 0
@@ -638,7 +642,7 @@ config PANEL_LCD_CHARSET
          If you don't know, use the normal one (0).
 
 config PANEL_LCD_PROTO
-       depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+       depends on PANEL_PROFILE="0" && PANEL_LCD="1"
        int "LCD communication mode (0=parallel 8 bits, 1=serial)"
        range 0 1
        default 0
@@ -651,7 +655,7 @@ config PANEL_LCD_PROTO
          parallel LCD, and 1 for a serial LCD.
 
 config PANEL_LCD_PIN_E
-       depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
+       depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
         int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) "
        range -17 17
        default 14
@@ -666,7 +670,7 @@ config PANEL_LCD_PIN_E
          Default for the 'E' pin in custom profile is '14' (AUTOFEED).
 
 config PANEL_LCD_PIN_RS
-       depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
+       depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
         int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) "
        range -17 17
        default 17
@@ -681,7 +685,7 @@ config PANEL_LCD_PIN_RS
          Default for the 'RS' pin in custom profile is '17' (SELECT IN).
 
 config PANEL_LCD_PIN_RW
-       depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
+       depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
         int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) "
        range -17 17
        default 16
@@ -696,7 +700,7 @@ config PANEL_LCD_PIN_RW
          Default for the 'RW' pin in custom profile is '16' (INIT).
 
 config PANEL_LCD_PIN_SCL
-       depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
+       depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
         int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) "
        range -17 17
        default 1
@@ -711,7 +715,7 @@ config PANEL_LCD_PIN_SCL
          Default for the 'SCL' pin in custom profile is '1' (STROBE).
 
 config PANEL_LCD_PIN_SDA
-       depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
+       depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
         int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) "
        range -17 17
        default 2
@@ -726,7 +730,7 @@ config PANEL_LCD_PIN_SDA
          Default for the 'SDA' pin in custom profile is '2' (D0).
 
 config PANEL_LCD_PIN_BL
-       depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+       depends on PANEL_PROFILE="0" && PANEL_LCD="1"
         int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) "
        range -17 17
        default 0
@@ -741,7 +745,6 @@ config PANEL_LCD_PIN_BL
          Default for the 'BL' pin in custom profile is '0' (uncontrolled).
 
 config PANEL_CHANGE_MESSAGE
-       depends on PANEL
        bool "Change LCD initialization message ?"
        default "n"
        ---help---
@@ -754,7 +757,7 @@ config PANEL_CHANGE_MESSAGE
          say 'N' and keep the default message with the version.
 
 config PANEL_BOOT_MESSAGE
-       depends on PANEL && PANEL_CHANGE_MESSAGE="y"
+       depends on PANEL_CHANGE_MESSAGE="y"
        string "New initialization message"
        default ""
        ---help---
@@ -766,6 +769,8 @@ config PANEL_BOOT_MESSAGE
          An empty message will only clear the display at driver init time. Any other
          printf()-formatted message is valid with newline and escape codes.
 
+endif # PANEL
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
index 31983366090a781441a3235cac5883d6fbb27fa4..7a3ea89339b4d07c34289a539c4f9bd2f12d2aff 100644 (file)
@@ -47,6 +47,7 @@ obj-$(CONFIG_INTEL_MEI)               += mei/
 obj-$(CONFIG_VMWARE_VMCI)      += vmw_vmci/
 obj-$(CONFIG_LATTICE_ECP3_CONFIG)      += lattice-ecp3-config.o
 obj-$(CONFIG_SRAM)             += sram.o
+obj-$(CONFIG_SRAM_EXEC)                += sram-exec.o
 obj-y                          += mic/
 obj-$(CONFIG_GENWQE)           += genwqe/
 obj-$(CONFIG_ECHO)             += echo/
index c4e41c26649e9c096675b7d6bdab82e8cc4ce30a..de58762097c4b285baa4f686c87bee856efd63e3 100644 (file)
@@ -100,4 +100,14 @@ config EEPROM_DIGSY_MTC_CFG
 
          If unsure, say N.
 
+config EEPROM_IDT_89HPESX
+       tristate "IDT 89HPESx PCIe-switches EEPROM / CSR support"
+       depends on I2C && SYSFS
+       help
+         Enable this driver to get read/write access to EEPROM / CSRs
+         over the IDT PCIe-switch i2c-slave interface.
+
+         This driver can also be built as a module. If so, the module
+         will be called idt_89hpesx.
+
 endmenu
index fc1e81d292673b14732e9268b7cb9cdca9d921fb..90a52624ddeb0c093a7f66e6b8a2b2c6b10e4f55 100644 (file)
@@ -5,3 +5,4 @@ obj-$(CONFIG_EEPROM_MAX6875)    += max6875.o
 obj-$(CONFIG_EEPROM_93CX6)     += eeprom_93cx6.o
 obj-$(CONFIG_EEPROM_93XX46)    += eeprom_93xx46.o
 obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o
+obj-$(CONFIG_EEPROM_IDT_89HPESX) += idt_89hpesx.o
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
new file mode 100644 (file)
index 0000000..4a22a1d
--- /dev/null
@@ -0,0 +1,1581 @@
+/*
+ *   This file is provided under a GPLv2 license.  When using or
+ *   redistributing this file, you may do so under that license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright (C) 2016 T-Platforms. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify it
+ *   under the terms and conditions of the GNU General Public License,
+ *   version 2, as published by the Free Software Foundation.
+ *
+ *   This program is distributed in the hope that it will be useful, but WITHOUT
+ *   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *   FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ *   more details.
+ *
+ *   You should have received a copy of the GNU General Public License along
+ *   with this program; if not, it can be found <http://www.gnu.org/licenses/>.
+ *
+ *   The full GNU General Public License is included in this distribution in
+ *   the file called "COPYING".
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * IDT PCIe-switch NTB Linux driver
+ *
+ * Contact Information:
+ * Serge Semin <fancer.lancer@gmail.com>, <Sergey.Semin@t-platforms.ru>
+ */
+/*
+ *           NOTE on the IDT 89HPESx SMBus-slave interface driver
+ *    This driver is primarily developed to provide access to the EEPROM device
+ * of IDT PCIe-switches. IDT provides a simple SMBus interface to perform IO
+ * operations from/to the EEPROM, which is located on the private (so called
+ * Master) SMBus of the switches. Using that interface the driver creates a
+ * simple binary sysfs-file in the device directory:
+ * /sys/bus/i2c/devices/<bus>-<devaddr>/eeprom
+ * If the read-only flag is specified in the dts-node of the device description,
+ * User-space applications won't be able to write to the EEPROM sysfs-node.
+ *    Additionally, the IDT 89HPESx SMBus interface has the ability to read and
+ * write device CSRs. This driver exposes a debugfs-file to perform simple IO
+ * operations using that ability, for basic debug purposes only. In particular,
+ * the following file is created in the specific debugfs-directory:
+ * /sys/kernel/debug/idt_csr/
+ * Format of the debugfs-node is:
+ * $ cat /sys/kernel/debug/idt_csr/<bus>-<devaddr>/<devname>;
+ * <CSR address>:<CSR value>
+ * So reading the content of the file gives the current CSR address and its value.
+ * If a User-space application wishes to change the current CSR address,
+ * it can just write a proper value to the sysfs-file:
+ * $ echo "<CSR address>" > /sys/kernel/debug/idt_csr/<bus>-<devaddr>/<devname>
+ * If it wants to change the CSR value as well, the format of the write
+ * operation is:
+ * $ echo "<CSR address>:<CSR value>" > \
+ *        /sys/kernel/debug/idt_csr/<bus>-<devaddr>/<devname>;
+ * CSR address and value can be any of hexadecimal, decimal or octal format.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+#include <linux/debugfs.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/i2c.h>
+#include <linux/pci_ids.h>
+#include <linux/delay.h>
+
+#define IDT_NAME               "89hpesx"
+#define IDT_89HPESX_DESC       "IDT 89HPESx SMBus-slave interface driver"
+#define IDT_89HPESX_VER                "1.0"
+
+MODULE_DESCRIPTION(IDT_89HPESX_DESC);
+MODULE_VERSION(IDT_89HPESX_VER);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("T-platforms");
+
+/*
+ * csr_dbgdir - CSR read/write operations Debugfs directory
+ */
+static struct dentry *csr_dbgdir;
+
+/*
+ * struct idt_89hpesx_dev - IDT 89HPESx device data structure
+ * @eesize:    Size of EEPROM in bytes (calculated from "idt,eecompatible")
+ * @eero:      EEPROM Read-only flag
+ * @eeaddr:    EEPROM custom address
+ *
+ * @inieecmd:  Initial cmd value for EEPROM read/write operations
+ * @inicsrcmd: Initial cmd value for CSR read/write operations
+ * @iniccode:  Initial command code value for IO-operations
+ *
+ * @csr:       CSR address to perform read operation
+ *
+ * @smb_write: SMBus write method
+ * @smb_read:  SMBus read method
+ * @smb_mtx:   SMBus mutex
+ *
+ * @client:    i2c client used to perform IO operations
+ *
+ * @ee_file:   EEPROM read/write sysfs-file
+ * @csr_file:  CSR read/write debugfs-node
+ */
+struct idt_smb_seq;
+struct idt_89hpesx_dev {
+       u32 eesize;
+       bool eero;
+       u8 eeaddr;
+
+       u8 inieecmd;
+       u8 inicsrcmd;
+       u8 iniccode;
+
+       u16 csr;
+
+       int (*smb_write)(struct idt_89hpesx_dev *, const struct idt_smb_seq *);
+       int (*smb_read)(struct idt_89hpesx_dev *, struct idt_smb_seq *);
+       struct mutex smb_mtx;
+
+       struct i2c_client *client;
+
+       struct bin_attribute *ee_file;
+       struct dentry *csr_dir;
+       struct dentry *csr_file;
+};
+
+/*
+ * struct idt_smb_seq - sequence of data to be read/written from/to IDT 89HPESx
+ * @ccode:     SMBus command code
+ * @bytecnt:   Byte count of operation
+ * @data:      Data to be written
+ */
+struct idt_smb_seq {
+       u8 ccode;
+       u8 bytecnt;
+       u8 *data;
+};
+
+/*
+ * struct idt_eeprom_seq - sequence of data to be read/written from/to EEPROM
+ * @cmd:       Transaction CMD
+ * @eeaddr:    EEPROM custom address
+ * @memaddr:   Internal memory address of EEPROM
+ * @data:      Data to be written at the memory address
+ */
+struct idt_eeprom_seq {
+       u8 cmd;
+       u8 eeaddr;
+       u16 memaddr;
+       u8 data;
+} __packed;
+
+/*
+ * struct idt_csr_seq - sequence of data to be read/written from/to CSR
+ * @cmd:       Transaction CMD
+ * @csraddr:   Internal IDT device CSR address
+ * @data:      Data to be read/written from/to the CSR address
+ */
+struct idt_csr_seq {
+       u8 cmd;
+       u16 csraddr;
+       u32 data;
+} __packed;
+
+/*
+ * SMBus command code macros
+ * @CCODE_END:         Indicates the end of transaction
+ * @CCODE_START:       Indicates the start of transaction
+ * @CCODE_CSR:         CSR read/write transaction
+ * @CCODE_EEPROM:      EEPROM read/write transaction
+ * @CCODE_BYTE:                Supplied data has BYTE length
+ * @CCODE_WORD:                Supplied data has WORD length
+ * @CCODE_BLOCK:       Supplied data has variable length, passed in the bytecnt
+ *                     byte that directly follows the CCODE byte
+ */
+#define CCODE_END      ((u8)0x01)
+#define CCODE_START    ((u8)0x02)
+#define CCODE_CSR      ((u8)0x00)
+#define CCODE_EEPROM   ((u8)0x04)
+#define CCODE_BYTE     ((u8)0x00)
+#define CCODE_WORD     ((u8)0x20)
+#define CCODE_BLOCK    ((u8)0x40)
+#define CCODE_PEC      ((u8)0x80)
+
+/*
+ * EEPROM command macros
+ * @EEPROM_OP_WRITE:   EEPROM write operation
+ * @EEPROM_OP_READ:    EEPROM read operation
+ * @EEPROM_USA:                Use specified address of EEPROM
+ * @EEPROM_NAERR:      EEPROM device is not ready to respond
+ * @EEPROM_LAERR:      EEPROM arbitration loss error
+ * @EEPROM_MSS:                EEPROM misplaced start & stop bits error
+ * @EEPROM_WR_CNT:     Bytes count to perform write operation
+ * @EEPROM_WRRD_CNT:   Bytes count to write before reading
+ * @EEPROM_RD_CNT:     Bytes count to perform read operation
+ * @EEPROM_DEF_SIZE:   Fallback size of EEPROM
+ * @EEPROM_DEF_ADDR:   Default EEPROM address
+ * @EEPROM_TOUT:       Timeout before retrying a read operation if the EEPROM is busy
+ */
+#define EEPROM_OP_WRITE        ((u8)0x00)
+#define EEPROM_OP_READ ((u8)0x01)
+#define EEPROM_USA     ((u8)0x02)
+#define EEPROM_NAERR   ((u8)0x08)
+#define EEPROM_LAERR    ((u8)0x10)
+#define EEPROM_MSS     ((u8)0x20)
+#define EEPROM_WR_CNT  ((u8)5)
+#define EEPROM_WRRD_CNT        ((u8)4)
+#define EEPROM_RD_CNT  ((u8)5)
+#define EEPROM_DEF_SIZE        ((u16)4096)
+#define EEPROM_DEF_ADDR        ((u8)0x50)
+#define EEPROM_TOUT    (100)
+
+/*
+ * CSR command macros
+ * @CSR_DWE:           Enable all four bytes of the operation
+ * @CSR_OP_WRITE:      CSR write operation
+ * @CSR_OP_READ:       CSR read operation
+ * @CSR_RERR:          Read operation error
+ * @CSR_WERR:          Write operation error
+ * @CSR_WR_CNT:                Bytes count to perform write operation
+ * @CSR_WRRD_CNT:      Bytes count to write before reading
+ * @CSR_RD_CNT:                Bytes count to perform read operation
+ * @CSR_MAX:           Maximum CSR address
+ * @CSR_DEF:           Default CSR address
+ * @CSR_REAL_ADDR:     CSR real unshifted address
+ */
+#define CSR_DWE                        ((u8)0x0F)
+#define CSR_OP_WRITE           ((u8)0x00)
+#define CSR_OP_READ            ((u8)0x10)
+#define CSR_RERR               ((u8)0x40)
+#define CSR_WERR               ((u8)0x80)
+#define CSR_WR_CNT             ((u8)7)
+#define CSR_WRRD_CNT           ((u8)3)
+#define CSR_RD_CNT             ((u8)7)
+#define CSR_MAX                        ((u32)0x3FFFF)
+#define CSR_DEF                        ((u16)0x0000)
+#define CSR_REAL_ADDR(val)     ((unsigned int)val << 2)
+
+/*
+ * IDT 89HPESx basic register
+ * @IDT_VIDDID_CSR:    PCIe VID and DID of IDT 89HPESx
+ * @IDT_VID_MASK:      Mask of VID
+ */
+#define IDT_VIDDID_CSR ((u32)0x0000)
+#define IDT_VID_MASK   ((u32)0xFFFF)
+
+/*
+ * IDT 89HPESx can send a NACK when a new command is sent before the previous
+ * one has finished execution. In this case the driver retries the operation
+ * a certain number of times.
+ * @RETRY_CNT:         Number of retries before giving up and failing
+ * @idt_smb_safe:      Generate a retry loop on corresponding SMBus method
+ */
+#define RETRY_CNT (128)
+#define idt_smb_safe(ops, args...) ({ \
+       int __retry = RETRY_CNT; \
+       s32 __sts; \
+       do { \
+               __sts = i2c_smbus_ ## ops ## _data(args); \
+       } while (__retry-- && __sts < 0); \
+       __sts; \
+})
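+
+/*
+ * Usage sketch (editor's illustration): a call such as
+ *
+ *   sts = idt_smb_safe(read_byte, pdev->client, ccode);
+ *
+ * expands to a loop around i2c_smbus_read_byte_data(pdev->client, ccode)
+ * that retries up to RETRY_CNT times while a negative status is returned.
+ */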
+
+/*===========================================================================
+ *                         i2c bus level IO-operations
+ *===========================================================================
+ */
+
+/*
+ * idt_smb_write_byte() - SMBus write method when only the
+ *                        I2C_FUNC_SMBUS_WRITE_BYTE_DATA operation is available
+ * @pdev:      Pointer to the driver data
+ * @seq:       Sequence of data to be written
+ */
+static int idt_smb_write_byte(struct idt_89hpesx_dev *pdev,
+                             const struct idt_smb_seq *seq)
+{
+       s32 sts;
+       u8 ccode;
+       int idx;
+
+       /* Loop over the supplied data sending bytes one by one */
+       for (idx = 0; idx < seq->bytecnt; idx++) {
+               /* Collect the command code byte */
+               ccode = seq->ccode | CCODE_BYTE;
+               if (idx == 0)
+                       ccode |= CCODE_START;
+               if (idx == seq->bytecnt - 1)
+                       ccode |= CCODE_END;
+
+               /* Send data to the device */
+               sts = idt_smb_safe(write_byte, pdev->client, ccode,
+                       seq->data[idx]);
+               if (sts != 0)
+                       return (int)sts;
+       }
+
+       return 0;
+}
+
+/*
+ * idt_smb_read_byte() - SMBus read method when only the
+ *                       I2C_FUNC_SMBUS_READ_BYTE_DATA operation is available
+ * @pdev:      Pointer to the driver data
+ * @seq:       Buffer to read data to
+ */
+static int idt_smb_read_byte(struct idt_89hpesx_dev *pdev,
+                            struct idt_smb_seq *seq)
+{
+       s32 sts;
+       u8 ccode;
+       int idx;
+
+       /* Loop over the supplied buffer receiving bytes one by one */
+       for (idx = 0; idx < seq->bytecnt; idx++) {
+               /* Collect the command code byte */
+               ccode = seq->ccode | CCODE_BYTE;
+               if (idx == 0)
+                       ccode |= CCODE_START;
+               if (idx == seq->bytecnt - 1)
+                       ccode |= CCODE_END;
+
+               /* Read data from the device */
+               sts = idt_smb_safe(read_byte, pdev->client, ccode);
+               if (sts < 0)
+                       return (int)sts;
+
+               seq->data[idx] = (u8)sts;
+       }
+
+       return 0;
+}
+
+/*
+ * idt_smb_write_word() - SMBus write method when the
+ *                        I2C_FUNC_SMBUS_WRITE_BYTE_DATA and
+ *                        I2C_FUNC_SMBUS_WRITE_WORD_DATA operations are available
+ * @pdev:      Pointer to the driver data
+ * @seq:       Sequence of data to be written
+ */
+static int idt_smb_write_word(struct idt_89hpesx_dev *pdev,
+                             const struct idt_smb_seq *seq)
+{
+       s32 sts;
+       u8 ccode;
+       int idx, evencnt;
+
+       /* Calculate the even count of data to send */
+       evencnt = seq->bytecnt - (seq->bytecnt % 2);
+
+       /* Loop over the supplied data sending two bytes at a time */
+       for (idx = 0; idx < evencnt; idx += 2) {
+               /* Collect the command code byte */
+               ccode = seq->ccode | CCODE_WORD;
+               if (idx == 0)
+                       ccode |= CCODE_START;
+               if (idx == evencnt - 2)
+                       ccode |= CCODE_END;
+
+               /* Send word data to the device */
+               sts = idt_smb_safe(write_word, pdev->client, ccode,
+                       *(u16 *)&seq->data[idx]);
+               if (sts != 0)
+                       return (int)sts;
+       }
+
+       /* If there is an odd number of bytes then send just the last byte */
+       if (seq->bytecnt != evencnt) {
+               /* Collect the command code byte */
+               ccode = seq->ccode | CCODE_BYTE | CCODE_END;
+               if (idx == 0)
+                       ccode |= CCODE_START;
+
+               /* Send byte data to the device */
+               sts = idt_smb_safe(write_byte, pdev->client, ccode,
+                       seq->data[idx]);
+               if (sts != 0)
+                       return (int)sts;
+       }
+
+       return 0;
+}
+
+/*
+ * idt_smb_read_word() - SMBus read method when the
+ *                       I2C_FUNC_SMBUS_READ_BYTE_DATA and
+ *                       I2C_FUNC_SMBUS_READ_WORD_DATA operations are available
+ * @pdev:      Pointer to the driver data
+ * @seq:       Buffer to read data to
+ */
+static int idt_smb_read_word(struct idt_89hpesx_dev *pdev,
+                            struct idt_smb_seq *seq)
+{
+       s32 sts;
+       u8 ccode;
+       int idx, evencnt;
+
+       /* Calculate the even count of data to read */
+       evencnt = seq->bytecnt - (seq->bytecnt % 2);
+
+       /* Loop over the supplied data reading two bytes at a time */
+       for (idx = 0; idx < evencnt; idx += 2) {
+               /* Collect the command code byte */
+               ccode = seq->ccode | CCODE_WORD;
+               if (idx == 0)
+                       ccode |= CCODE_START;
+               if (idx == evencnt - 2)
+                       ccode |= CCODE_END;
+
+               /* Read word data from the device */
+               sts = idt_smb_safe(read_word, pdev->client, ccode);
+               if (sts < 0)
+                       return (int)sts;
+
+               *(u16 *)&seq->data[idx] = (u16)sts;
+       }
+
+       /* If there is an odd number of bytes then receive just the last byte */
+       if (seq->bytecnt != evencnt) {
+               /* Collect the command code byte */
+               ccode = seq->ccode | CCODE_BYTE | CCODE_END;
+               if (idx == 0)
+                       ccode |= CCODE_START;
+
+               /* Read last data byte from the device */
+               sts = idt_smb_safe(read_byte, pdev->client, ccode);
+               if (sts < 0)
+                       return (int)sts;
+
+               seq->data[idx] = (u8)sts;
+       }
+
+       return 0;
+}
+
+/*
+ * idt_smb_write_block() - SMBus write method when the
+ *                         I2C_FUNC_SMBUS_WRITE_BLOCK_DATA operation is available
+ * @pdev:      Pointer to the driver data
+ * @seq:       Sequence of data to be written
+ */
+static int idt_smb_write_block(struct idt_89hpesx_dev *pdev,
+                              const struct idt_smb_seq *seq)
+{
+       u8 ccode;
+
+       /* Return error if too much data passed to send */
+       if (seq->bytecnt > I2C_SMBUS_BLOCK_MAX)
+               return -EINVAL;
+
+       /* Collect the command code byte */
+       ccode = seq->ccode | CCODE_BLOCK | CCODE_START | CCODE_END;
+
+       /* Send block of data to the device */
+       return idt_smb_safe(write_block, pdev->client, ccode, seq->bytecnt,
+               seq->data);
+}
+
+/*
+ * idt_smb_read_block() - SMBus read method when the
+ *                        I2C_FUNC_SMBUS_READ_BLOCK_DATA operation is available
+ * @pdev:      Pointer to the driver data
+ * @seq:       Buffer to read data to
+ */
+static int idt_smb_read_block(struct idt_89hpesx_dev *pdev,
+                             struct idt_smb_seq *seq)
+{
+       s32 sts;
+       u8 ccode;
+
+       /* Return error if too much data is requested */
+       if (seq->bytecnt > I2C_SMBUS_BLOCK_MAX)
+               return -EINVAL;
+
+       /* Collect the command code byte */
+       ccode = seq->ccode | CCODE_BLOCK | CCODE_START | CCODE_END;
+
+       /* Read block of data from the device */
+       sts = idt_smb_safe(read_block, pdev->client, ccode, seq->data);
+       if (sts != seq->bytecnt)
+               return (sts < 0 ? sts : -ENODATA);
+
+       return 0;
+}
+
+/*
+ * idt_smb_write_i2c_block() - SMBus write method when the
+ *                             I2C_FUNC_SMBUS_WRITE_I2C_BLOCK operation is available
+ * @pdev:      Pointer to the driver data
+ * @seq:       Sequence of data to be written
+ *
+ * NOTE It's the usual SMBus block write operation, except that the actual data
+ * length is sent as the first byte of data
+ */
+static int idt_smb_write_i2c_block(struct idt_89hpesx_dev *pdev,
+                                  const struct idt_smb_seq *seq)
+{
+       u8 ccode, buf[I2C_SMBUS_BLOCK_MAX + 1];
+
+       /* Return error if too much data passed to send */
+       if (seq->bytecnt > I2C_SMBUS_BLOCK_MAX)
+               return -EINVAL;
+
+       /* Collect the data to send. The length byte must precede the data */
+       buf[0] = seq->bytecnt;
+       memcpy(&buf[1], seq->data, seq->bytecnt);
+
+       /* Collect the command code byte */
+       ccode = seq->ccode | CCODE_BLOCK | CCODE_START | CCODE_END;
+
+       /* Send length and block of data to the device */
+       return idt_smb_safe(write_i2c_block, pdev->client, ccode,
+               seq->bytecnt + 1, buf);
+}
+
+/*
+ * idt_smb_read_i2c_block() - SMBus read method when the
+ *                            I2C_FUNC_SMBUS_READ_I2C_BLOCK operation is available
+ * @pdev:      Pointer to the driver data
+ * @seq:       Buffer to read data to
+ *
+ * NOTE It's the usual SMBus block read operation, except that the actual data
+ * length is retrieved as the first byte of data
+ */
+static int idt_smb_read_i2c_block(struct idt_89hpesx_dev *pdev,
+                                 struct idt_smb_seq *seq)
+{
+       u8 ccode, buf[I2C_SMBUS_BLOCK_MAX + 1];
+       s32 sts;
+
+       /* Return error if too much data is requested */
+       if (seq->bytecnt > I2C_SMBUS_BLOCK_MAX)
+               return -EINVAL;
+
+       /* Collect the command code byte */
+       ccode = seq->ccode | CCODE_BLOCK | CCODE_START | CCODE_END;
+
+       /* Read length and block of data from the device */
+       sts = idt_smb_safe(read_i2c_block, pdev->client, ccode,
+               seq->bytecnt + 1, buf);
+       if (sts != seq->bytecnt + 1)
+               return (sts < 0 ? sts : -ENODATA);
+       if (buf[0] != seq->bytecnt)
+               return -ENODATA;
+
+       /* Copy retrieved data to the output data buffer */
+       memcpy(seq->data, &buf[1], seq->bytecnt);
+
+       return 0;
+}
+
+/*===========================================================================
+ *                          EEPROM IO-operations
+ *===========================================================================
+ */
+
+/*
+ * idt_eeprom_read_byte() - read just one byte from EEPROM
+ * @pdev:      Pointer to the driver data
+ * @memaddr:   Start EEPROM memory address
+ * @data:      Pointer to the data byte read from EEPROM
+ */
+static int idt_eeprom_read_byte(struct idt_89hpesx_dev *pdev, u16 memaddr,
+                               u8 *data)
+{
+       struct device *dev = &pdev->client->dev;
+       struct idt_eeprom_seq eeseq;
+       struct idt_smb_seq smbseq;
+       int ret, retry;
+
+       /* Initialize SMBus sequence fields */
+       smbseq.ccode = pdev->iniccode | CCODE_EEPROM;
+       smbseq.data = (u8 *)&eeseq;
+
+       /*
+        * Sometimes EEPROM may respond with NACK if it's busy with previous
+        * operation, so we need to perform a few attempts of read cycle
+        */
+       retry = RETRY_CNT;
+       do {
+               /* Send EEPROM memory address to read data from */
+               smbseq.bytecnt = EEPROM_WRRD_CNT;
+               eeseq.cmd = pdev->inieecmd | EEPROM_OP_READ;
+               eeseq.eeaddr = pdev->eeaddr;
+               eeseq.memaddr = cpu_to_le16(memaddr);
+               ret = pdev->smb_write(pdev, &smbseq);
+               if (ret != 0) {
+                       dev_err(dev, "Failed to init eeprom addr 0x%04hx",
+                               memaddr);
+                       break;
+               }
+
+               /* Perform read operation */
+               smbseq.bytecnt = EEPROM_RD_CNT;
+               ret = pdev->smb_read(pdev, &smbseq);
+               if (ret != 0) {
+                       dev_err(dev, "Failed to read eeprom data 0x%04hx",
+                               memaddr);
+                       break;
+               }
+
+               /* Restart read operation if the device is busy */
+               if (retry && (eeseq.cmd & EEPROM_NAERR)) {
+                       dev_dbg(dev, "EEPROM busy, retry reading after %d ms",
+                               EEPROM_TOUT);
+                       msleep(EEPROM_TOUT);
+                       continue;
+               }
+
+               /* Check whether IDT successfully read data from EEPROM */
+               if (eeseq.cmd & (EEPROM_NAERR | EEPROM_LAERR | EEPROM_MSS)) {
+                       dev_err(dev,
+                               "Communication with eeprom failed, cmd 0x%hhx",
+                               eeseq.cmd);
+                       ret = -EREMOTEIO;
+                       break;
+               }
+
+               /* Save retrieved data and exit the loop */
+               *data = eeseq.data;
+               break;
+       } while (retry--);
+
+       /* Return the status of operation */
+       return ret;
+}
+
+/*
+ * idt_eeprom_write() - EEPROM write operation
+ * @pdev:      Pointer to the driver data
+ * @memaddr:   Start EEPROM memory address
+ * @len:       Length of data to be written
+ * @data:      Data to be written to EEPROM
+ */
+static int idt_eeprom_write(struct idt_89hpesx_dev *pdev, u16 memaddr, u16 len,
+                           const u8 *data)
+{
+       struct device *dev = &pdev->client->dev;
+       struct idt_eeprom_seq eeseq;
+       struct idt_smb_seq smbseq;
+       int ret;
+       u16 idx;
+
+       /* Initialize SMBus sequence fields */
+       smbseq.ccode = pdev->iniccode | CCODE_EEPROM;
+       smbseq.data = (u8 *)&eeseq;
+
+       /* Send data byte-by-byte, checking if it is successfully written */
+       for (idx = 0; idx < len; idx++, memaddr++) {
+               /* Lock IDT SMBus device */
+               mutex_lock(&pdev->smb_mtx);
+
+               /* Perform write operation */
+               smbseq.bytecnt = EEPROM_WR_CNT;
+               eeseq.cmd = pdev->inieecmd | EEPROM_OP_WRITE;
+               eeseq.eeaddr = pdev->eeaddr;
+               eeseq.memaddr = cpu_to_le16(memaddr);
+               eeseq.data = data[idx];
+               ret = pdev->smb_write(pdev, &smbseq);
+               if (ret != 0) {
+                       dev_err(dev,
+                               "Failed to write 0x%04hx:0x%02hhx to eeprom",
+                               memaddr, data[idx]);
+                       goto err_mutex_unlock;
+               }
+
+               /*
+                * Check whether the data is successfully written by reading
+                * from the same EEPROM memory address.
+                */
+               eeseq.data = ~data[idx];
+               ret = idt_eeprom_read_byte(pdev, memaddr, &eeseq.data);
+               if (ret != 0)
+                       goto err_mutex_unlock;
+
+               /* Check whether the read byte is the same as written one */
+               if (eeseq.data != data[idx]) {
+                       dev_err(dev, "Values don't match 0x%02hhx != 0x%02hhx",
+                               eeseq.data, data[idx]);
+                       ret = -EREMOTEIO;
+                       goto err_mutex_unlock;
+               }
+
+               /* Unlock IDT SMBus device */
+err_mutex_unlock:
+               mutex_unlock(&pdev->smb_mtx);
+               if (ret != 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * idt_eeprom_read() - EEPROM read operation
+ * @pdev:      Pointer to the driver data
+ * @memaddr:   Start EEPROM memory address
+ * @len:       Length of data to read
+ * @buf:       Buffer to read data to
+ */
+static int idt_eeprom_read(struct idt_89hpesx_dev *pdev, u16 memaddr, u16 len,
+                          u8 *buf)
+{
+       int ret;
+       u16 idx;
+
+       /* Read data byte-by-byte, retrying if it wasn't successful */
+       for (idx = 0; idx < len; idx++, memaddr++) {
+               /* Lock IDT SMBus device */
+               mutex_lock(&pdev->smb_mtx);
+
+               /* Just read the byte to the buffer */
+               ret = idt_eeprom_read_byte(pdev, memaddr, &buf[idx]);
+
+               /* Unlock IDT SMBus device */
+               mutex_unlock(&pdev->smb_mtx);
+
+               /* Return error if read operation failed */
+               if (ret != 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/*===========================================================================
+ *                          CSR IO-operations
+ *===========================================================================
+ */
+
+/*
+ * idt_csr_write() - CSR write operation
+ * @pdev:      Pointer to the driver data
+ * @csraddr:   CSR address (without the two least significant bits)
+ * @data:      Data to be written to CSR
+ */
+static int idt_csr_write(struct idt_89hpesx_dev *pdev, u16 csraddr,
+                        const u32 data)
+{
+       struct device *dev = &pdev->client->dev;
+       struct idt_csr_seq csrseq;
+       struct idt_smb_seq smbseq;
+       int ret;
+
+       /* Initialize SMBus sequence fields */
+       smbseq.ccode = pdev->iniccode | CCODE_CSR;
+       smbseq.data = (u8 *)&csrseq;
+
+       /* Lock IDT SMBus device */
+       mutex_lock(&pdev->smb_mtx);
+
+       /* Perform write operation */
+       smbseq.bytecnt = CSR_WR_CNT;
+       csrseq.cmd = pdev->inicsrcmd | CSR_OP_WRITE;
+       csrseq.csraddr = cpu_to_le16(csraddr);
+       csrseq.data = cpu_to_le32(data);
+       ret = pdev->smb_write(pdev, &smbseq);
+       if (ret != 0) {
+               dev_err(dev, "Failed to write 0x%04x: 0x%04x to csr",
+                       CSR_REAL_ADDR(csraddr), data);
+               goto err_mutex_unlock;
+       }
+
+       /* Send CSR address to read data from */
+       smbseq.bytecnt = CSR_WRRD_CNT;
+       csrseq.cmd = pdev->inicsrcmd | CSR_OP_READ;
+       ret = pdev->smb_write(pdev, &smbseq);
+       if (ret != 0) {
+               dev_err(dev, "Failed to init csr address 0x%04x",
+                       CSR_REAL_ADDR(csraddr));
+               goto err_mutex_unlock;
+       }
+
+       /* Perform read operation */
+       smbseq.bytecnt = CSR_RD_CNT;
+       ret = pdev->smb_read(pdev, &smbseq);
+       if (ret != 0) {
+               dev_err(dev, "Failed to read csr 0x%04x",
+                       CSR_REAL_ADDR(csraddr));
+               goto err_mutex_unlock;
+       }
+
+       /* Check whether IDT successfully retrieved CSR data */
+       if (csrseq.cmd & (CSR_RERR | CSR_WERR)) {
+               dev_err(dev, "IDT failed to perform CSR r/w");
+               ret = -EREMOTEIO;
+               goto err_mutex_unlock;
+       }
+
+       /* Unlock IDT SMBus device */
+err_mutex_unlock:
+       mutex_unlock(&pdev->smb_mtx);
+
+       return ret;
+}
+
+/*
+ * idt_csr_read() - CSR read operation
+ * @pdev:      Pointer to the driver data
+ * @csraddr:   CSR address (without the two least significant bits)
+ * @data:      Buffer for the data read from CSR
+ */
+static int idt_csr_read(struct idt_89hpesx_dev *pdev, u16 csraddr, u32 *data)
+{
+       struct device *dev = &pdev->client->dev;
+       struct idt_csr_seq csrseq;
+       struct idt_smb_seq smbseq;
+       int ret;
+
+       /* Initialize SMBus sequence fields */
+       smbseq.ccode = pdev->iniccode | CCODE_CSR;
+       smbseq.data = (u8 *)&csrseq;
+
+       /* Lock IDT SMBus device */
+       mutex_lock(&pdev->smb_mtx);
+
+       /* Send CSR register address before reading it */
+       smbseq.bytecnt = CSR_WRRD_CNT;
+       csrseq.cmd = pdev->inicsrcmd | CSR_OP_READ;
+       csrseq.csraddr = cpu_to_le16(csraddr);
+       ret = pdev->smb_write(pdev, &smbseq);
+       if (ret != 0) {
+               dev_err(dev, "Failed to init csr address 0x%04x",
+                       CSR_REAL_ADDR(csraddr));
+               goto err_mutex_unlock;
+       }
+
+       /* Perform read operation */
+       smbseq.bytecnt = CSR_RD_CNT;
+       ret = pdev->smb_read(pdev, &smbseq);
+       if (ret != 0) {
+               dev_err(dev, "Failed to read csr 0x%04x",
+                       CSR_REAL_ADDR(csraddr));
+               goto err_mutex_unlock;
+       }
+
+       /* Check whether IDT successfully retrieved CSR data */
+       if (csrseq.cmd & (CSR_RERR | CSR_WERR)) {
+               dev_err(dev, "IDT failed to perform CSR r/w");
+               ret = -EREMOTEIO;
+               goto err_mutex_unlock;
+       }
+
+       /* Save data retrieved from IDT */
+       *data = le32_to_cpu(csrseq.data);
+
+       /* Unlock IDT SMBus device */
+err_mutex_unlock:
+       mutex_unlock(&pdev->smb_mtx);
+
+       return ret;
+}
+
+/*===========================================================================
+ *                          Sysfs/debugfs-nodes IO-operations
+ *===========================================================================
+ */
+
+/*
+ * eeprom_write() - EEPROM sysfs-node write callback
+ * @filp:      Pointer to the file system node
+ * @kobj:      Pointer to the kernel object related to the sysfs-node
+ * @attr:      Attributes of the file
+ * @buf:       Buffer with the data to be written to EEPROM
+ * @off:       Offset in EEPROM at which the data should be written
+ * @count:     Number of bytes to write
+ */
+static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
+                           struct bin_attribute *attr,
+                           char *buf, loff_t off, size_t count)
+{
+       struct idt_89hpesx_dev *pdev;
+       int ret;
+
+       /* Retrieve driver data */
+       pdev = dev_get_drvdata(kobj_to_dev(kobj));
+
+       /* Perform EEPROM write operation */
+       ret = idt_eeprom_write(pdev, (u16)off, (u16)count, (u8 *)buf);
+       return (ret != 0 ? ret : count);
+}
+
+/*
+ * eeprom_read() - EEPROM sysfs-node read callback
+ * @filp:      Pointer to the file system node
+ * @kobj:      Pointer to the kernel object related to the sysfs-node
+ * @attr:      Attributes of the file
+ * @buf:       Buffer to read the data to
+ * @off:       Offset in EEPROM to read the data from
+ * @count:     Number of bytes to read
+ */
+static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
+                          struct bin_attribute *attr,
+                          char *buf, loff_t off, size_t count)
+{
+       struct idt_89hpesx_dev *pdev;
+       int ret;
+
+       /* Retrieve driver data */
+       pdev = dev_get_drvdata(kobj_to_dev(kobj));
+
+       /* Perform EEPROM read operation */
+       ret = idt_eeprom_read(pdev, (u16)off, (u16)count, (u8 *)buf);
+       return (ret != 0 ? ret : count);
+}
+
+/*
+ * idt_dbgfs_csr_write() - CSR debugfs-node write callback
+ * @filep:     Pointer to the file system file descriptor
+ * @buf:       Buffer to read data from
+ * @count:     Size of the buffer
+ * @offp:      Offset within the file
+ *
+ * It accepts either "0x<reg addr>:0x<value>" for saving register address
+ * and writing value to specified DWORD register or "0x<reg addr>" for
+ * just saving register address in order to perform next read operation.
+ *
+ * WARNING No spaces are allowed. The incoming string must be strictly
+ * formatted as: "<reg addr>:<value>". The register address must be aligned
+ * to 4 bytes (one DWORD).
+ */
+static ssize_t idt_dbgfs_csr_write(struct file *filep, const char __user *ubuf,
+                                  size_t count, loff_t *offp)
+{
+       struct idt_89hpesx_dev *pdev = filep->private_data;
+       char *colon_ch, *csraddr_str, *csrval_str;
+       int ret, csraddr_len, csrval_len;
+       u32 csraddr, csrval;
+       char *buf;
+
+       /* Copy data from User-space */
+       buf = kmalloc(count + 1, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       ret = simple_write_to_buffer(buf, count, offp, ubuf, count);
+       if (ret < 0)
+               goto free_buf;
+       buf[count] = 0;
+
+       /* Find position of colon in the buffer */
+       colon_ch = strnchr(buf, count, ':');
+
+       /*
+        * If a colon is present then a new CSR value should be parsed as
+        * well, so allocate a buffer for the CSR address substring.
+        * If no colon is found, then the string must contain just the register
+        * address with no new CSR value
+        */
+       if (colon_ch != NULL) {
+               csraddr_len = colon_ch - buf;
+               csraddr_str =
+                       kmalloc(sizeof(char)*(csraddr_len + 1), GFP_KERNEL);
+               if (csraddr_str == NULL) {
+                       ret = -ENOMEM;
+                       goto free_buf;
+               }
+               /* Copy the register address to the substring buffer */
+               strncpy(csraddr_str, buf, csraddr_len);
+               csraddr_str[csraddr_len] = '\0';
+               /* Register value must follow the colon */
+               csrval_str = colon_ch + 1;
+               csrval_len = strnlen(csrval_str, count - csraddr_len);
+       } else /* if (str_colon == NULL) */ {
+               csraddr_str = (char *)buf; /* Just to silence the warning */
+               csraddr_len = strnlen(csraddr_str, count);
+               csrval_str = NULL;
+               csrval_len = 0;
+       }
+
+       /* Convert CSR address to u32 value */
+       ret = kstrtou32(csraddr_str, 0, &csraddr);
+       if (ret != 0)
+               goto free_csraddr_str;
+
+       /* Check whether passed register address is valid */
+       if (csraddr > CSR_MAX || !IS_ALIGNED(csraddr, SZ_4)) {
+               ret = -EINVAL;
+               goto free_csraddr_str;
+       }
+
+       /* Shift the register address right to get a u16 address */
+       pdev->csr = (csraddr >> 2);
+
+       /* Parse new CSR value and send it to IDT, if colon has been found */
+       if (colon_ch != NULL) {
+               ret = kstrtou32(csrval_str, 0, &csrval);
+               if (ret != 0)
+                       goto free_csraddr_str;
+
+               ret = idt_csr_write(pdev, pdev->csr, csrval);
+               if (ret != 0)
+                       goto free_csraddr_str;
+       }
+
+       /* Free memory only if colon has been found */
+free_csraddr_str:
+       if (colon_ch != NULL)
+               kfree(csraddr_str);
+
+       /* Free buffer allocated for data retrieved from User-space */
+free_buf:
+       kfree(buf);
+
+       return (ret != 0 ? ret : count);
+}
+
+/*
+ * idt_dbgfs_csr_read() - CSR debugfs-node read callback
+ * @filep:     Pointer to the file system file descriptor
+ * @buf:       Buffer to write data to
+ * @count:     Size of the buffer
+ * @offp:      Offset within the file
+ *
+ * It just prints the pair "0x<reg addr>:0x<value>" to passed buffer.
+ */
+#define CSRBUF_SIZE    ((size_t)32)
+static ssize_t idt_dbgfs_csr_read(struct file *filep, char __user *ubuf,
+                                 size_t count, loff_t *offp)
+{
+       struct idt_89hpesx_dev *pdev = filep->private_data;
+       u32 csraddr, csrval;
+       char buf[CSRBUF_SIZE];
+       int ret, size;
+
+       /* Perform CSR read operation */
+       ret = idt_csr_read(pdev, pdev->csr, &csrval);
+       if (ret != 0)
+               return ret;
+
+       /* Shift the register address left to get the real address */
+       csraddr = ((u32)pdev->csr << 2);
+
+       /* Print the "0x<reg addr>:0x<value>" to buffer */
+       size = snprintf(buf, CSRBUF_SIZE, "0x%05x:0x%08x\n",
+               (unsigned int)csraddr, (unsigned int)csrval);
+
+       /* Copy data to User-space */
+       return simple_read_from_buffer(ubuf, count, offp, buf, size);
+}
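+
+/*
+ * Usage sketch (editor's illustration; the paths assume debugfs is mounted at
+ * /sys/kernel/debug and a 89hpes32nt8ag2 switch sits at address 0x74 on i2c
+ * bus 0):
+ *
+ *   # write 0xdeadbeef to DWORD register 0x10004
+ *   echo "0x10004:0xdeadbeef" > /sys/kernel/debug/idt_csr/0-0074/89hpes32nt8ag2
+ *   # read back the register selected by the last write
+ *   cat /sys/kernel/debug/idt_csr/0-0074/89hpes32nt8ag2
+ */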
+
+/*
+ * eeprom_attribute - EEPROM sysfs-node attributes
+ *
+ * NOTE The size will be changed in compliance with the OF node. The EEPROM
+ * attribute will also be read-only if the corresponding flag is specified in
+ * the OF node.
+ */
+static BIN_ATTR_RW(eeprom, EEPROM_DEF_SIZE);
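+
+/*
+ * Usage sketch (editor's illustration; the path assumes an IDT switch at
+ * address 0x74 on i2c bus 0):
+ *
+ *   hexdump -C /sys/bus/i2c/devices/0-0074/eeprom
+ */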
+
+/*
+ * csr_dbgfs_ops - CSR debugfs-node read/write operations
+ */
+static const struct file_operations csr_dbgfs_ops = {
+       .owner = THIS_MODULE,
+       .open = simple_open,
+       .write = idt_dbgfs_csr_write,
+       .read = idt_dbgfs_csr_read
+};
+
+/*===========================================================================
+ *                       Driver init/deinit methods
+ *===========================================================================
+ */
+
+/*
+ * idt_set_defval() - disable EEPROM access by default
+ * @pdev:      Pointer to the driver data
+ */
+static void idt_set_defval(struct idt_89hpesx_dev *pdev)
+{
+       /* If OF info is missing then use the following values */
+       pdev->eesize = 0;
+       pdev->eero = true;
+       pdev->inieecmd = 0;
+       pdev->eeaddr = 0;
+}
+
+#ifdef CONFIG_OF
+static const struct i2c_device_id ee_ids[];
+/*
+ * idt_ee_match_id() - check whether the node belongs to compatible EEPROMs
+ */
+static const struct i2c_device_id *idt_ee_match_id(struct device_node *node)
+{
+       const struct i2c_device_id *id = ee_ids;
+       char devname[I2C_NAME_SIZE];
+
+       /* Retrieve the device name without manufacturer name */
+       if (of_modalias_node(node, devname, sizeof(devname)))
+               return NULL;
+
+       /* Search through the device name */
+        while (id->name[0]) {
+                if (strcmp(devname, id->name) == 0)
+                        return id;
+                id++;
+        }
+        return NULL;
+}
+
+/*
+ * idt_get_ofdata() - get IDT i2c-device parameters from device tree
+ * @pdev:      Pointer to the driver data
+ */
+static void idt_get_ofdata(struct idt_89hpesx_dev *pdev)
+{
+       const struct device_node *node = pdev->client->dev.of_node;
+       struct device *dev = &pdev->client->dev;
+
+       /* Read dts node parameters */
+       if (node) {
+               const struct i2c_device_id *ee_id = NULL;
+               struct device_node *child;
+               const __be32 *addr_be;
+               int len;
+
+               /* Walk through all child nodes looking for compatible one */
+               for_each_available_child_of_node(node, child) {
+                       ee_id = idt_ee_match_id(child);
+                       if (IS_ERR_OR_NULL(ee_id)) {
+                               dev_warn(dev, "Skip unsupported child node %s",
+                                       child->full_name);
+                               continue;
+                       } else
+                               break;
+               }
+
+               /* If there is no child EEPROM device, then set zero size */
+               if (!ee_id) {
+                       idt_set_defval(pdev);
+                       return;
+               }
+
+               /* Retrieve EEPROM size */
+               pdev->eesize = (u32)ee_id->driver_data;
+
+               /* Get custom EEPROM address from 'reg' attribute */
+               addr_be = of_get_property(child, "reg", &len);
+               if (!addr_be || (len < sizeof(*addr_be))) {
+                       dev_warn(dev, "No reg on %s, use default address %d",
+                               child->full_name, EEPROM_DEF_ADDR);
+                       pdev->inieecmd = 0;
+                       pdev->eeaddr = EEPROM_DEF_ADDR << 1;
+               } else {
+                       pdev->inieecmd = EEPROM_USA;
+                       pdev->eeaddr = be32_to_cpup(addr_be) << 1;
+               }
+
+               /* Check EEPROM 'read-only' flag */
+               if (of_get_property(child, "read-only", NULL))
+                       pdev->eero = true;
+               else /* if (!of_get_property(node, "read-only", NULL)) */
+                       pdev->eero = false;
+
+               dev_dbg(dev, "EEPROM of %u bytes found at address 0x%02hhx",
+                       pdev->eesize, pdev->eeaddr);
+       } else {
+               dev_warn(dev, "No dts node, EEPROM access disabled");
+               idt_set_defval(pdev);
+       }
+}
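+
+/*
+ * Illustrative device tree fragment (editor's sketch; compatible strings and
+ * addresses are examples only, see
+ * Documentation/devicetree/bindings/misc/idt_89hpesx.txt for the binding):
+ *
+ *   idt@74 {
+ *           compatible = "idt,89hpes32nt8ag2";
+ *           reg = <0x74>;
+ *
+ *           eeprom@50 {
+ *                   compatible = "atmel,24c64";
+ *                   reg = <0x50>;
+ *                   read-only;
+ *           };
+ *   };
+ */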
+#else
+static void idt_get_ofdata(struct idt_89hpesx_dev *pdev)
+{
+       struct device *dev = &pdev->client->dev;
+
+       dev_warn(dev, "OF table is unsupported, EEPROM access disabled");
+
+       /* Nothing we can do, just set the default values */
+       idt_set_defval(pdev);
+}
+#endif /* CONFIG_OF */
+
+/*
+ * idt_create_pdev() - create and init data structure of the driver
+ * @client:    i2c client of IDT PCIe-switch device
+ */
+static struct idt_89hpesx_dev *idt_create_pdev(struct i2c_client *client)
+{
+       struct idt_89hpesx_dev *pdev;
+
+       /* Allocate memory for driver data */
+       pdev = devm_kmalloc(&client->dev, sizeof(struct idt_89hpesx_dev),
+               GFP_KERNEL);
+       if (pdev == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       /* Initialize basic fields of the data */
+       pdev->client = client;
+       i2c_set_clientdata(client, pdev);
+
+       /* Read OF nodes information */
+       idt_get_ofdata(pdev);
+
+       /* Initialize basic CSR CMD field - use full DWORD-sized r/w ops */
+       pdev->inicsrcmd = CSR_DWE;
+       pdev->csr = CSR_DEF;
+
+       /* Enable Packet Error Checking if it's supported by adapter */
+       if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_PEC)) {
+               pdev->iniccode = CCODE_PEC;
+               client->flags |= I2C_CLIENT_PEC;
+       } else /* PEC is unsupported */ {
+               pdev->iniccode = 0;
+       }
+
+       return pdev;
+}
+
+/*
+ * idt_free_pdev() - free data structure of the driver
+ * @pdev:      Pointer to the driver data
+ */
+static void idt_free_pdev(struct idt_89hpesx_dev *pdev)
+{
+       /* Clear driver data from device private field */
+       i2c_set_clientdata(pdev->client, NULL);
+}
+
+/*
+ * idt_set_smbus_ops() - set supported SMBus operations
+ * @pdev:      Pointer to the driver data
+ * Return status of smbus check operations
+ */
+static int idt_set_smbus_ops(struct idt_89hpesx_dev *pdev)
+{
+       struct i2c_adapter *adapter = pdev->client->adapter;
+       struct device *dev = &pdev->client->dev;
+
+       /* Check i2c adapter read functionality */
+       if (i2c_check_functionality(adapter,
+                                   I2C_FUNC_SMBUS_READ_BLOCK_DATA)) {
+               pdev->smb_read = idt_smb_read_block;
+               dev_dbg(dev, "SMBus block-read op chosen");
+       } else if (i2c_check_functionality(adapter,
+                                          I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
+               pdev->smb_read = idt_smb_read_i2c_block;
+               dev_dbg(dev, "SMBus i2c-block-read op chosen");
+       } else if (i2c_check_functionality(adapter,
+                                          I2C_FUNC_SMBUS_READ_WORD_DATA) &&
+                  i2c_check_functionality(adapter,
+                                          I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
+               pdev->smb_read = idt_smb_read_word;
+               dev_warn(dev, "Use slow word/byte SMBus read ops");
+       } else if (i2c_check_functionality(adapter,
+                                          I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
+               pdev->smb_read = idt_smb_read_byte;
+               dev_warn(dev, "Use slow byte SMBus read op");
+       } else /* no supported smbus read operations */ {
+               dev_err(dev, "No supported SMBus read op");
+               return -EPFNOSUPPORT;
+       }
+
+       /* Check i2c adapter write functionality */
+       if (i2c_check_functionality(adapter,
+                                   I2C_FUNC_SMBUS_WRITE_BLOCK_DATA)) {
+               pdev->smb_write = idt_smb_write_block;
+               dev_dbg(dev, "SMBus block-write op chosen");
+       } else if (i2c_check_functionality(adapter,
+                                          I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)) {
+               pdev->smb_write = idt_smb_write_i2c_block;
+               dev_dbg(dev, "SMBus i2c-block-write op chosen");
+       } else if (i2c_check_functionality(adapter,
+                                          I2C_FUNC_SMBUS_WRITE_WORD_DATA) &&
+                  i2c_check_functionality(adapter,
+                                          I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) {
+               pdev->smb_write = idt_smb_write_word;
+               dev_warn(dev, "Use slow word/byte SMBus write op");
+       } else if (i2c_check_functionality(adapter,
+                                          I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) {
+               pdev->smb_write = idt_smb_write_byte;
+               dev_warn(dev, "Use slow byte SMBus write op");
+       } else /* no supported smbus write operations */ {
+               dev_err(dev, "No supported SMBus write op");
+               return -EPFNOSUPPORT;
+       }
+
+       /* Initialize IDT SMBus slave interface mutex */
+       mutex_init(&pdev->smb_mtx);
+
+       return 0;
+}
+
+/*
+ * idt_check_dev() - check whether it's really IDT 89HPESx device
+ * @pdev:      Pointer to the driver data
+ * Return status of i2c adapter check operation
+ */
+static int idt_check_dev(struct idt_89hpesx_dev *pdev)
+{
+       struct device *dev = &pdev->client->dev;
+       u32 viddid;
+       int ret;
+
+       /* Read VID and DID directly from IDT memory space */
+       ret = idt_csr_read(pdev, IDT_VIDDID_CSR, &viddid);
+       if (ret != 0) {
+               dev_err(dev, "Failed to read VID/DID");
+               return ret;
+       }
+
+       /* Check whether it's IDT device */
+       if ((viddid & IDT_VID_MASK) != PCI_VENDOR_ID_IDT) {
+               dev_err(dev, "Got unsupported VID/DID: 0x%08x", viddid);
+               return -ENODEV;
+       }
+
+       dev_info(dev, "Found IDT 89HPES device VID:0x%04x, DID:0x%04x",
+               (viddid & IDT_VID_MASK), (viddid >> 16));
+
+       return 0;
+}
+
+/*
+ * idt_create_sysfs_files() - create sysfs attribute files
+ * @pdev:      Pointer to the driver data
+ * Return status of operation
+ */
+static int idt_create_sysfs_files(struct idt_89hpesx_dev *pdev)
+{
+       struct device *dev = &pdev->client->dev;
+       int ret;
+
+       /* Don't do anything if EEPROM isn't accessible */
+       if (pdev->eesize == 0) {
+               dev_dbg(dev, "Skip creating sysfs-files");
+               return 0;
+       }
+
+       /* Allocate memory for attribute file */
+       pdev->ee_file = devm_kmalloc(dev, sizeof(*pdev->ee_file), GFP_KERNEL);
+       if (!pdev->ee_file)
+               return -ENOMEM;
+
+       /* Copy the declared EEPROM attr structure to change some of fields */
+       memcpy(pdev->ee_file, &bin_attr_eeprom, sizeof(*pdev->ee_file));
+
+       /* In case of read-only EEPROM get rid of write ability */
+       if (pdev->eero) {
+               pdev->ee_file->attr.mode &= ~0200;
+               pdev->ee_file->write = NULL;
+       }
+       /* Create EEPROM sysfs file */
+       pdev->ee_file->size = pdev->eesize;
+       ret = sysfs_create_bin_file(&dev->kobj, pdev->ee_file);
+       if (ret != 0) {
+               dev_err(dev, "Failed to create EEPROM sysfs-node");
+               return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * idt_remove_sysfs_files() - remove sysfs attribute files
+ * @pdev:      Pointer to the driver data
+ */
+static void idt_remove_sysfs_files(struct idt_89hpesx_dev *pdev)
+{
+       struct device *dev = &pdev->client->dev;
+
+       /* Don't do anything if EEPROM wasn't accessible */
+       if (pdev->eesize == 0)
+               return;
+
+       /* Remove EEPROM sysfs file */
+       sysfs_remove_bin_file(&dev->kobj, pdev->ee_file);
+}
+
+/*
+ * idt_create_dbgfs_files() - create debugfs files
+ * @pdev:      Pointer to the driver data
+ */
+#define CSRNAME_LEN    ((size_t)32)
+static void idt_create_dbgfs_files(struct idt_89hpesx_dev *pdev)
+{
+       struct i2c_client *cli = pdev->client;
+       char fname[CSRNAME_LEN];
+
+       /* Create Debugfs directory for CSR file */
+       snprintf(fname, CSRNAME_LEN, "%d-%04hx", cli->adapter->nr, cli->addr);
+       pdev->csr_dir = debugfs_create_dir(fname, csr_dbgdir);
+
+       /* Create Debugfs file for CSR read/write operations */
+       pdev->csr_file = debugfs_create_file(cli->name, 0600,
+               pdev->csr_dir, pdev, &csr_dbgfs_ops);
+}
+
+/*
+ * idt_remove_dbgfs_files() - remove debugfs files
+ * @pdev:      Pointer to the driver data
+ */
+static void idt_remove_dbgfs_files(struct idt_89hpesx_dev *pdev)
+{
+       /* Remove the CSR directory and its debugfs node */
+       debugfs_remove_recursive(pdev->csr_dir);
+}
+
+/*
+ * idt_probe() - IDT 89HPESx driver probe() callback method
+ */
+static int idt_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+       struct idt_89hpesx_dev *pdev;
+       int ret;
+
+       /* Create driver data */
+       pdev = idt_create_pdev(client);
+       if (IS_ERR(pdev))
+               return PTR_ERR(pdev);
+
+       /* Set SMBus operations */
+       ret = idt_set_smbus_ops(pdev);
+       if (ret != 0)
+               goto err_free_pdev;
+
+       /* Check whether it is truly IDT 89HPESx device */
+       ret = idt_check_dev(pdev);
+       if (ret != 0)
+               goto err_free_pdev;
+
+       /* Create sysfs files */
+       ret = idt_create_sysfs_files(pdev);
+       if (ret != 0)
+               goto err_free_pdev;
+
+       /* Create debugfs files */
+       idt_create_dbgfs_files(pdev);
+
+       return 0;
+
+err_free_pdev:
+       idt_free_pdev(pdev);
+
+       return ret;
+}
+
+/*
+ * idt_remove() - IDT 89HPESx driver remove() callback method
+ */
+static int idt_remove(struct i2c_client *client)
+{
+       struct idt_89hpesx_dev *pdev = i2c_get_clientdata(client);
+
+       /* Remove debugfs files first */
+       idt_remove_dbgfs_files(pdev);
+
+       /* Remove sysfs files */
+       idt_remove_sysfs_files(pdev);
+
+       /* Discard driver data structure */
+       idt_free_pdev(pdev);
+
+       return 0;
+}
+
+/*
+ * ee_ids - array of supported EEPROMs
+ */
+static const struct i2c_device_id ee_ids[] = {
+       { "24c32",  4096},
+       { "24c64",  8192},
+       { "24c128", 16384},
+       { "24c256", 32768},
+       { "24c512", 65536},
+       {}
+};
+MODULE_DEVICE_TABLE(i2c, ee_ids);
+
+/*
+ * idt_ids - supported IDT 89HPESx devices
+ */
+static const struct i2c_device_id idt_ids[] = {
+       { "89hpes8nt2", 0 },
+       { "89hpes12nt3", 0 },
+
+       { "89hpes24nt6ag2", 0 },
+       { "89hpes32nt8ag2", 0 },
+       { "89hpes32nt8bg2", 0 },
+       { "89hpes12nt12g2", 0 },
+       { "89hpes16nt16g2", 0 },
+       { "89hpes24nt24g2", 0 },
+       { "89hpes32nt24ag2", 0 },
+       { "89hpes32nt24bg2", 0 },
+
+       { "89hpes12n3", 0 },
+       { "89hpes12n3a", 0 },
+       { "89hpes24n3", 0 },
+       { "89hpes24n3a", 0 },
+
+       { "89hpes32h8", 0 },
+       { "89hpes32h8g2", 0 },
+       { "89hpes48h12", 0 },
+       { "89hpes48h12g2", 0 },
+       { "89hpes48h12ag2", 0 },
+       { "89hpes16h16", 0 },
+       { "89hpes22h16", 0 },
+       { "89hpes22h16g2", 0 },
+       { "89hpes34h16", 0 },
+       { "89hpes34h16g2", 0 },
+       { "89hpes64h16", 0 },
+       { "89hpes64h16g2", 0 },
+       { "89hpes64h16ag2", 0 },
+
+       /* { "89hpes3t3", 0 }, // No SMBus-slave iface */
+       { "89hpes12t3g2", 0 },
+       { "89hpes24t3g2", 0 },
+       /* { "89hpes4t4", 0 }, // No SMBus-slave iface */
+       { "89hpes16t4", 0 },
+       { "89hpes4t4g2", 0 },
+       { "89hpes10t4g2", 0 },
+       { "89hpes16t4g2", 0 },
+       { "89hpes16t4ag2", 0 },
+       { "89hpes5t5", 0 },
+       { "89hpes6t5", 0 },
+       { "89hpes8t5", 0 },
+       { "89hpes8t5a", 0 },
+       { "89hpes24t6", 0 },
+       { "89hpes6t6g2", 0 },
+       { "89hpes24t6g2", 0 },
+       { "89hpes16t7", 0 },
+       { "89hpes32t8", 0 },
+       { "89hpes32t8g2", 0 },
+       { "89hpes48t12", 0 },
+       { "89hpes48t12g2", 0 },
+       { /* END OF LIST */ }
+};
+MODULE_DEVICE_TABLE(i2c, idt_ids);
+
+/*
+ * idt_driver - IDT 89HPESx driver structure
+ */
+static struct i2c_driver idt_driver = {
+       .driver = {
+               .name = IDT_NAME,
+       },
+       .probe = idt_probe,
+       .remove = idt_remove,
+       .id_table = idt_ids,
+};
+
+/*
+ * idt_init() - IDT 89HPESx driver init() callback method
+ */
+static int __init idt_init(void)
+{
+       /* Create Debugfs directory first */
+       if (debugfs_initialized())
+               csr_dbgdir = debugfs_create_dir("idt_csr", NULL);
+
+       /* Add new i2c-device driver */
+       return i2c_add_driver(&idt_driver);
+}
+module_init(idt_init);
+
+/*
+ * idt_exit() - IDT 89HPESx driver exit() callback method
+ */
+static void __exit idt_exit(void)
+{
+       /* Discard debugfs directory and all files if any */
+       debugfs_remove_recursive(csr_dbgdir);
+
+       /* Unregister i2c-device driver */
+       i2c_del_driver(&idt_driver);
+}
+module_exit(idt_exit);
index 6c1f49a85023e6abd80804ab71c27d57c1ccd5e0..4fd21e86ad56ea33f7d303dc108ac1888e4ac3fd 100644 (file)
@@ -1336,7 +1336,6 @@ static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs)
 static struct pci_error_handlers genwqe_err_handler = {
        .error_detected = genwqe_err_error_detected,
        .mmio_enabled   = genwqe_err_result_none,
-       .link_reset     = genwqe_err_result_none,
        .slot_reset     = genwqe_err_slot_reset,
        .resume         = genwqe_err_resume,
 };
index cba0837aee2ed1a2b12d2b2a1802b25b769aaba1..e3f4cd8876b518eaf2dd9fe760828208f5befc82 100644 (file)
@@ -81,12 +81,17 @@ void lkdtm_OVERFLOW(void)
        (void) recursive_loop(recur_count);
 }
 
+static noinline void __lkdtm_CORRUPT_STACK(void *stack)
+{
+       memset(stack, 'a', 64);
+}
+
 noinline void lkdtm_CORRUPT_STACK(void)
 {
        /* Use default char array length that triggers stack protection. */
        char data[8];
+       __lkdtm_CORRUPT_STACK(&data);
 
-       memset((void *)data, 'a', 64);
        pr_info("Corrupted stack with '%16s'...\n", data);
 }
 
index 16e4cf1109306e954455539324adf4c12ddfd110..b9a4cd4a9b682a57f893a5372dcb72f4e0f389da 100644 (file)
@@ -539,7 +539,9 @@ static void __exit lkdtm_module_exit(void)
        /* Handle test-specific clean-up. */
        lkdtm_usercopy_exit();
 
-       unregister_jprobe(lkdtm_jprobe);
+       if (lkdtm_jprobe != NULL)
+               unregister_jprobe(lkdtm_jprobe);
+
        pr_info("Crash point unregistered\n");
 }
 
index 466afb2611c6592b05e98c1c1a0abe1eb1088cea..0e7406ccb6dd5e8edc51ffc1bb0d2e4a168e02e4 100644 (file)
@@ -132,8 +132,7 @@ int mei_amthif_run_next_cmd(struct mei_device *dev)
 
        dev_dbg(dev->dev, "complete amthif cmd_list cb.\n");
 
-       cb = list_first_entry_or_null(&dev->amthif_cmd_list.list,
-                                       typeof(*cb), list);
+       cb = list_first_entry_or_null(&dev->amthif_cmd_list, typeof(*cb), list);
        if (!cb) {
                dev->iamthif_state = MEI_IAMTHIF_IDLE;
                cl->fp = NULL;
@@ -167,7 +166,7 @@ int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb)
 
        struct mei_device *dev = cl->dev;
 
-       list_add_tail(&cb->list, &dev->amthif_cmd_list.list);
+       list_add_tail(&cb->list, &dev->amthif_cmd_list);
 
        /*
         * The previous request is still in processing, queue this one.
@@ -211,7 +210,7 @@ unsigned int mei_amthif_poll(struct file *file, poll_table *wait)
  * Return: 0, OK; otherwise, error.
  */
 int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
-                        struct mei_cl_cb *cmpl_list)
+                        struct list_head *cmpl_list)
 {
        int ret;
 
@@ -237,7 +236,7 @@ int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
  */
 int mei_amthif_irq_read_msg(struct mei_cl *cl,
                            struct mei_msg_hdr *mei_hdr,
-                           struct mei_cl_cb *cmpl_list)
+                           struct list_head *cmpl_list)
 {
        struct mei_device *dev;
        int ret;
@@ -311,51 +310,31 @@ void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
        }
 }
 
-/**
- * mei_clear_list - removes all callbacks associated with file
- *             from mei_cb_list
- *
- * @file: file structure
- * @mei_cb_list: callbacks list
- *
- * mei_clear_list is called to clear resources associated with file
- * when application calls close function or Ctrl-C was pressed
- */
-static void mei_clear_list(const struct file *file,
-                          struct list_head *mei_cb_list)
-{
-       struct mei_cl_cb *cb, *next;
-
-       list_for_each_entry_safe(cb, next, mei_cb_list, list)
-               if (file == cb->fp)
-                       mei_io_cb_free(cb);
-}
-
 /**
 * mei_amthif_release - the release function
 *
 *  @dev: device structure
-*  @file: pointer to file structure
+*  @fp: pointer to file structure
 *
 *  Return: 0 on success, <0 on error
 */
-int mei_amthif_release(struct mei_device *dev, struct file *file)
+int mei_amthif_release(struct mei_device *dev, struct file *fp)
 {
-       struct mei_cl *cl = file->private_data;
+       struct mei_cl *cl = fp->private_data;
 
        if (dev->iamthif_open_count > 0)
                dev->iamthif_open_count--;
 
-       if (cl->fp == file && dev->iamthif_state != MEI_IAMTHIF_IDLE) {
+       if (cl->fp == fp && dev->iamthif_state != MEI_IAMTHIF_IDLE) {
 
                dev_dbg(dev->dev, "amthif canceled iamthif state %d\n",
-                   dev->iamthif_state);
+                       dev->iamthif_state);
                dev->iamthif_canceled = true;
        }
 
-       mei_clear_list(file, &dev->amthif_cmd_list.list);
-       mei_clear_list(file, &cl->rd_completed);
-       mei_clear_list(file, &dev->ctrl_rd_list.list);
+       /* Don't clean ctrl_rd_list here, the reads have to be completed */
+       mei_io_list_free_fp(&dev->amthif_cmd_list, fp);
+       mei_io_list_free_fp(&cl->rd_completed, fp);
 
        return 0;
 }
index 2d9c5dd06e423266680294e4d806e2e736d32156..cb3e9e0ca0497d38b866af5c442dab2d3495e932 100644 (file)
@@ -498,6 +498,25 @@ out:
 }
 EXPORT_SYMBOL_GPL(mei_cldev_enable);
 
+/**
+ * mei_cldev_unregister_callbacks - internal wrapper for unregistering
+ *  callbacks.
+ *
+ * @cldev: client device
+ */
+static void mei_cldev_unregister_callbacks(struct mei_cl_device *cldev)
+{
+       if (cldev->rx_cb) {
+               cancel_work_sync(&cldev->rx_work);
+               cldev->rx_cb = NULL;
+       }
+
+       if (cldev->notif_cb) {
+               cancel_work_sync(&cldev->notif_work);
+               cldev->notif_cb = NULL;
+       }
+}
+
 /**
  * mei_cldev_disable - disable me client device
  *     disconnect form the me client
@@ -519,6 +538,8 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
 
        bus = cldev->bus;
 
+       mei_cldev_unregister_callbacks(cldev);
+
        mutex_lock(&bus->device_lock);
 
        if (!mei_cl_is_connected(cl)) {
@@ -541,6 +562,37 @@ out:
 }
 EXPORT_SYMBOL_GPL(mei_cldev_disable);
 
+/**
+ * mei_cl_bus_module_get - acquire module of the underlying
+ *    hw module.
+ *
+ * @cl: host client
+ *
+ * Return: true on success; false if the module was removed.
+ */
+bool mei_cl_bus_module_get(struct mei_cl *cl)
+{
+       struct mei_cl_device *cldev = cl->cldev;
+
+       if (!cldev)
+               return true;
+
+       return try_module_get(cldev->bus->dev->driver->owner);
+}
+
+/**
+ * mei_cl_bus_module_put -  release the underlying hw module.
+ *
+ * @cl: host client
+ */
+void mei_cl_bus_module_put(struct mei_cl *cl)
+{
+       struct mei_cl_device *cldev = cl->cldev;
+
+       if (cldev)
+               module_put(cldev->bus->dev->driver->owner);
+}
+
 /**
  * mei_cl_device_find - find matching entry in the driver id table
  *
@@ -665,19 +717,12 @@ static int mei_cl_device_remove(struct device *dev)
        if (!cldev || !dev->driver)
                return 0;
 
-       if (cldev->rx_cb) {
-               cancel_work_sync(&cldev->rx_work);
-               cldev->rx_cb = NULL;
-       }
-       if (cldev->notif_cb) {
-               cancel_work_sync(&cldev->notif_work);
-               cldev->notif_cb = NULL;
-       }
-
        cldrv = to_mei_cl_driver(dev->driver);
        if (cldrv->remove)
                ret = cldrv->remove(cldev);
 
+       mei_cldev_unregister_callbacks(cldev);
+
        module_put(THIS_MODULE);
        dev->driver = NULL;
        return ret;
index b0395601c6ae83340f62cd801531632134b85a7a..68fe37b5bc52fbf5ca9bf49f984495464f65eb25 100644 (file)
@@ -377,19 +377,19 @@ static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
 }
 
 /**
- * __mei_io_list_flush - removes and frees cbs belonging to cl.
+ * __mei_io_list_flush_cl - removes and frees cbs belonging to cl.
  *
- * @list:  an instance of our list structure
+ * @head:  an instance of our list structure
  * @cl:    host client, can be NULL for flushing the whole list
  * @free:  whether to free the cbs
  */
-static void __mei_io_list_flush(struct mei_cl_cb *list,
-                               struct mei_cl *cl, bool free)
+static void __mei_io_list_flush_cl(struct list_head *head,
+                                  const struct mei_cl *cl, bool free)
 {
        struct mei_cl_cb *cb, *next;
 
        /* enable removing everything if no cl is specified */
-       list_for_each_entry_safe(cb, next, &list->list, list) {
+       list_for_each_entry_safe(cb, next, head, list) {
                if (!cl || mei_cl_cmp_id(cl, cb->cl)) {
                        list_del_init(&cb->list);
                        if (free)
@@ -399,25 +399,42 @@ static void __mei_io_list_flush(struct mei_cl_cb *list,
 }
 
 /**
- * mei_io_list_flush - removes list entry belonging to cl.
+ * mei_io_list_flush_cl - removes list entry belonging to cl.
  *
- * @list:  An instance of our list structure
+ * @head: An instance of our list structure
  * @cl: host client
  */
-void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
+static inline void mei_io_list_flush_cl(struct list_head *head,
+                                       const struct mei_cl *cl)
 {
-       __mei_io_list_flush(list, cl, false);
+       __mei_io_list_flush_cl(head, cl, false);
 }
 
 /**
- * mei_io_list_free - removes cb belonging to cl and free them
+ * mei_io_list_free_cl - removes cb belonging to cl and free them
  *
- * @list:  An instance of our list structure
+ * @head: An instance of our list structure
  * @cl: host client
  */
-static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
+static inline void mei_io_list_free_cl(struct list_head *head,
+                                      const struct mei_cl *cl)
 {
-       __mei_io_list_flush(list, cl, true);
+       __mei_io_list_flush_cl(head, cl, true);
+}
+
+/**
+ * mei_io_list_free_fp - free cb from a list that matches file pointer
+ *
+ * @head: io list
+ * @fp: file pointer (matching cb file object), may be NULL
+ */
+void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
+{
+       struct mei_cl_cb *cb, *next;
+
+       list_for_each_entry_safe(cb, next, head, list)
+               if (!fp || fp == cb->fp)
+                       mei_io_cb_free(cb);
 }
 
 /**
@@ -479,7 +496,7 @@ struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
        if (!cb)
                return NULL;
 
-       list_add_tail(&cb->list, &cl->dev->ctrl_wr_list.list);
+       list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
        return cb;
 }
 
@@ -503,27 +520,6 @@ struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
        return NULL;
 }
 
-/**
- * mei_cl_read_cb_flush - free client's read pending and completed cbs
- *   for a specific file
- *
- * @cl: host client
- * @fp: file pointer (matching cb file object), may be NULL
- */
-void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp)
-{
-       struct mei_cl_cb *cb, *next;
-
-       list_for_each_entry_safe(cb, next, &cl->rd_completed, list)
-               if (!fp || fp == cb->fp)
-                       mei_io_cb_free(cb);
-
-
-       list_for_each_entry_safe(cb, next, &cl->rd_pending, list)
-               if (!fp || fp == cb->fp)
-                       mei_io_cb_free(cb);
-}
-
 /**
  * mei_cl_flush_queues - flushes queue lists belonging to cl.
  *
@@ -542,18 +538,16 @@ int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
        dev = cl->dev;
 
        cl_dbg(dev, cl, "remove list entry belonging to cl\n");
-       mei_io_list_free(&cl->dev->write_list, cl);
-       mei_io_list_free(&cl->dev->write_waiting_list, cl);
-       mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
-       mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
-       mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
-
-       mei_cl_read_cb_flush(cl, fp);
+       mei_io_list_free_cl(&cl->dev->write_list, cl);
+       mei_io_list_free_cl(&cl->dev->write_waiting_list, cl);
+       mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
+       mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
+       mei_io_list_free_fp(&cl->rd_pending, fp);
+       mei_io_list_free_fp(&cl->rd_completed, fp);
 
        return 0;
 }
 
-
 /**
  * mei_cl_init - initializes cl.
  *
@@ -756,7 +750,7 @@ static void mei_cl_wake_all(struct mei_cl *cl)
  *
  * @cl: host client
  */
-void mei_cl_set_disconnected(struct mei_cl *cl)
+static void mei_cl_set_disconnected(struct mei_cl *cl)
 {
        struct mei_device *dev = cl->dev;
 
@@ -765,15 +759,18 @@ void mei_cl_set_disconnected(struct mei_cl *cl)
                return;
 
        cl->state = MEI_FILE_DISCONNECTED;
-       mei_io_list_free(&dev->write_list, cl);
-       mei_io_list_free(&dev->write_waiting_list, cl);
-       mei_io_list_flush(&dev->ctrl_rd_list, cl);
-       mei_io_list_flush(&dev->ctrl_wr_list, cl);
+       mei_io_list_free_cl(&dev->write_list, cl);
+       mei_io_list_free_cl(&dev->write_waiting_list, cl);
+       mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
+       mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
+       mei_io_list_free_cl(&dev->amthif_cmd_list, cl);
        mei_cl_wake_all(cl);
        cl->rx_flow_ctrl_creds = 0;
        cl->tx_flow_ctrl_creds = 0;
        cl->timer_count = 0;
 
+       mei_cl_bus_module_put(cl);
+
        if (!cl->me_cl)
                return;
 
@@ -829,7 +826,7 @@ static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
                return ret;
        }
 
-       list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
+       list_move_tail(&cb->list, &dev->ctrl_rd_list);
        cl->timer_count = MEI_CONNECT_TIMEOUT;
        mei_schedule_stall_timer(dev);
 
@@ -847,7 +844,7 @@ static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
  * Return: 0, OK; otherwise, error.
  */
 int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
-                           struct mei_cl_cb *cmpl_list)
+                         struct list_head *cmpl_list)
 {
        struct mei_device *dev = cl->dev;
        u32 msg_slots;
@@ -862,7 +859,7 @@ int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
 
        ret = mei_cl_send_disconnect(cl, cb);
        if (ret)
-               list_move_tail(&cb->list, &cmpl_list->list);
+               list_move_tail(&cb->list, cmpl_list);
 
        return ret;
 }
@@ -984,7 +981,7 @@ static bool mei_cl_is_other_connecting(struct mei_cl *cl)
 
        dev = cl->dev;
 
-       list_for_each_entry(cb, &dev->ctrl_rd_list.list, list) {
+       list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
                if (cb->fop_type == MEI_FOP_CONNECT &&
                    mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
                        return true;
@@ -1015,7 +1012,7 @@ static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
                return ret;
        }
 
-       list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
+       list_move_tail(&cb->list, &dev->ctrl_rd_list);
        cl->timer_count = MEI_CONNECT_TIMEOUT;
        mei_schedule_stall_timer(dev);
        return 0;
@@ -1031,7 +1028,7 @@ static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
  * Return: 0, OK; otherwise, error.
  */
 int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
-                             struct mei_cl_cb *cmpl_list)
+                      struct list_head *cmpl_list)
 {
        struct mei_device *dev = cl->dev;
        u32 msg_slots;
@@ -1049,7 +1046,7 @@ int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
 
        rets = mei_cl_send_connect(cl, cb);
        if (rets)
-               list_move_tail(&cb->list, &cmpl_list->list);
+               list_move_tail(&cb->list, cmpl_list);
 
        return rets;
 }
@@ -1077,13 +1074,17 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
 
        dev = cl->dev;
 
+       if (!mei_cl_bus_module_get(cl))
+               return -ENODEV;
+
        rets = mei_cl_set_connecting(cl, me_cl);
        if (rets)
-               return rets;
+               goto nortpm;
 
        if (mei_cl_is_fixed_address(cl)) {
                cl->state = MEI_FILE_CONNECTED;
-               return 0;
+               rets = 0;
+               goto nortpm;
        }
 
        rets = pm_runtime_get(dev->dev);
@@ -1117,8 +1118,8 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
 
        if (!mei_cl_is_connected(cl)) {
                if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
-                       mei_io_list_flush(&dev->ctrl_rd_list, cl);
-                       mei_io_list_flush(&dev->ctrl_wr_list, cl);
+                       mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
+                       mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
                         /* ignore disconnect return value;
                          * in case of failure reset will be invoked
                          */
@@ -1270,7 +1271,7 @@ enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
 * Return: 0 on success and error otherwise.
  */
 int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
-                     struct mei_cl_cb *cmpl_list)
+                     struct list_head *cmpl_list)
 {
        struct mei_device *dev = cl->dev;
        u32 msg_slots;
@@ -1288,11 +1289,11 @@ int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
        ret = mei_hbm_cl_notify_req(dev, cl, request);
        if (ret) {
                cl->status = ret;
-               list_move_tail(&cb->list, &cmpl_list->list);
+               list_move_tail(&cb->list, cmpl_list);
                return ret;
        }
 
-       list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
+       list_move_tail(&cb->list, &dev->ctrl_rd_list);
        return 0;
 }
 
@@ -1325,6 +1326,9 @@ int mei_cl_notify_request(struct mei_cl *cl,
                return -EOPNOTSUPP;
        }
 
+       if (!mei_cl_is_connected(cl))
+               return -ENODEV;
+
        rets = pm_runtime_get(dev->dev);
        if (rets < 0 && rets != -EINPROGRESS) {
                pm_runtime_put_noidle(dev->dev);
@@ -1344,7 +1348,7 @@ int mei_cl_notify_request(struct mei_cl *cl,
                        rets = -ENODEV;
                        goto out;
                }
-               list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
+               list_move_tail(&cb->list, &dev->ctrl_rd_list);
        }
 
        mutex_unlock(&dev->device_lock);
@@ -1419,6 +1423,11 @@ int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
 
        dev = cl->dev;
 
+       if (!dev->hbm_f_ev_supported) {
+               cl_dbg(dev, cl, "notifications not supported\n");
+               return -EOPNOTSUPP;
+       }
+
        if (!mei_cl_is_connected(cl))
                return -ENODEV;
 
@@ -1519,7 +1528,7 @@ nortpm:
  * Return: 0, OK; otherwise error.
  */
 int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
-                    struct mei_cl_cb *cmpl_list)
+                    struct list_head *cmpl_list)
 {
        struct mei_device *dev;
        struct mei_msg_data *buf;
@@ -1591,13 +1600,13 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
        }
 
        if (mei_hdr.msg_complete)
-               list_move_tail(&cb->list, &dev->write_waiting_list.list);
+               list_move_tail(&cb->list, &dev->write_waiting_list);
 
        return 0;
 
 err:
        cl->status = rets;
-       list_move_tail(&cb->list, &cmpl_list->list);
+       list_move_tail(&cb->list, cmpl_list);
        return rets;
 }
 
@@ -1687,9 +1696,9 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
 
 out:
        if (mei_hdr.msg_complete)
-               list_add_tail(&cb->list, &dev->write_waiting_list.list);
+               list_add_tail(&cb->list, &dev->write_waiting_list);
        else
-               list_add_tail(&cb->list, &dev->write_list.list);
+               list_add_tail(&cb->list, &dev->write_list);
 
        cb = NULL;
        if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
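
The dominant change in this file is that the callback queues stop being struct mei_cl_cb heads (addressed through a nested .list member) and become plain struct list_head fields, so every enqueue and traversal drops one level of indirection. A minimal, self-contained sketch of the resulting idiom, not part of the commit and using hypothetical demo_* names rather than the mei types:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_cb {                        /* hypothetical callback element */
        struct list_head list;          /* linkage into whichever queue holds it */
};

static LIST_HEAD(demo_queue);           /* the queue itself is now just a list_head */

static void demo_enqueue(struct demo_cb *cb)
{
        list_add_tail(&cb->list, &demo_queue);
}

static void demo_flush(void)
{
        struct demo_cb *cb, *next;

        /* safe variant, since entries are unlinked while walking */
        list_for_each_entry_safe(cb, next, &demo_queue, list) {
                list_del_init(&cb->list);
                kfree(cb);
        }
}
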
index f2545af9be7be47ff29f225814d01719108be3fc..545ae319ba90cb044f35d67290ac1eb4ea8e1bbf 100644 (file)
@@ -83,17 +83,7 @@ static inline u8 mei_me_cl_ver(const struct mei_me_client *me_cl)
  * MEI IO Functions
  */
 void mei_io_cb_free(struct mei_cl_cb *priv_cb);
-
-/**
- * mei_io_list_init - Sets up a queue list.
- *
- * @list: An instance cl callback structure
- */
-static inline void mei_io_list_init(struct mei_cl_cb *list)
-{
-       INIT_LIST_HEAD(&list->list);
-}
-void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
+void mei_io_list_free_fp(struct list_head *head, const struct file *fp);
 
 /*
  * MEI Host Client Functions
@@ -110,7 +100,6 @@ struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev);
 
 struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl,
                                 const struct file *fp);
-void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp);
 struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
                                  enum mei_cb_file_ops type,
                                  const struct file *fp);
@@ -209,19 +198,18 @@ static inline u8 mei_cl_host_addr(const struct mei_cl *cl)
 }
 
 int mei_cl_disconnect(struct mei_cl *cl);
-void mei_cl_set_disconnected(struct mei_cl *cl);
 int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
-                         struct mei_cl_cb *cmpl_list);
+                         struct list_head *cmpl_list);
 int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
                   const struct file *file);
 int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
-                             struct mei_cl_cb *cmpl_list);
+                      struct list_head *cmpl_list);
 int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp);
 int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr,
-                       struct mei_cl_cb *cmpl_list);
+                       struct list_head *cmpl_list);
 int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb);
 int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
-                    struct mei_cl_cb *cmpl_list);
+                    struct list_head *cmpl_list);
 
 void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
 
@@ -232,7 +220,7 @@ enum mei_cb_file_ops mei_cl_notify_req2fop(u8 request);
 int mei_cl_notify_request(struct mei_cl *cl,
                          const struct file *file, u8 request);
 int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
-                     struct mei_cl_cb *cmpl_list);
+                     struct list_head *cmpl_list);
 int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev);
 void mei_cl_notify(struct mei_cl *cl);
 
index 25b4a1ba522df87fa60f0c8a9555607aa76df6d6..ba3a774c8d710611c15cad1d4b6b1afcc40fa221 100644 (file)
@@ -815,7 +815,7 @@ static void mei_hbm_cl_res(struct mei_device *dev,
        struct mei_cl_cb *cb, *next;
 
        cl = NULL;
-       list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) {
+       list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list, list) {
 
                cl = cb->cl;
 
index a05375a3338a98370cd6abb495e27d670330a0bf..71216affcab1f349aafa2c8e8d69507445b213d8 100644 (file)
@@ -139,6 +139,19 @@ static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
        mei_hcsr_write(dev, reg);
 }
 
+/**
+ * mei_hcsr_set_hig - set host interrupt (set H_IG)
+ *
+ * @dev: the device structure
+ */
+static inline void mei_hcsr_set_hig(struct mei_device *dev)
+{
+       u32 hcsr;
+
+       hcsr = mei_hcsr_read(dev) | H_IG;
+       mei_hcsr_set(dev, hcsr);
+}
+
 /**
  * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
  *
@@ -380,6 +393,19 @@ static bool mei_me_hw_is_ready(struct mei_device *dev)
        return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
 }
 
+/**
+ * mei_me_hw_is_resetting - check whether the me(hw) is in reset
+ *
+ * @dev: mei device
+ * Return: bool
+ */
+static bool mei_me_hw_is_resetting(struct mei_device *dev)
+{
+       u32 mecsr = mei_me_mecsr_read(dev);
+
+       return (mecsr & ME_RST_HRA) == ME_RST_HRA;
+}
+
 /**
  * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
  *  or timeout is reached
@@ -505,7 +531,6 @@ static int mei_me_hbuf_write(struct mei_device *dev,
        unsigned long rem;
        unsigned long length = header->length;
        u32 *reg_buf = (u32 *)buf;
-       u32 hcsr;
        u32 dw_cnt;
        int i;
        int empty_slots;
@@ -532,8 +557,7 @@ static int mei_me_hbuf_write(struct mei_device *dev,
                mei_me_hcbww_write(dev, reg);
        }
 
-       hcsr = mei_hcsr_read(dev) | H_IG;
-       mei_hcsr_set(dev, hcsr);
+       mei_hcsr_set_hig(dev);
        if (!mei_me_hw_is_ready(dev))
                return -EIO;
 
@@ -580,7 +604,6 @@ static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
                    unsigned long buffer_length)
 {
        u32 *reg_buf = (u32 *)buffer;
-       u32 hcsr;
 
        for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
                *reg_buf++ = mei_me_mecbrw_read(dev);
@@ -591,8 +614,7 @@ static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
                memcpy(reg_buf, &reg, buffer_length);
        }
 
-       hcsr = mei_hcsr_read(dev) | H_IG;
-       mei_hcsr_set(dev, hcsr);
+       mei_hcsr_set_hig(dev);
        return 0;
 }
 
@@ -1189,7 +1211,7 @@ irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
 irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
 {
        struct mei_device *dev = (struct mei_device *) dev_id;
-       struct mei_cl_cb complete_list;
+       struct list_head cmpl_list;
        s32 slots;
        u32 hcsr;
        int rets = 0;
@@ -1201,7 +1223,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
        hcsr = mei_hcsr_read(dev);
        me_intr_clear(dev, hcsr);
 
-       mei_io_list_init(&complete_list);
+       INIT_LIST_HEAD(&cmpl_list);
 
        /* check if ME wants a reset */
        if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
@@ -1210,6 +1232,9 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
                goto end;
        }
 
+       if (mei_me_hw_is_resetting(dev))
+               mei_hcsr_set_hig(dev);
+
        mei_me_pg_intr(dev, me_intr_src(hcsr));
 
        /*  check if we need to start the dev */
@@ -1227,7 +1252,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
        slots = mei_count_full_read_slots(dev);
        while (slots > 0) {
                dev_dbg(dev->dev, "slots to read = %08x\n", slots);
-               rets = mei_irq_read_handler(dev, &complete_list, &slots);
+               rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
                /* There is a race between ME write and interrupt delivery:
                 * Not all data is always available immediately after the
                 * interrupt, so try to read again on the next interrupt.
@@ -1252,11 +1277,11 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
         */
        if (dev->pg_event != MEI_PG_EVENT_WAIT &&
            dev->pg_event != MEI_PG_EVENT_RECEIVED) {
-               rets = mei_irq_write_handler(dev, &complete_list);
+               rets = mei_irq_write_handler(dev, &cmpl_list);
                dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
        }
 
-       mei_irq_compl_handler(dev, &complete_list);
+       mei_irq_compl_handler(dev, &cmpl_list);
 
 end:
        dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
@@ -1389,7 +1414,7 @@ const struct mei_cfg mei_me_pch8_sps_cfg = {
  * @pdev: The pci device structure
  * @cfg: per device generation config
  *
- * Return: The mei_device_device pointer on success, NULL on failure.
+ * Return: The mei_device pointer on success, NULL on failure.
  */
 struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
                                   const struct mei_cfg *cfg)
@@ -1397,8 +1422,8 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
        struct mei_device *dev;
        struct mei_me_hw *hw;
 
-       dev = kzalloc(sizeof(struct mei_device) +
-                        sizeof(struct mei_me_hw), GFP_KERNEL);
+       dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) +
+                          sizeof(struct mei_me_hw), GFP_KERNEL);
        if (!dev)
                return NULL;
        hw = to_me_hw(dev);
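
The switch from kzalloc() to devm_kzalloc() ties the lifetime of the combined mei_device/mei_me_hw allocation to the PCI device, which is why the matching kfree() calls disappear from the probe error path and from remove later in this series. A minimal sketch of the managed-allocation pattern, with a hypothetical demo_priv structure standing in for the driver state:

#include <linux/pci.h>
#include <linux/slab.h>

struct demo_priv {                      /* hypothetical per-device state */
        void __iomem *regs;
};

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct demo_priv *priv;

        /* freed automatically when the driver unbinds; no kfree() in remove */
        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        pci_set_drvdata(pdev, priv);
        return 0;
}
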
index e9f8c0aeec13e19df0d51d2056610ba38045a0c7..24e4a4c966068de843e1189009112ebca54a6f78 100644 (file)
@@ -1057,7 +1057,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
 {
        struct mei_device *dev = (struct mei_device *) dev_id;
        struct mei_txe_hw *hw = to_txe_hw(dev);
-       struct mei_cl_cb complete_list;
+       struct list_head cmpl_list;
        s32 slots;
        int rets = 0;
 
@@ -1069,7 +1069,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
 
        /* initialize our complete list */
        mutex_lock(&dev->device_lock);
-       mei_io_list_init(&complete_list);
+       INIT_LIST_HEAD(&cmpl_list);
 
        if (pci_dev_msi_enabled(to_pci_dev(dev->dev)))
                mei_txe_check_and_ack_intrs(dev, true);
@@ -1126,7 +1126,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
        slots = mei_count_full_read_slots(dev);
        if (test_and_clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause)) {
                /* Read from TXE */
-               rets = mei_irq_read_handler(dev, &complete_list, &slots);
+               rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
                if (rets && dev->dev_state != MEI_DEV_RESETTING) {
                        dev_err(dev->dev,
                                "mei_irq_read_handler ret = %d.\n", rets);
@@ -1144,14 +1144,14 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
        if (hw->aliveness && dev->hbuf_is_ready) {
                /* get the real register value */
                dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
-               rets = mei_irq_write_handler(dev, &complete_list);
+               rets = mei_irq_write_handler(dev, &cmpl_list);
                if (rets && rets != -EMSGSIZE)
                        dev_err(dev->dev, "mei_irq_write_handler ret = %d.\n",
                                rets);
                dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
        }
 
-       mei_irq_compl_handler(dev, &complete_list);
+       mei_irq_compl_handler(dev, &cmpl_list);
 
 end:
        dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
@@ -1207,8 +1207,8 @@ struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
        struct mei_device *dev;
        struct mei_txe_hw *hw;
 
-       dev = kzalloc(sizeof(struct mei_device) +
-                        sizeof(struct mei_txe_hw), GFP_KERNEL);
+       dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) +
+                          sizeof(struct mei_txe_hw), GFP_KERNEL);
        if (!dev)
                return NULL;
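
Both the ME and TXE interrupt threads above now build their completion queue as a local struct list_head instead of a throwaway struct mei_cl_cb, which is also why the mei_io_list_init() helper goes away. A rough, self-contained sketch of the on-stack completion-list idiom (demo_* types and the locking shown are placeholders, not the mei driver's own scheme):

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct demo_cb {                        /* hypothetical completed work item */
        struct list_head list;
};

struct demo_dev {                       /* hypothetical device state */
        struct mutex lock;
        struct list_head pending;
};

static irqreturn_t demo_irq_thread(int irq, void *dev_id)
{
        struct demo_dev *dev = dev_id;
        struct demo_cb *cb, *next;
        LIST_HEAD(cmpl_list);           /* on-stack, already initialized */

        mutex_lock(&dev->lock);
        /* move finished items onto the local list while holding the lock */
        list_splice_init(&dev->pending, &cmpl_list);
        mutex_unlock(&dev->lock);

        /* complete them afterwards, one by one */
        list_for_each_entry_safe(cb, next, &cmpl_list, list) {
                list_del_init(&cb->list);
                /* ... hand cb back to its owner ... */
        }
        return IRQ_HANDLED;
}
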
 
index ce3ed0b88b0c8d956a1939bcd0ba59cdfa4c565e..e1e8b66d764881f15c0e348f5429900e2d30a0d7 100644 (file)
@@ -45,7 +45,7 @@
  * @intr_cause:          translated interrupt cause
  */
 struct mei_txe_hw {
-       void __iomem *mem_addr[NUM_OF_MEM_BARS];
+       void __iomem * const *mem_addr;
        u32 aliveness;
        u32 readiness;
        u32 slots;
index 41e5760a6886dfee96840ecceaed372c9e45194a..cfb1cdf176fa9001e83894c63c9eb6480516d230 100644 (file)
@@ -349,16 +349,16 @@ EXPORT_SYMBOL_GPL(mei_stop);
 bool mei_write_is_idle(struct mei_device *dev)
 {
        bool idle = (dev->dev_state == MEI_DEV_ENABLED &&
-               list_empty(&dev->ctrl_wr_list.list) &&
-               list_empty(&dev->write_list.list)   &&
-               list_empty(&dev->write_waiting_list.list));
+               list_empty(&dev->ctrl_wr_list) &&
+               list_empty(&dev->write_list)   &&
+               list_empty(&dev->write_waiting_list));
 
        dev_dbg(dev->dev, "write pg: is idle[%d] state=%s ctrl=%01d write=%01d wwait=%01d\n",
                idle,
                mei_dev_state_str(dev->dev_state),
-               list_empty(&dev->ctrl_wr_list.list),
-               list_empty(&dev->write_list.list),
-               list_empty(&dev->write_waiting_list.list));
+               list_empty(&dev->ctrl_wr_list),
+               list_empty(&dev->write_list),
+               list_empty(&dev->write_waiting_list));
 
        return idle;
 }
@@ -388,17 +388,17 @@ void mei_device_init(struct mei_device *dev,
        dev->dev_state = MEI_DEV_INITIALIZING;
        dev->reset_count = 0;
 
-       mei_io_list_init(&dev->write_list);
-       mei_io_list_init(&dev->write_waiting_list);
-       mei_io_list_init(&dev->ctrl_wr_list);
-       mei_io_list_init(&dev->ctrl_rd_list);
+       INIT_LIST_HEAD(&dev->write_list);
+       INIT_LIST_HEAD(&dev->write_waiting_list);
+       INIT_LIST_HEAD(&dev->ctrl_wr_list);
+       INIT_LIST_HEAD(&dev->ctrl_rd_list);
 
        INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
        INIT_WORK(&dev->reset_work, mei_reset_work);
        INIT_WORK(&dev->bus_rescan_work, mei_cl_bus_rescan_work);
 
        INIT_LIST_HEAD(&dev->iamthif_cl.link);
-       mei_io_list_init(&dev->amthif_cmd_list);
+       INIT_LIST_HEAD(&dev->amthif_cmd_list);
 
        bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
        dev->open_handle_count = 0;
index b584749bcc4a1c630337566a6944dde900b74bf2..406e9e2b2fff66dc97e6e22758d98ad4ad9dbf3b 100644 (file)
  *     for the completed callbacks
  *
  * @dev: mei device
- * @compl_list: list of completed cbs
+ * @cmpl_list: list of completed cbs
  */
-void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
+void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list)
 {
        struct mei_cl_cb *cb, *next;
        struct mei_cl *cl;
 
-       list_for_each_entry_safe(cb, next, &compl_list->list, list) {
+       list_for_each_entry_safe(cb, next, cmpl_list, list) {
                cl = cb->cl;
                list_del_init(&cb->list);
 
@@ -92,13 +92,13 @@ void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
  *
  * @cl: reading client
  * @mei_hdr: header of mei client message
- * @complete_list: completion list
+ * @cmpl_list: completion list
  *
  * Return: always 0
  */
 int mei_cl_irq_read_msg(struct mei_cl *cl,
                       struct mei_msg_hdr *mei_hdr,
-                      struct mei_cl_cb *complete_list)
+                      struct list_head *cmpl_list)
 {
        struct mei_device *dev = cl->dev;
        struct mei_cl_cb *cb;
@@ -144,7 +144,7 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
 
        if (mei_hdr->msg_complete) {
                cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
-               list_move_tail(&cb->list, &complete_list->list);
+               list_move_tail(&cb->list, cmpl_list);
        } else {
                pm_runtime_mark_last_busy(dev->dev);
                pm_request_autosuspend(dev->dev);
@@ -154,7 +154,7 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
 
 discard:
        if (cb)
-               list_move_tail(&cb->list, &complete_list->list);
+               list_move_tail(&cb->list, cmpl_list);
        mei_irq_discard_msg(dev, mei_hdr);
        return 0;
 }
@@ -169,7 +169,7 @@ discard:
  * Return: 0, OK; otherwise, error.
  */
 static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
-                                    struct mei_cl_cb *cmpl_list)
+                                    struct list_head *cmpl_list)
 {
        struct mei_device *dev = cl->dev;
        u32 msg_slots;
@@ -183,7 +183,7 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
                return -EMSGSIZE;
 
        ret = mei_hbm_cl_disconnect_rsp(dev, cl);
-       list_move_tail(&cb->list, &cmpl_list->list);
+       list_move_tail(&cb->list, cmpl_list);
 
        return ret;
 }
@@ -199,7 +199,7 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
  * Return: 0, OK; otherwise, error.
  */
 static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
-                          struct mei_cl_cb *cmpl_list)
+                          struct list_head *cmpl_list)
 {
        struct mei_device *dev = cl->dev;
        u32 msg_slots;
@@ -219,7 +219,7 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
        if (ret) {
                cl->status = ret;
                cb->buf_idx = 0;
-               list_move_tail(&cb->list, &cmpl_list->list);
+               list_move_tail(&cb->list, cmpl_list);
                return ret;
        }
 
@@ -249,7 +249,7 @@ static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr)
  * Return: 0 on success, <0 on failure.
  */
 int mei_irq_read_handler(struct mei_device *dev,
-               struct mei_cl_cb *cmpl_list, s32 *slots)
+                        struct list_head *cmpl_list, s32 *slots)
 {
        struct mei_msg_hdr *mei_hdr;
        struct mei_cl *cl;
@@ -347,12 +347,11 @@ EXPORT_SYMBOL_GPL(mei_irq_read_handler);
  *
  * Return: 0 on success, <0 on failure.
  */
-int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
+int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
 {
 
        struct mei_cl *cl;
        struct mei_cl_cb *cb, *next;
-       struct mei_cl_cb *list;
        s32 slots;
        int ret;
 
@@ -367,19 +366,18 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
        /* complete all waiting for write CB */
        dev_dbg(dev->dev, "complete all waiting for write cb.\n");
 
-       list = &dev->write_waiting_list;
-       list_for_each_entry_safe(cb, next, &list->list, list) {
+       list_for_each_entry_safe(cb, next, &dev->write_waiting_list, list) {
                cl = cb->cl;
 
                cl->status = 0;
                cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
                cl->writing_state = MEI_WRITE_COMPLETE;
-               list_move_tail(&cb->list, &cmpl_list->list);
+               list_move_tail(&cb->list, cmpl_list);
        }
 
        /* complete control write list CB */
        dev_dbg(dev->dev, "complete control write list cb.\n");
-       list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) {
+       list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list, list) {
                cl = cb->cl;
                switch (cb->fop_type) {
                case MEI_FOP_DISCONNECT:
@@ -423,7 +421,7 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
        }
        /* complete  write list CB */
        dev_dbg(dev->dev, "complete write list cb.\n");
-       list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
+       list_for_each_entry_safe(cb, next, &dev->write_list, list) {
                cl = cb->cl;
                if (cl == &dev->iamthif_cl)
                        ret = mei_amthif_irq_write(cl, cb, cmpl_list);
index e1bf54481fd682f0477211190726908e9933b349..9d0b7050c79a36698db92c9df113a15855fac4d4 100644 (file)
@@ -182,32 +182,36 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
                goto out;
        }
 
-       if (rets == -EBUSY &&
-           !mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, file)) {
-               rets = -ENOMEM;
-               goto out;
-       }
 
-       do {
-               mutex_unlock(&dev->device_lock);
-
-               if (wait_event_interruptible(cl->rx_wait,
-                                            (!list_empty(&cl->rd_completed)) ||
-                                            (!mei_cl_is_connected(cl)))) {
+again:
+       mutex_unlock(&dev->device_lock);
+       if (wait_event_interruptible(cl->rx_wait,
+                                    !list_empty(&cl->rd_completed) ||
+                                    !mei_cl_is_connected(cl))) {
+               if (signal_pending(current))
+                       return -EINTR;
+               return -ERESTARTSYS;
+       }
+       mutex_lock(&dev->device_lock);
 
-                       if (signal_pending(current))
-                               return -EINTR;
-                       return -ERESTARTSYS;
-               }
+       if (!mei_cl_is_connected(cl)) {
+               rets = -ENODEV;
+               goto out;
+       }
 
-               mutex_lock(&dev->device_lock);
-               if (!mei_cl_is_connected(cl)) {
-                       rets = -ENODEV;
-                       goto out;
-               }
+       cb = mei_cl_read_cb(cl, file);
+       if (!cb) {
+               /*
+                * For amthif all the waiters are woken up,
+                * but only fp with matching cb->fp get the cb,
+                * the others have to return to wait on read.
+                */
+               if (cl == &dev->iamthif_cl)
+                       goto again;
 
-               cb = mei_cl_read_cb(cl, file);
-       } while (!cb);
+               rets = 0;
+               goto out;
+       }
 
 copy_buffer:
        /* now copy the data to user space */
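
The rewritten read path drops the do/while loop in favour of a single wait plus an explicit "again" label: every amthif waiter is woken, but only the opener whose file matches the completed callback consumes it, and the others go back to sleep. A reduced, self-contained sketch of that wait-and-recheck idiom follows; the demo_client structure and its fields are illustrative, not the mei types, and the function is assumed to be entered with the lock held, dropping it only while sleeping (and on the signal path), as the driver does:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/wait.h>

struct demo_client {                    /* hypothetical reader state */
        struct mutex lock;
        wait_queue_head_t rx_wait;
        struct list_head rd_completed;
        bool connected;
};

/* sleep until a completed read buffer shows up or the link drops */
static int demo_read_wait(struct demo_client *cl)
{
again:
        mutex_unlock(&cl->lock);
        if (wait_event_interruptible(cl->rx_wait,
                                     !list_empty(&cl->rd_completed) ||
                                     !cl->connected))
                return -ERESTARTSYS;    /* interrupted by a signal */
        mutex_lock(&cl->lock);

        if (!cl->connected)
                return -ENODEV;

        if (list_empty(&cl->rd_completed))
                goto again;             /* another waiter consumed the buffer */

        return 0;                       /* caller copies from rd_completed */
}
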
index 8dadb98662a9e61da14ed8d95268bc9b4ec23d2b..d41aac53a2ac46648d1078e2a556bb9abcd31768 100644 (file)
@@ -328,6 +328,8 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
 bool mei_cl_bus_rx_event(struct mei_cl *cl);
 bool mei_cl_bus_notify_event(struct mei_cl *cl);
 void mei_cl_bus_remove_devices(struct mei_device *bus);
+bool mei_cl_bus_module_get(struct mei_cl *cl);
+void mei_cl_bus_module_put(struct mei_cl *cl);
 int mei_cl_bus_init(void);
 void mei_cl_bus_exit(void);
 
@@ -439,10 +441,10 @@ struct mei_device {
        struct cdev cdev;
        int minor;
 
-       struct mei_cl_cb write_list;
-       struct mei_cl_cb write_waiting_list;
-       struct mei_cl_cb ctrl_wr_list;
-       struct mei_cl_cb ctrl_rd_list;
+       struct list_head write_list;
+       struct list_head write_waiting_list;
+       struct list_head ctrl_wr_list;
+       struct list_head ctrl_rd_list;
 
        struct list_head file_list;
        long open_handle_count;
@@ -499,7 +501,7 @@ struct mei_device {
        bool override_fixed_address;
 
        /* amthif list for cmd waiting */
-       struct mei_cl_cb amthif_cmd_list;
+       struct list_head amthif_cmd_list;
        struct mei_cl iamthif_cl;
        long iamthif_open_count;
        u32 iamthif_stall_timer;
@@ -571,10 +573,10 @@ void mei_cancel_work(struct mei_device *dev);
 void mei_timer(struct work_struct *work);
 void mei_schedule_stall_timer(struct mei_device *dev);
 int mei_irq_read_handler(struct mei_device *dev,
-               struct mei_cl_cb *cmpl_list, s32 *slots);
+                        struct list_head *cmpl_list, s32 *slots);
 
-int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list);
-void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list);
+int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list);
+void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list);
 
 /*
  * AMTHIF - AMT Host Interface Functions
@@ -590,12 +592,12 @@ int mei_amthif_release(struct mei_device *dev, struct file *file);
 int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb);
 int mei_amthif_run_next_cmd(struct mei_device *dev);
 int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
-                       struct mei_cl_cb *cmpl_list);
+                        struct list_head *cmpl_list);
 
 void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
 int mei_amthif_irq_read_msg(struct mei_cl *cl,
                            struct mei_msg_hdr *mei_hdr,
-                           struct mei_cl_cb *complete_list);
+                           struct list_head *cmpl_list);
 int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
 
 /*
index f9c6ec4b98ab7d7b0c66c1f4d4af014440c3de23..0a668fdfbbe9fba4732325f95d14aa21d9320012 100644 (file)
@@ -149,18 +149,18 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                return -ENODEV;
 
        /* enable pci dev */
-       err = pci_enable_device(pdev);
+       err = pcim_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "failed to enable pci device.\n");
                goto end;
        }
        /* set PCI host mastering  */
        pci_set_master(pdev);
-       /* pci request regions for mei driver */
-       err = pci_request_regions(pdev, KBUILD_MODNAME);
+       /* pci request regions and mapping IO device memory for mei driver */
+       err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
        if (err) {
                dev_err(&pdev->dev, "failed to get pci regions.\n");
-               goto disable_device;
+               goto end;
        }
 
        if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
@@ -173,24 +173,18 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
        if (err) {
                dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
-               goto release_regions;
+               goto end;
        }
 
-
        /* allocates and initializes the mei dev structure */
        dev = mei_me_dev_init(pdev, cfg);
        if (!dev) {
                err = -ENOMEM;
-               goto release_regions;
+               goto end;
        }
        hw = to_me_hw(dev);
-       /* mapping  IO device memory */
-       hw->mem_addr = pci_iomap(pdev, 0, 0);
-       if (!hw->mem_addr) {
-               dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
-               err = -ENOMEM;
-               goto free_device;
-       }
+       hw->mem_addr = pcim_iomap_table(pdev)[0];
+
        pci_enable_msi(pdev);
 
         /* request and enable interrupt */
@@ -203,7 +197,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err) {
                dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
                       pdev->irq);
-               goto disable_msi;
+               goto end;
        }
 
        if (mei_start(dev)) {
@@ -242,15 +236,6 @@ release_irq:
        mei_cancel_work(dev);
        mei_disable_interrupts(dev);
        free_irq(pdev->irq, dev);
-disable_msi:
-       pci_disable_msi(pdev);
-       pci_iounmap(pdev, hw->mem_addr);
-free_device:
-       kfree(dev);
-release_regions:
-       pci_release_regions(pdev);
-disable_device:
-       pci_disable_device(pdev);
 end:
        dev_err(&pdev->dev, "initialization failed.\n");
        return err;
@@ -267,7 +252,6 @@ end:
 static void mei_me_remove(struct pci_dev *pdev)
 {
        struct mei_device *dev;
-       struct mei_me_hw *hw;
 
        dev = pci_get_drvdata(pdev);
        if (!dev)
@@ -276,33 +260,19 @@ static void mei_me_remove(struct pci_dev *pdev)
        if (mei_pg_is_enabled(dev))
                pm_runtime_get_noresume(&pdev->dev);
 
-       hw = to_me_hw(dev);
-
-
        dev_dbg(&pdev->dev, "stop\n");
        mei_stop(dev);
 
        if (!pci_dev_run_wake(pdev))
                mei_me_unset_pm_domain(dev);
 
-       /* disable interrupts */
        mei_disable_interrupts(dev);
 
        free_irq(pdev->irq, dev);
-       pci_disable_msi(pdev);
-
-       if (hw->mem_addr)
-               pci_iounmap(pdev, hw->mem_addr);
 
        mei_deregister(dev);
-
-       kfree(dev);
-
-       pci_release_regions(pdev);
-       pci_disable_device(pdev);
-
-
 }
+
 #ifdef CONFIG_PM_SLEEP
 static int mei_me_pci_suspend(struct device *device)
 {
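
Moving from pci_enable_device()/pci_request_regions()/pci_iomap() to their pcim_* managed counterparts is what lets the probe error path collapse to a single "end:" label and the remove path drop its manual release, iounmap and disable calls. A minimal sketch of the managed style (demo names only, not the mei driver itself):

#include <linux/pci.h>

static int demo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        void __iomem *regs;
        int err;

        err = pcim_enable_device(pdev);         /* auto-disabled on unbind */
        if (err)
                return err;

        /* request and map BAR 0; both are released automatically as well */
        err = pcim_iomap_regions(pdev, BIT(0), "demo");
        if (err)
                return err;

        regs = pcim_iomap_table(pdev)[0];
        if (!regs)
                return -ENOMEM;

        pci_set_master(pdev);
        /* ... device-specific setup using regs ... */
        return 0;
}
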
index 58ffd30dcc91822393cad09a96caeccab25620e1..fe088b40daf9c84cb9c64b3e0f880935efd444c7 100644 (file)
@@ -52,17 +52,6 @@ static inline void mei_txe_set_pm_domain(struct mei_device *dev) {}
 static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {}
 #endif /* CONFIG_PM */
 
-static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw)
-{
-       int i;
-
-       for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
-               if (hw->mem_addr[i]) {
-                       pci_iounmap(pdev, hw->mem_addr[i]);
-                       hw->mem_addr[i] = NULL;
-               }
-       }
-}
 /**
  * mei_txe_probe - Device Initialization Routine
  *
@@ -75,22 +64,22 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        struct mei_device *dev;
        struct mei_txe_hw *hw;
+       const int mask = BIT(SEC_BAR) | BIT(BRIDGE_BAR);
        int err;
-       int i;
 
        /* enable pci dev */
-       err = pci_enable_device(pdev);
+       err = pcim_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "failed to enable pci device.\n");
                goto end;
        }
        /* set PCI host mastering  */
        pci_set_master(pdev);
-       /* pci request regions for mei driver */
-       err = pci_request_regions(pdev, KBUILD_MODNAME);
+       /* pci request regions and mapping IO device memory for mei driver */
+       err = pcim_iomap_regions(pdev, mask, KBUILD_MODNAME);
        if (err) {
                dev_err(&pdev->dev, "failed to get pci regions.\n");
-               goto disable_device;
+               goto end;
        }
 
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
@@ -98,7 +87,7 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "No suitable DMA available.\n");
-                       goto release_regions;
+                       goto end;
                }
        }
 
@@ -106,20 +95,10 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        dev = mei_txe_dev_init(pdev);
        if (!dev) {
                err = -ENOMEM;
-               goto release_regions;
+               goto end;
        }
        hw = to_txe_hw(dev);
-
-       /* mapping  IO device memory */
-       for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
-               hw->mem_addr[i] = pci_iomap(pdev, i, 0);
-               if (!hw->mem_addr[i]) {
-                       dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
-                       err = -ENOMEM;
-                       goto free_device;
-               }
-       }
-
+       hw->mem_addr = pcim_iomap_table(pdev);
 
        pci_enable_msi(pdev);
 
@@ -140,7 +119,7 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err) {
                dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
                        pdev->irq);
-               goto free_device;
+               goto end;
        }
 
        if (mei_start(dev)) {
@@ -173,23 +152,9 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 stop:
        mei_stop(dev);
 release_irq:
-
        mei_cancel_work(dev);
-
-       /* disable interrupts */
        mei_disable_interrupts(dev);
-
        free_irq(pdev->irq, dev);
-       pci_disable_msi(pdev);
-
-free_device:
-       mei_txe_pci_iounmap(pdev, hw);
-
-       kfree(dev);
-release_regions:
-       pci_release_regions(pdev);
-disable_device:
-       pci_disable_device(pdev);
 end:
        dev_err(&pdev->dev, "initialization failed.\n");
        return err;
@@ -206,38 +171,24 @@ end:
 static void mei_txe_remove(struct pci_dev *pdev)
 {
        struct mei_device *dev;
-       struct mei_txe_hw *hw;
 
        dev = pci_get_drvdata(pdev);
        if (!dev) {
-               dev_err(&pdev->dev, "mei: dev =NULL\n");
+               dev_err(&pdev->dev, "mei: dev =NULL\n");
                return;
        }
 
        pm_runtime_get_noresume(&pdev->dev);
 
-       hw = to_txe_hw(dev);
-
        mei_stop(dev);
 
        if (!pci_dev_run_wake(pdev))
                mei_txe_unset_pm_domain(dev);
 
-       /* disable interrupts */
        mei_disable_interrupts(dev);
        free_irq(pdev->irq, dev);
-       pci_disable_msi(pdev);
-
-       pci_set_drvdata(pdev, NULL);
-
-       mei_txe_pci_iounmap(pdev, hw);
 
        mei_deregister(dev);
-
-       kfree(dev);
-
-       pci_release_regions(pdev);
-       pci_disable_device(pdev);
 }
 
 
index 88e45234d527518e0d449d598c5f04a5be5538a1..fed992e2c2583907711a056ed54bc87a0888d532 100644 (file)
@@ -292,7 +292,6 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
        if (ret) {
                dev_err(vop_dev(vdev), "%s %d err %d\n",
                        __func__, __LINE__, ret);
-               kfree(vdev);
                return ret;
        }
 
index 6030ac5b8c63919018da1ad3b2e8957ae881636d..ef2ece0f26afc6b513ff72dabb08e2ac81c13a56 100644 (file)
@@ -56,6 +56,7 @@
 #include <linux/list.h>
 #include <linux/notifier.h>
 #include <linux/reboot.h>
+#include <linux/workqueue.h>
 #include <generated/utsrelease.h>
 
 #include <linux/io.h>
@@ -64,8 +65,6 @@
 #define LCD_MINOR              156
 #define KEYPAD_MINOR           185
 
-#define PANEL_VERSION          "0.9.5"
-
 #define LCD_MAXBYTES           256     /* max burst write */
 
 #define KEYPAD_BUFFER          64
@@ -77,8 +76,8 @@
 /* a key repeats this times INPUT_POLL_TIME */
 #define KEYPAD_REP_DELAY       (2)
 
-/* keep the light on this times INPUT_POLL_TIME for each flash */
-#define FLASH_LIGHT_TEMPO      (200)
+/* keep the light on this many seconds for each flash */
+#define FLASH_LIGHT_TEMPO      (4)
 
 /* converts an r_str() input to an active high, bits string : 000BAOSE */
 #define PNL_PINPUT(a)          ((((unsigned char)(a)) ^ 0x7F) >> 3)
 #define PIN_SELECP             17
 #define PIN_NOT_SET            127
 
-#define LCD_FLAG_S             0x0001
-#define LCD_FLAG_ID            0x0002
 #define LCD_FLAG_B             0x0004  /* blink on */
 #define LCD_FLAG_C             0x0008  /* cursor on */
 #define LCD_FLAG_D             0x0010  /* display on */
@@ -256,7 +253,10 @@ static struct {
        int hwidth;
        int charset;
        int proto;
-       int light_tempo;
+
+       struct delayed_work bl_work;
+       struct mutex bl_tempo_lock;     /* Protects access to bl_tempo */
+       bool bl_tempo;
 
        /* TODO: use union here? */
        struct {
@@ -661,8 +661,6 @@ static void lcd_get_bits(unsigned int port, int *val)
        }
 }
 
-static void init_scan_timer(void);
-
 /* sets data port bits according to current signals values */
 static int set_data_bits(void)
 {
@@ -794,11 +792,8 @@ static void lcd_send_serial(int byte)
 }
 
 /* turn the backlight on or off */
-static void lcd_backlight(int on)
+static void __lcd_backlight(int on)
 {
-       if (lcd.pins.bl == PIN_NONE)
-               return;
-
        /* The backlight is activated by setting the AUTOFEED line to +5V  */
        spin_lock_irq(&pprt_lock);
        if (on)
@@ -809,6 +804,44 @@ static void lcd_backlight(int on)
        spin_unlock_irq(&pprt_lock);
 }
 
+static void lcd_backlight(int on)
+{
+       if (lcd.pins.bl == PIN_NONE)
+               return;
+
+       mutex_lock(&lcd.bl_tempo_lock);
+       if (!lcd.bl_tempo)
+               __lcd_backlight(on);
+       mutex_unlock(&lcd.bl_tempo_lock);
+}
+
+static void lcd_bl_off(struct work_struct *work)
+{
+       mutex_lock(&lcd.bl_tempo_lock);
+       if (lcd.bl_tempo) {
+               lcd.bl_tempo = false;
+               if (!(lcd.flags & LCD_FLAG_L))
+                       __lcd_backlight(0);
+       }
+       mutex_unlock(&lcd.bl_tempo_lock);
+}
+
+/* turn the backlight on for a little while */
+static void lcd_poke(void)
+{
+       if (lcd.pins.bl == PIN_NONE)
+               return;
+
+       cancel_delayed_work_sync(&lcd.bl_work);
+
+       mutex_lock(&lcd.bl_tempo_lock);
+       if (!lcd.bl_tempo && !(lcd.flags & LCD_FLAG_L))
+               __lcd_backlight(1);
+       lcd.bl_tempo = true;
+       schedule_delayed_work(&lcd.bl_work, FLASH_LIGHT_TEMPO * HZ);
+       mutex_unlock(&lcd.bl_tempo_lock);
+}
+
 /* send a command to the LCD panel in serial mode */
 static void lcd_write_cmd_s(int cmd)
 {
@@ -907,6 +940,13 @@ static void lcd_gotoxy(void)
                         (lcd.hwidth - 1) : lcd.bwidth - 1));
 }
 
+static void lcd_home(void)
+{
+       lcd.addr.x = 0;
+       lcd.addr.y = 0;
+       lcd_gotoxy();
+}
+
 static void lcd_print(char c)
 {
        if (lcd.addr.x < lcd.bwidth) {
@@ -925,9 +965,7 @@ static void lcd_clear_fast_s(void)
 {
        int pos;
 
-       lcd.addr.x = 0;
-       lcd.addr.y = 0;
-       lcd_gotoxy();
+       lcd_home();
 
        spin_lock_irq(&pprt_lock);
        for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
@@ -939,9 +977,7 @@ static void lcd_clear_fast_s(void)
        }
        spin_unlock_irq(&pprt_lock);
 
-       lcd.addr.x = 0;
-       lcd.addr.y = 0;
-       lcd_gotoxy();
+       lcd_home();
 }
 
 /* fills the display with spaces and resets X/Y */
@@ -949,9 +985,7 @@ static void lcd_clear_fast_p8(void)
 {
        int pos;
 
-       lcd.addr.x = 0;
-       lcd.addr.y = 0;
-       lcd_gotoxy();
+       lcd_home();
 
        spin_lock_irq(&pprt_lock);
        for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
@@ -977,9 +1011,7 @@ static void lcd_clear_fast_p8(void)
        }
        spin_unlock_irq(&pprt_lock);
 
-       lcd.addr.x = 0;
-       lcd.addr.y = 0;
-       lcd_gotoxy();
+       lcd_home();
 }
 
 /* fills the display with spaces and resets X/Y */
@@ -987,9 +1019,7 @@ static void lcd_clear_fast_tilcd(void)
 {
        int pos;
 
-       lcd.addr.x = 0;
-       lcd.addr.y = 0;
-       lcd_gotoxy();
+       lcd_home();
 
        spin_lock_irq(&pprt_lock);
        for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
@@ -1000,9 +1030,7 @@ static void lcd_clear_fast_tilcd(void)
 
        spin_unlock_irq(&pprt_lock);
 
-       lcd.addr.x = 0;
-       lcd.addr.y = 0;
-       lcd_gotoxy();
+       lcd_home();
 }
 
 /* clears the display and resets X/Y */
@@ -1108,13 +1136,8 @@ static inline int handle_lcd_special_code(void)
                processed = 1;
                break;
        case '*':
-               /* flash back light using the keypad timer */
-               if (scan_timer.function) {
-                       if (lcd.light_tempo == 0 &&
-                           ((lcd.flags & LCD_FLAG_L) == 0))
-                               lcd_backlight(1);
-                       lcd.light_tempo = FLASH_LIGHT_TEMPO;
-               }
+               /* flash back light */
+               lcd_poke();
                processed = 1;
                break;
        case 'f':       /* Small Font */
@@ -1278,21 +1301,14 @@ static inline int handle_lcd_special_code(void)
                        lcd_write_cmd(LCD_CMD_FUNCTION_SET
                                      | LCD_CMD_DATA_LEN_8BITS
                                      | ((lcd.flags & LCD_FLAG_F)
-                                                     ? LCD_CMD_TWO_LINES : 0)
-                                     | ((lcd.flags & LCD_FLAG_N)
                                                      ? LCD_CMD_FONT_5X10_DOTS
+                                                                     : 0)
+                                     | ((lcd.flags & LCD_FLAG_N)
+                                                     ? LCD_CMD_TWO_LINES
                                                                      : 0));
                /* check whether L flag was changed */
-               else if ((oldflags ^ lcd.flags) & (LCD_FLAG_L)) {
-                       if (lcd.flags & (LCD_FLAG_L))
-                               lcd_backlight(1);
-                       else if (lcd.light_tempo == 0)
-                               /*
-                                * switch off the light only when the tempo
-                                * lighting is gone
-                                */
-                               lcd_backlight(0);
-               }
+               else if ((oldflags ^ lcd.flags) & (LCD_FLAG_L))
+                       lcd_backlight(!!(lcd.flags & LCD_FLAG_L));
        }
 
        return processed;
@@ -1376,9 +1392,7 @@ static void lcd_write_char(char c)
                        processed = 1;
                } else if (!strcmp(lcd.esc_seq.buf, "[H")) {
                        /* cursor to home */
-                       lcd.addr.x = 0;
-                       lcd.addr.y = 0;
-                       lcd_gotoxy();
+                       lcd_home();
                        processed = 1;
                }
                /* codes starting with ^[[L */
@@ -1625,8 +1639,10 @@ static void lcd_init(void)
        else
                lcd_char_conv = NULL;
 
-       if (lcd.pins.bl != PIN_NONE)
-               init_scan_timer();
+       if (lcd.pins.bl != PIN_NONE) {
+               mutex_init(&lcd.bl_tempo_lock);
+               INIT_DELAYED_WORK(&lcd.bl_work, lcd_bl_off);
+       }
 
        pin_to_bits(lcd.pins.e, lcd_bits[LCD_PORT_D][LCD_BIT_E],
                    lcd_bits[LCD_PORT_C][LCD_BIT_E]);
@@ -1655,14 +1671,11 @@ static void lcd_init(void)
        panel_lcd_print("\x1b[Lc\x1b[Lb\x1b[L*" CONFIG_PANEL_BOOT_MESSAGE);
 #endif
 #else
-       panel_lcd_print("\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE "\nPanel-"
-                       PANEL_VERSION);
+       panel_lcd_print("\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE);
 #endif
-       lcd.addr.x = 0;
-       lcd.addr.y = 0;
        /* clear the display on the next device opening */
        lcd.must_clear = true;
-       lcd_gotoxy();
+       lcd_home();
 }
 
 /*
@@ -1997,19 +2010,8 @@ static void panel_scan_timer(void)
                        panel_process_inputs();
        }
 
-       if (lcd.enabled && lcd.initialized) {
-               if (keypressed) {
-                       if (lcd.light_tempo == 0 &&
-                           ((lcd.flags & LCD_FLAG_L) == 0))
-                               lcd_backlight(1);
-                       lcd.light_tempo = FLASH_LIGHT_TEMPO;
-               } else if (lcd.light_tempo > 0) {
-                       lcd.light_tempo--;
-                       if (lcd.light_tempo == 0 &&
-                           ((lcd.flags & LCD_FLAG_L) == 0))
-                               lcd_backlight(0);
-               }
-       }
+       if (keypressed && lcd.enabled && lcd.initialized)
+               lcd_poke();
 
        mod_timer(&scan_timer, jiffies + INPUT_POLL_TIME);
 }
@@ -2270,25 +2272,26 @@ static void panel_detach(struct parport *port)
        if (scan_timer.function)
                del_timer_sync(&scan_timer);
 
-       if (pprt) {
-               if (keypad.enabled) {
-                       misc_deregister(&keypad_dev);
-                       keypad_initialized = 0;
-               }
+       if (keypad.enabled) {
+               misc_deregister(&keypad_dev);
+               keypad_initialized = 0;
+       }
 
-               if (lcd.enabled) {
-                       panel_lcd_print("\x0cLCD driver " PANEL_VERSION
-                                       "\nunloaded.\x1b[Lc\x1b[Lb\x1b[L-");
-                       misc_deregister(&lcd_dev);
-                       lcd.initialized = false;
+       if (lcd.enabled) {
+               panel_lcd_print("\x0cLCD driver unloaded.\x1b[Lc\x1b[Lb\x1b[L-");
+               misc_deregister(&lcd_dev);
+               if (lcd.pins.bl != PIN_NONE) {
+                       cancel_delayed_work_sync(&lcd.bl_work);
+                       __lcd_backlight(0);
                }
-
-               /* TODO: free all input signals */
-               parport_release(pprt);
-               parport_unregister_device(pprt);
-               pprt = NULL;
-               unregister_reboot_notifier(&panel_notifier);
+               lcd.initialized = false;
        }
+
+       /* TODO: free all input signals */
+       parport_release(pprt);
+       parport_unregister_device(pprt);
+       pprt = NULL;
+       unregister_reboot_notifier(&panel_notifier);
 }
 
 static struct parport_driver panel_driver = {
@@ -2400,7 +2403,7 @@ static int __init panel_init_module(void)
 
        if (!lcd.enabled && !keypad.enabled) {
                /* no device enabled, let's exit */
-               pr_err("driver version " PANEL_VERSION " disabled.\n");
+               pr_err("panel driver disabled.\n");
                return -ENODEV;
        }
 
@@ -2411,12 +2414,10 @@ static int __init panel_init_module(void)
        }
 
        if (pprt)
-               pr_info("driver version " PANEL_VERSION
-                       " registered on parport%d (io=0x%lx).\n", parport,
-                       pprt->port->base);
+               pr_info("panel driver registered on parport%d (io=0x%lx).\n",
+                       parport, pprt->port->base);
        else
-               pr_info("driver version " PANEL_VERSION
-                       " not yet registered\n");
+               pr_info("panel driver not yet registered\n");
        return 0;
 }
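
The panel backlight flash no longer piggybacks on the keypad scan timer counting INPUT_POLL_TIME ticks; a delayed work item now switches the light off FLASH_LIGHT_TEMPO seconds after the last lcd_poke(), with bl_tempo_lock arbitrating against explicit on/off requests. A bare-bones sketch of that timeout idiom, with demo_* identifiers (including demo_set_backlight()) that are illustrative only; the driver itself cancels and reschedules the work, while mod_delayed_work() shown here is the equivalent single-call restart:

#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

#define DEMO_TEMPO      4               /* seconds the light stays on per poke */

static void demo_set_backlight(int on)  /* stand-in for the real pin toggle */
{
        pr_debug("backlight %s\n", on ? "on" : "off");
}

static void demo_light_off(struct work_struct *work)
{
        /* the timeout expired with no further pokes */
        demo_set_backlight(0);
}

static DECLARE_DELAYED_WORK(demo_off_work, demo_light_off);

static void demo_poke(void)
{
        demo_set_backlight(1);
        /* restart the countdown; each poke pushes the deadline out */
        mod_delayed_work(system_wq, &demo_off_work, DEMO_TEMPO * HZ);
}
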
 
diff --git a/drivers/misc/sram-exec.c b/drivers/misc/sram-exec.c
new file mode 100644 (file)
index 0000000..ac52241
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * SRAM protect-exec region helper functions
+ *
+ * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ *     Dave Gerlach
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/genalloc.h>
+#include <linux/sram.h>
+
+#include <asm/cacheflush.h>
+
+#include "sram.h"
+
+static DEFINE_MUTEX(exec_pool_list_mutex);
+static LIST_HEAD(exec_pool_list);
+
+int sram_check_protect_exec(struct sram_dev *sram, struct sram_reserve *block,
+                           struct sram_partition *part)
+{
+       unsigned long base = (unsigned long)part->base;
+       unsigned long end = base + block->size;
+
+       if (!PAGE_ALIGNED(base) || !PAGE_ALIGNED(end)) {
+               dev_err(sram->dev,
+                       "SRAM pool marked with 'protect-exec' is not page aligned and will not be created.\n");
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+int sram_add_protect_exec(struct sram_partition *part)
+{
+       mutex_lock(&exec_pool_list_mutex);
+       list_add_tail(&part->list, &exec_pool_list);
+       mutex_unlock(&exec_pool_list_mutex);
+
+       return 0;
+}
+
+/**
+ * sram_exec_copy - copy data to a protected executable region of sram
+ *
+ * @pool: struct gen_pool retrieved that is part of this sram
+ * @dst: Destination address for the copy, that must be inside pool
+ * @src: Source address for the data to copy
+ * @size: Size of copy to perform, which starting from dst, must reside in pool
+ *
+ * This helper function allows sram driver to act as central control location
+ * of 'protect-exec' pools which are normal sram pools but are always set
+ * read-only and executable except when copying data to them, at which point
+ * they are set to read-write non-executable, to make sure no memory is
+ * writeable and executable at the same time. This region must be page-aligned
+ * and is checked during probe, otherwise page attribute manipulation would
+ * not be possible.
+ */
+int sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
+                  size_t size)
+{
+       struct sram_partition *part = NULL, *p;
+       unsigned long base;
+       int pages;
+
+       mutex_lock(&exec_pool_list_mutex);
+       list_for_each_entry(p, &exec_pool_list, list) {
+               if (p->pool == pool)
+                       part = p;
+       }
+       mutex_unlock(&exec_pool_list_mutex);
+
+       if (!part)
+               return -EINVAL;
+
+       if (!addr_in_gen_pool(pool, (unsigned long)dst, size))
+               return -EINVAL;
+
+       base = (unsigned long)part->base;
+       pages = PAGE_ALIGN(size) / PAGE_SIZE;
+
+       mutex_lock(&part->lock);
+
+       set_memory_nx((unsigned long)base, pages);
+       set_memory_rw((unsigned long)base, pages);
+
+       memcpy(dst, src, size);
+
+       set_memory_ro((unsigned long)base, pages);
+       set_memory_x((unsigned long)base, pages);
+
+       mutex_unlock(&part->lock);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(sram_exec_copy);
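
The new helper keeps a 'protect-exec' SRAM region read-only and executable except for the short window in which set_memory_rw()/set_memory_nx() are applied around the memcpy(). A hypothetical caller might use it roughly as below; the device-tree phandle name, the demo_* wrapper and the assumption that sram_exec_copy() is declared in <linux/sram.h> by this series are mine, not taken from the commit:

#include <linux/genalloc.h>
#include <linux/of.h>
#include <linux/sram.h>

/* copy a blob of position-independent code into protect-exec SRAM */
static void *demo_load_sram_code(struct device_node *np, void *code, size_t size)
{
        struct gen_pool *pool;
        void *dst;

        pool = of_gen_pool_get(np, "sram", 0);  /* "sram" phandle name is assumed */
        if (!pool)
                return NULL;

        dst = (void *)gen_pool_alloc(pool, size);
        if (!dst)
                return NULL;

        /* flips the pages to RW/NX, copies, then restores RO/X */
        if (sram_exec_copy(pool, dst, code, size)) {
                gen_pool_free(pool, (unsigned long)dst, size);
                return NULL;
        }

        return dst;
}
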
index b33ab8ce47ab7c3d9732f8e39b85bbc93c81d770..d1185b78cf9aa4208620f27913ba1bddad96d411 100644 (file)
 #include <linux/mfd/syscon.h>
 #include <soc/at91/atmel-secumod.h>
 
-#define SRAM_GRANULARITY       32
-
-struct sram_partition {
-       void __iomem *base;
-
-       struct gen_pool *pool;
-       struct bin_attribute battr;
-       struct mutex lock;
-};
-
-struct sram_dev {
-       struct device *dev;
-       void __iomem *virt_base;
-
-       struct gen_pool *pool;
-       struct clk *clk;
+#include "sram.h"
 
-       struct sram_partition *partition;
-       u32 partitions;
-};
-
-struct sram_reserve {
-       struct list_head list;
-       u32 start;
-       u32 size;
-       bool export;
-       bool pool;
-       const char *label;
-};
+#define SRAM_GRANULARITY       32
 
 static ssize_t sram_read(struct file *filp, struct kobject *kobj,
                         struct bin_attribute *attr,
@@ -148,6 +122,18 @@ static int sram_add_partition(struct sram_dev *sram, struct sram_reserve *block,
                if (ret)
                        return ret;
        }
+       if (block->protect_exec) {
+               ret = sram_check_protect_exec(sram, block, part);
+               if (ret)
+                       return ret;
+
+               ret = sram_add_pool(sram, block, start, part);
+               if (ret)
+                       return ret;
+
+               sram_add_protect_exec(part);
+       }
+
        sram->partitions++;
 
        return 0;
@@ -233,7 +219,11 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
                if (of_find_property(child, "pool", NULL))
                        block->pool = true;
 
-               if ((block->export || block->pool) && block->size) {
+               if (of_find_property(child, "protect-exec", NULL))
+                       block->protect_exec = true;
+
+               if ((block->export || block->pool || block->protect_exec) &&
+                   block->size) {
                        exports++;
 
                        label = NULL;
@@ -249,8 +239,10 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
 
                        block->label = devm_kstrdup(sram->dev,
                                                    label, GFP_KERNEL);
-                       if (!block->label)
+                       if (!block->label) {
+                               ret = -ENOMEM;
                                goto err_chunks;
+                       }
 
                        dev_dbg(sram->dev, "found %sblock '%s' 0x%x-0x%x\n",
                                block->export ? "exported " : "", block->label,
@@ -293,7 +285,8 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
                        goto err_chunks;
                }
 
-               if ((block->export || block->pool) && block->size) {
+               if ((block->export || block->pool || block->protect_exec) &&
+                   block->size) {
                        ret = sram_add_partition(sram, block,
                                                 res->start + block->start);
                        if (ret) {
diff --git a/drivers/misc/sram.h b/drivers/misc/sram.h
new file mode 100644 (file)
index 0000000..c181ce4
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Defines for the SRAM driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SRAM_H
+#define __SRAM_H
+
+struct sram_partition {
+       void __iomem *base;
+
+       struct gen_pool *pool;
+       struct bin_attribute battr;
+       struct mutex lock;
+       struct list_head list;
+};
+
+struct sram_dev {
+       struct device *dev;
+       void __iomem *virt_base;
+
+       struct gen_pool *pool;
+       struct clk *clk;
+
+       struct sram_partition *partition;
+       u32 partitions;
+};
+
+struct sram_reserve {
+       struct list_head list;
+       u32 start;
+       u32 size;
+       bool export;
+       bool pool;
+       bool protect_exec;
+       const char *label;
+};
+
+#ifdef CONFIG_SRAM_EXEC
+int sram_check_protect_exec(struct sram_dev *sram, struct sram_reserve *block,
+                           struct sram_partition *part);
+int sram_add_protect_exec(struct sram_partition *part);
+#else
+static inline int sram_check_protect_exec(struct sram_dev *sram,
+                                         struct sram_reserve *block,
+                                         struct sram_partition *part)
+{
+       return -ENODEV;
+}
+
+static inline int sram_add_protect_exec(struct sram_partition *part)
+{
+       return -ENODEV;
+}
+#endif /* CONFIG_SRAM_EXEC */
+#endif /* __SRAM_H */
index 189b325197488d9ed48f1cecc80ddb06ce05e329..9d659542a335b444914f2ead245f0975ac970a2b 100644 (file)
@@ -54,10 +54,7 @@ struct vmci_guest_device {
        struct device *dev;     /* PCI device we are attached to */
        void __iomem *iobase;
 
-       unsigned int irq;
-       unsigned int intr_type;
        bool exclusive_vectors;
-       struct msix_entry msix_entries[VMCI_MAX_INTRS];
 
        struct tasklet_struct datagram_tasklet;
        struct tasklet_struct bm_tasklet;
@@ -368,30 +365,6 @@ static void vmci_process_bitmap(unsigned long data)
        vmci_dbell_scan_notification_entries(dev->notification_bitmap);
 }
 
-/*
- * Enable MSI-X.  Try exclusive vectors first, then shared vectors.
- */
-static int vmci_enable_msix(struct pci_dev *pdev,
-                           struct vmci_guest_device *vmci_dev)
-{
-       int i;
-       int result;
-
-       for (i = 0; i < VMCI_MAX_INTRS; ++i) {
-               vmci_dev->msix_entries[i].entry = i;
-               vmci_dev->msix_entries[i].vector = i;
-       }
-
-       result = pci_enable_msix_exact(pdev,
-                                      vmci_dev->msix_entries, VMCI_MAX_INTRS);
-       if (result == 0)
-               vmci_dev->exclusive_vectors = true;
-       else if (result == -ENOSPC)
-               result = pci_enable_msix_exact(pdev, vmci_dev->msix_entries, 1);
-
-       return result;
-}
-
 /*
  * Interrupt handler for legacy or MSI interrupt, or for first MSI-X
  * interrupt (vector VMCI_INTR_DATAGRAM).
@@ -406,7 +379,7 @@ static irqreturn_t vmci_interrupt(int irq, void *_dev)
         * Otherwise we must read the ICR to determine what to do.
         */
 
-       if (dev->intr_type == VMCI_INTR_TYPE_MSIX && dev->exclusive_vectors) {
+       if (dev->exclusive_vectors) {
                tasklet_schedule(&dev->datagram_tasklet);
        } else {
                unsigned int icr;
@@ -491,7 +464,6 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
        }
 
        vmci_dev->dev = &pdev->dev;
-       vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
        vmci_dev->exclusive_vectors = false;
        vmci_dev->iobase = iobase;
 
@@ -592,26 +564,26 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
         * Enable interrupts.  Try MSI-X first, then MSI, and then fallback on
         * legacy interrupts.
         */
-       if (!vmci_disable_msix && !vmci_enable_msix(pdev, vmci_dev)) {
-               vmci_dev->intr_type = VMCI_INTR_TYPE_MSIX;
-               vmci_dev->irq = vmci_dev->msix_entries[0].vector;
-       } else if (!vmci_disable_msi && !pci_enable_msi(pdev)) {
-               vmci_dev->intr_type = VMCI_INTR_TYPE_MSI;
-               vmci_dev->irq = pdev->irq;
+       error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS,
+                       PCI_IRQ_MSIX);
+       if (error < 0) {
+               error = pci_alloc_irq_vectors(pdev, 1, 1,
+                               PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+               if (error < 0)
+                       goto err_remove_bitmap;
        } else {
-               vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
-               vmci_dev->irq = pdev->irq;
+               vmci_dev->exclusive_vectors = true;
        }
 
        /*
         * Request IRQ for legacy or MSI interrupts, or for first
         * MSI-X vector.
         */
-       error = request_irq(vmci_dev->irq, vmci_interrupt, IRQF_SHARED,
-                           KBUILD_MODNAME, vmci_dev);
+       error = request_irq(pci_irq_vector(pdev, 0), vmci_interrupt,
+                           IRQF_SHARED, KBUILD_MODNAME, vmci_dev);
        if (error) {
                dev_err(&pdev->dev, "Irq %u in use: %d\n",
-                       vmci_dev->irq, error);
+                       pci_irq_vector(pdev, 0), error);
                goto err_disable_msi;
        }
 
@@ -622,13 +594,13 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
         * between the vectors.
         */
        if (vmci_dev->exclusive_vectors) {
-               error = request_irq(vmci_dev->msix_entries[1].vector,
+               error = request_irq(pci_irq_vector(pdev, 1),
                                    vmci_interrupt_bm, 0, KBUILD_MODNAME,
                                    vmci_dev);
                if (error) {
                        dev_err(&pdev->dev,
                                "Failed to allocate irq %u: %d\n",
-                               vmci_dev->msix_entries[1].vector, error);
+                               pci_irq_vector(pdev, 1), error);
                        goto err_free_irq;
                }
        }
@@ -651,15 +623,12 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
        return 0;
 
 err_free_irq:
-       free_irq(vmci_dev->irq, vmci_dev);
+       free_irq(pci_irq_vector(pdev, 0), vmci_dev);
        tasklet_kill(&vmci_dev->datagram_tasklet);
        tasklet_kill(&vmci_dev->bm_tasklet);
 
 err_disable_msi:
-       if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX)
-               pci_disable_msix(pdev);
-       else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI)
-               pci_disable_msi(pdev);
+       pci_free_irq_vectors(pdev);
 
        vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
        if (vmci_err < VMCI_SUCCESS)
@@ -719,14 +688,10 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
         * MSI-X, we might have multiple vectors, each with their own
         * IRQ, which we must free too.
         */
-       free_irq(vmci_dev->irq, vmci_dev);
-       if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) {
-               if (vmci_dev->exclusive_vectors)
-                       free_irq(vmci_dev->msix_entries[1].vector, vmci_dev);
-               pci_disable_msix(pdev);
-       } else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) {
-               pci_disable_msi(pdev);
-       }
+       if (vmci_dev->exclusive_vectors)
+               free_irq(pci_irq_vector(pdev, 1), vmci_dev);
+       free_irq(pci_irq_vector(pdev, 0), vmci_dev);
+       pci_free_irq_vectors(pdev);
 
        tasklet_kill(&vmci_dev->datagram_tasklet);
        tasklet_kill(&vmci_dev->bm_tasklet);
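
For reference, a generic sketch of the interrupt-setup pattern the hunks in this file are converted to: let the PCI core pick the interrupt type with pci_alloc_irq_vectors() and address vectors by index with pci_irq_vector(), instead of hand-rolling msix_entries[] and an intr_type state machine. my_irq_handler(), my_setup_irq() and the vector count of 2 are illustrative assumptions.

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t my_irq_handler(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int my_setup_irq(struct pci_dev *pdev, void *drvdata)
{
        int ret;

        /* try the full MSI-X set first, then fall back to a single vector
         * of whatever type the platform can provide */
        ret = pci_alloc_irq_vectors(pdev, 2, 2, PCI_IRQ_MSIX);
        if (ret < 0)
                ret = pci_alloc_irq_vectors(pdev, 1, 1,
                                PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
        if (ret < 0)
                return ret;

        ret = request_irq(pci_irq_vector(pdev, 0), my_irq_handler,
                          IRQF_SHARED, KBUILD_MODNAME, drvdata);
        if (ret)
                pci_free_irq_vectors(pdev);

        return ret;
}
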
index fd6ebbefd919344e0c7dab94316488a43bc3e2bc..d35ebd993b385255eaa441fb67272c117e328e7f 100644 (file)
@@ -703,8 +703,6 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
        char *dest = start + (section_index * net_device->send_section_size)
                     + pend_size;
        int i;
-       bool is_data_pkt = (skb != NULL) ? true : false;
-       bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
        u32 msg_size = 0;
        u32 padding = 0;
        u32 remain = packet->total_data_buflen % net_device->pkt_align;
@@ -712,7 +710,7 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
                packet->page_buf_cnt;
 
        /* Add padding */
-       if (is_data_pkt && xmit_more && remain &&
+       if (skb && skb->xmit_more && remain &&
            !packet->cp_partial) {
                padding = net_device->pkt_align - remain;
                rndis_msg->msg_len += padding;
@@ -754,7 +752,6 @@ static inline int netvsc_send_pkt(
        int ret;
        struct hv_page_buffer *pgbuf;
        u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
-       bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
 
        nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
        if (skb != NULL) {
@@ -778,16 +775,6 @@ static inline int netvsc_send_pkt(
        if (out_channel->rescind)
                return -ENODEV;
 
-       /*
-        * It is possible that once we successfully place this packet
-        * on the ringbuffer, we may stop the queue. In that case, we want
-        * to notify the host independent of the xmit_more flag. We don't
-        * need to be precise here; in the worst case we may signal the host
-        * unnecessarily.
-        */
-       if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1))
-               xmit_more = false;
-
        if (packet->page_buf_cnt) {
                pgbuf = packet->cp_partial ? (*pb) +
                        packet->rmsg_pgcnt : (*pb);
@@ -797,15 +784,13 @@ static inline int netvsc_send_pkt(
                                                      &nvmsg,
                                                      sizeof(struct nvsp_message),
                                                      req_id,
-                                                     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
-                                                     !xmit_more);
+                                                     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        } else {
                ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
                                           sizeof(struct nvsp_message),
                                           req_id,
                                           VM_PKT_DATA_INBAND,
-                                          VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
-                                          !xmit_more);
+                                          VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        }
 
        if (ret == 0) {
index 398ea7f54826b6bef0db0aa35aa2b1bb8493f432..408b521ee5209b9775c691113b854d027a8677fa 100644 (file)
@@ -608,7 +608,7 @@ static struct nvmem_device *nvmem_find(const char *name)
 /**
  * of_nvmem_device_get() - Get nvmem device from a given id
  *
- * @dev node: Device tree node that uses the nvmem device
+ * @np: Device tree node that uses the nvmem device.
  * @id: nvmem name from nvmem-names property.
  *
  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
@@ -634,8 +634,8 @@ EXPORT_SYMBOL_GPL(of_nvmem_device_get);
 /**
  * nvmem_device_get() - Get nvmem device from a given id
  *
- * @dev : Device that uses the nvmem device
- * @id: nvmem name from nvmem-names property.
+ * @dev: Device that uses the nvmem device.
+ * @dev_name: name of the requested nvmem device.
  *
  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
  * on success.
@@ -674,6 +674,7 @@ static void devm_nvmem_device_release(struct device *dev, void *res)
 /**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
  *
+ * @dev: Device that uses the nvmem device.
  * @nvmem: pointer to nvmem device allocated by devm_nvmem_cell_get(),
  * that needs to be released.
  */
@@ -702,8 +703,8 @@ EXPORT_SYMBOL_GPL(nvmem_device_put);
 /**
 * devm_nvmem_device_get() - Get nvmem device from a given id
  *
- * @dev node: Device tree node that uses the nvmem cell
- * @id: nvmem name in nvmems property.
+ * @dev: Device that requests the nvmem device.
+ * @id: name id for the requested nvmem device.
  *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.  The nvmem_device will be freed automatically once the
@@ -745,8 +746,10 @@ static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id)
 /**
  * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
  *
- * @dev node: Device tree node that uses the nvmem cell
- * @id: nvmem cell name from nvmem-cell-names property.
+ * @np: Device tree node that uses the nvmem cell.
+ * @name: nvmem cell name from nvmem-cell-names property, or NULL
+ *       for the cell at index 0 (the lone cell with no accompanying
+ *       nvmem-cell-names property).
  *
  * Return: Will be an ERR_PTR() on error or a valid pointer
  * to a struct nvmem_cell.  The nvmem_cell will be freed by the
@@ -759,9 +762,12 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
        struct nvmem_cell *cell;
        struct nvmem_device *nvmem;
        const __be32 *addr;
-       int rval, len, index;
+       int rval, len;
+       int index = 0;
 
-       index = of_property_match_string(np, "nvmem-cell-names", name);
+       /* if cell name exists, find index to the name */
+       if (name)
+               index = of_property_match_string(np, "nvmem-cell-names", name);
 
        cell_np = of_parse_phandle(np, "nvmem-cells", index);
        if (!cell_np)
@@ -830,8 +836,8 @@ EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
 /**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
  *
- * @dev node: Device tree node that uses the nvmem cell
- * @id: nvmem cell name to get.
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: nvmem cell name to get.
  *
  * Return: Will be an ERR_PTR() on error or a valid pointer
  * to a struct nvmem_cell.  The nvmem_cell will be freed by the
@@ -859,8 +865,8 @@ static void devm_nvmem_cell_release(struct device *dev, void *res)
 /**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
  *
- * @dev node: Device tree node that uses the nvmem cell
- * @id: nvmem id in nvmem-names property.
+ * @dev: Device that requests the nvmem cell.
+ * @id: nvmem cell name id to get.
  *
  * Return: Will be an ERR_PTR() on error or a valid pointer
  * to a struct nvmem_cell.  The nvmem_cell will be freed by the
@@ -900,7 +906,8 @@ static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
  * devm_nvmem_cell_put() - Release previously allocated nvmem cell
  * from devm_nvmem_cell_get.
  *
- * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get()
+ * @dev: Device that requests the nvmem cell.
+ * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
  */
 void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
 {
@@ -916,7 +923,7 @@ EXPORT_SYMBOL(devm_nvmem_cell_put);
 /**
  * nvmem_cell_put() - Release previously allocated nvmem cell.
  *
- * @cell: Previously allocated nvmem cell by nvmem_cell_get()
+ * @cell: Previously allocated nvmem cell by nvmem_cell_get().
  */
 void nvmem_cell_put(struct nvmem_cell *cell)
 {
@@ -970,7 +977,8 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem,
        if (cell->bit_offset || cell->nbits)
                nvmem_shift_read_buffer_in_place(cell, buf);
 
-       *len = cell->bytes;
+       if (len)
+               *len = cell->bytes;
 
        return 0;
 }
@@ -979,7 +987,8 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem,
  * nvmem_cell_read() - Read a given nvmem cell
  *
  * @cell: nvmem cell to be read.
- * @len: pointer to length of cell which will be populated on successful read.
+ * @len: pointer to length of cell which will be populated on successful read;
+ *      can be NULL.
  *
  * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
  * buffer should be freed by the consumer with a kfree().
@@ -1126,7 +1135,7 @@ EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
  * nvmem_device_cell_write() - Write cell to a given nvmem device
  *
  * @nvmem: nvmem device to be written to.
- * @info: nvmem cell info to be written
+ * @info: nvmem cell info to be written.
  * @buf: buffer to be written to cell.
  *
  * Return: length of bytes written or negative error code on failure.
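
A consumer-side sketch matching the kerneldoc fixes above: the cell id passed to devm_nvmem_cell_get() is looked up in "nvmem-cell-names" (and, per the of_nvmem_cell_get() change, may be NULL for a lone unnamed cell), while nvmem_cell_read() now also tolerates a NULL length pointer. The "calibration" cell name and the read_calibration() helper are assumptions.

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>
#include <linux/string.h>

static int read_calibration(struct device *dev, u8 *out, size_t out_len)
{
        struct nvmem_cell *cell;
        size_t len;
        void *val;

        cell = devm_nvmem_cell_get(dev, "calibration");
        if (IS_ERR(cell))
                return PTR_ERR(cell);

        val = nvmem_cell_read(cell, &len);      /* &len could also be NULL */
        if (IS_ERR(val))
                return PTR_ERR(val);

        memcpy(out, val, min(len, out_len));
        kfree(val);

        return 0;
}
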
index 8e7b120696fa91da63a44bc7f4f8422b5d24bc7e..b8ca1e677b016db772027637d1ab9d3c30c28407 100644 (file)
@@ -73,6 +73,7 @@ static const struct of_device_id imx_ocotp_dt_ids[] = {
        { .compatible = "fsl,imx6q-ocotp",  (void *)128 },
        { .compatible = "fsl,imx6sl-ocotp", (void *)64 },
        { .compatible = "fsl,imx6sx-ocotp", (void *)128 },
+       { .compatible = "fsl,imx6ul-ocotp", (void *)128 },
        { },
 };
 MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
index 1f52462f4cdd4b7e1431723ca37b32c267da68a1..dd9ea463c2a4a547c2d26e32ed61c5990832a9e5 100644 (file)
@@ -157,23 +157,26 @@ static int goldfish_new_pdev(void)
 static irqreturn_t goldfish_pdev_bus_interrupt(int irq, void *dev_id)
 {
        irqreturn_t ret = IRQ_NONE;
+
        while (1) {
                u32 op = readl(pdev_bus_base + PDEV_BUS_OP);
-               switch (op) {
-               case PDEV_BUS_OP_DONE:
-                       return IRQ_NONE;
 
+               switch (op) {
                case PDEV_BUS_OP_REMOVE_DEV:
                        goldfish_pdev_remove();
+                       ret = IRQ_HANDLED;
                        break;
 
                case PDEV_BUS_OP_ADD_DEV:
                        goldfish_new_pdev();
+                       ret = IRQ_HANDLED;
                        break;
+
+               case PDEV_BUS_OP_DONE:
+               default:
+                       return ret;
                }
-               ret = IRQ_HANDLED;
        }
-       return ret;
 }
 
 static int goldfish_pdev_bus_probe(struct platform_device *pdev)
index 50958f167305317f3de1a361673cd3caf4f8f47b..48d5327d38d420d84f49d52aa9a904d2ee174d80 100644 (file)
@@ -125,7 +125,7 @@ hv_uio_probe(struct hv_device *dev,
                goto fail;
 
        dev->channel->inbound.ring_buffer->interrupt_mask = 1;
-       dev->channel->batched_reading = false;
+       set_channel_read_mode(dev->channel, HV_CALL_DIRECT);
 
        /* Fill general uio info */
        pdata->info.name = "uio_hv_generic";
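
A minimal sketch of the per-channel read mode that replaces the old batched_reading flag in the hunk above (the HV_CALL_* modes are added to include/linux/hyperv.h later in this series): a VMBus driver selects the mode before opening its channel. my_vmbus_probe(), my_channel_callback() and the ring sizes are assumptions.

#include <linux/hyperv.h>

static void my_channel_callback(void *context)
{
        struct vmbus_channel *chan = context;

        /* drain the ring here; with HV_CALL_DIRECT this runs from a tasklet */
        (void)chan;
}

static int my_vmbus_probe(struct hv_device *dev,
                          const struct hv_vmbus_device_id *id)
{
        /* HV_CALL_BATCHED is the default; DIRECT and ISR opt out of the
         * batched-read behaviour that batched_reading used to control */
        set_channel_read_mode(dev->channel, HV_CALL_DIRECT);

        return vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
                          NULL, 0, my_channel_callback, dev->channel);
}
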
index bdbadaa47ef3ecb481b200785d4bc1aa9981f6e4..0035cf79760a512563540d58535987bac70aa35c 100644 (file)
@@ -1625,10 +1625,25 @@ static int vme_bus_probe(struct device *dev)
        return retval;
 }
 
+static int vme_bus_remove(struct device *dev)
+{
+       int retval = -ENODEV;
+       struct vme_driver *driver;
+       struct vme_dev *vdev = dev_to_vme_dev(dev);
+
+       driver = dev->platform_data;
+
+       if (driver->remove != NULL)
+               retval = driver->remove(vdev);
+
+       return retval;
+}
+
 struct bus_type vme_bus_type = {
        .name = "vme",
        .match = vme_bus_match,
        .probe = vme_bus_probe,
+       .remove = vme_bus_remove,
 };
 EXPORT_SYMBOL(vme_bus_type);
 
index 049a884a756f8d1d5474ae2bc56f4dddcb089c35..be77b7914fad46aa11dd9184a5ddafeb2b762bab 100644 (file)
@@ -153,6 +153,9 @@ struct ds_device
         */
        u16                     spu_bit;
 
+       u8                      st_buf[ST_SIZE];
+       u8                      byte_buf;
+
        struct w1_bus_master    master;
 };
 
@@ -174,7 +177,6 @@ struct ds_status
        u8                      data_in_buffer_status;
        u8                      reserved1;
        u8                      reserved2;
-
 };
 
 static struct usb_device_id ds_id_table [] = {
@@ -244,28 +246,6 @@ static int ds_send_control(struct ds_device *dev, u16 value, u16 index)
        return err;
 }
 
-static int ds_recv_status_nodump(struct ds_device *dev, struct ds_status *st,
-                                unsigned char *buf, int size)
-{
-       int count, err;
-
-       memset(st, 0, sizeof(*st));
-
-       count = 0;
-       err = usb_interrupt_msg(dev->udev, usb_rcvintpipe(dev->udev,
-               dev->ep[EP_STATUS]), buf, size, &count, 1000);
-       if (err < 0) {
-               pr_err("Failed to read 1-wire data from 0x%x: err=%d.\n",
-                      dev->ep[EP_STATUS], err);
-               return err;
-       }
-
-       if (count >= sizeof(*st))
-               memcpy(st, buf, sizeof(*st));
-
-       return count;
-}
-
 static inline void ds_print_msg(unsigned char *buf, unsigned char *str, int off)
 {
        pr_info("%45s: %8x\n", str, buf[off]);
@@ -324,6 +304,35 @@ static void ds_dump_status(struct ds_device *dev, unsigned char *buf, int count)
        }
 }
 
+static int ds_recv_status(struct ds_device *dev, struct ds_status *st,
+                         bool dump)
+{
+       int count, err;
+
+       if (st)
+               memset(st, 0, sizeof(*st));
+
+       count = 0;
+       err = usb_interrupt_msg(dev->udev,
+                               usb_rcvintpipe(dev->udev,
+                                              dev->ep[EP_STATUS]),
+                               dev->st_buf, sizeof(dev->st_buf),
+                               &count, 1000);
+       if (err < 0) {
+               pr_err("Failed to read 1-wire data from 0x%x: err=%d.\n",
+                      dev->ep[EP_STATUS], err);
+               return err;
+       }
+
+       if (dump)
+               ds_dump_status(dev, dev->st_buf, count);
+
+       if (st && count >= sizeof(*st))
+               memcpy(st, dev->st_buf, sizeof(*st));
+
+       return count;
+}
+
 static void ds_reset_device(struct ds_device *dev)
 {
        ds_send_control_cmd(dev, CTL_RESET_DEVICE, 0);
@@ -344,7 +353,6 @@ static void ds_reset_device(struct ds_device *dev)
 static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size)
 {
        int count, err;
-       struct ds_status st;
 
        /* Careful on size.  If size is less than what is available in
         * the input buffer, the device fails the bulk transfer and
@@ -359,14 +367,9 @@ static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size)
        err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]),
                                buf, size, &count, 1000);
        if (err < 0) {
-               u8 buf[ST_SIZE];
-               int count;
-
                pr_info("Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]);
                usb_clear_halt(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]));
-
-               count = ds_recv_status_nodump(dev, &st, buf, sizeof(buf));
-               ds_dump_status(dev, buf, count);
+               ds_recv_status(dev, NULL, true);
                return err;
        }
 
@@ -404,7 +407,6 @@ int ds_stop_pulse(struct ds_device *dev, int limit)
 {
        struct ds_status st;
        int count = 0, err = 0;
-       u8 buf[ST_SIZE];
 
        do {
                err = ds_send_control(dev, CTL_HALT_EXE_IDLE, 0);
@@ -413,7 +415,7 @@ int ds_stop_pulse(struct ds_device *dev, int limit)
                err = ds_send_control(dev, CTL_RESUME_EXE, 0);
                if (err)
                        break;
-               err = ds_recv_status_nodump(dev, &st, buf, sizeof(buf));
+               err = ds_recv_status(dev, &st, false);
                if (err)
                        break;
 
@@ -456,18 +458,17 @@ int ds_detect(struct ds_device *dev, struct ds_status *st)
 
 static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
 {
-       u8 buf[ST_SIZE];
        int err, count = 0;
 
        do {
                st->status = 0;
-               err = ds_recv_status_nodump(dev, st, buf, sizeof(buf));
+               err = ds_recv_status(dev, st, false);
 #if 0
                if (err >= 0) {
                        int i;
                        printk("0x%x: count=%d, status: ", dev->ep[EP_STATUS], err);
                        for (i=0; i<err; ++i)
-                               printk("%02x ", buf[i]);
+                               printk("%02x ", dev->st_buf[i]);
                        printk("\n");
                }
 #endif
@@ -485,7 +486,7 @@ static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
         * can do something with it).
         */
        if (err > 16 || count >= 100 || err < 0)
-               ds_dump_status(dev, buf, err);
+               ds_dump_status(dev, dev->st_buf, err);
 
        /* Extended data isn't an error.  Well, a short is, but the dump
         * would have already told the user that and we can't do anything
@@ -608,7 +609,6 @@ static int ds_write_byte(struct ds_device *dev, u8 byte)
 {
        int err;
        struct ds_status st;
-       u8 rbyte;
 
        err = ds_send_control(dev, COMM_BYTE_IO | COMM_IM | dev->spu_bit, byte);
        if (err)
@@ -621,11 +621,11 @@ static int ds_write_byte(struct ds_device *dev, u8 byte)
        if (err)
                return err;
 
-       err = ds_recv_data(dev, &rbyte, sizeof(rbyte));
+       err = ds_recv_data(dev, &dev->byte_buf, 1);
        if (err < 0)
                return err;
 
-       return !(byte == rbyte);
+       return !(byte == dev->byte_buf);
 }
 
 static int ds_read_byte(struct ds_device *dev, u8 *byte)
@@ -712,7 +712,6 @@ static void ds9490r_search(void *data, struct w1_master *master,
        int err;
        u16 value, index;
        struct ds_status st;
-       u8 st_buf[ST_SIZE];
        int search_limit;
        int found = 0;
        int i;
@@ -724,7 +723,12 @@ static void ds9490r_search(void *data, struct w1_master *master,
        /* FIFO 128 bytes, bulk packet size 64, read a multiple of the
         * packet size.
         */
-       u64 buf[2*64/8];
+       const size_t bufsize = 2 * 64;
+       u64 *buf;
+
+       buf = kmalloc(bufsize, GFP_KERNEL);
+       if (!buf)
+               return;
 
        mutex_lock(&master->bus_mutex);
 
@@ -745,10 +749,9 @@ static void ds9490r_search(void *data, struct w1_master *master,
        do {
                schedule_timeout(jtime);
 
-               if (ds_recv_status_nodump(dev, &st, st_buf, sizeof(st_buf)) <
-                       sizeof(st)) {
+               err = ds_recv_status(dev, &st, false);
+               if (err < 0 || err < sizeof(st))
                        break;
-               }
 
                if (st.data_in_buffer_status) {
                        /* Bulk in can receive partial ids, but when it does
@@ -758,7 +761,7 @@ static void ds9490r_search(void *data, struct w1_master *master,
                         * bulk without first checking if status says there
                         * is data to read.
                         */
-                       err = ds_recv_data(dev, (u8 *)buf, sizeof(buf));
+                       err = ds_recv_data(dev, (u8 *)buf, bufsize);
                        if (err < 0)
                                break;
                        for (i = 0; i < err/8; ++i) {
@@ -794,9 +797,14 @@ static void ds9490r_search(void *data, struct w1_master *master,
        }
 search_out:
        mutex_unlock(&master->bus_mutex);
+       kfree(buf);
 }
 
 #if 0
+/*
+ * FIXME: if this disabled code is ever used in the future all ds_send_data()
+ * calls must be changed to use a DMAable buffer.
+ */
 static int ds_match_access(struct ds_device *dev, u64 init)
 {
        int err;
@@ -845,13 +853,12 @@ static int ds_set_path(struct ds_device *dev, u64 init)
 
 static u8 ds9490r_touch_bit(void *data, u8 bit)
 {
-       u8 ret;
        struct ds_device *dev = data;
 
-       if (ds_touch_bit(dev, bit, &ret))
+       if (ds_touch_bit(dev, bit, &dev->byte_buf))
                return 0;
 
-       return ret;
+       return dev->byte_buf;
 }
 
 #if 0
@@ -866,13 +873,12 @@ static u8 ds9490r_read_bit(void *data)
 {
        struct ds_device *dev = data;
        int err;
-       u8 bit = 0;
 
-       err = ds_touch_bit(dev, 1, &bit);
+       err = ds_touch_bit(dev, 1, &dev->byte_buf);
        if (err)
                return 0;
 
-       return bit & 1;
+       return dev->byte_buf & 1;
 }
 #endif
 
@@ -887,32 +893,51 @@ static u8 ds9490r_read_byte(void *data)
 {
        struct ds_device *dev = data;
        int err;
-       u8 byte = 0;
 
-       err = ds_read_byte(dev, &byte);
+       err = ds_read_byte(dev, &dev->byte_buf);
        if (err)
                return 0;
 
-       return byte;
+       return dev->byte_buf;
 }
 
 static void ds9490r_write_block(void *data, const u8 *buf, int len)
 {
        struct ds_device *dev = data;
+       u8 *tbuf;
+
+       if (len <= 0)
+               return;
+
+       tbuf = kmemdup(buf, len, GFP_KERNEL);
+       if (!tbuf)
+               return;
 
-       ds_write_block(dev, (u8 *)buf, len);
+       ds_write_block(dev, tbuf, len);
+
+       kfree(tbuf);
 }
 
 static u8 ds9490r_read_block(void *data, u8 *buf, int len)
 {
        struct ds_device *dev = data;
        int err;
+       u8 *tbuf;
 
-       err = ds_read_block(dev, buf, len);
-       if (err < 0)
+       if (len <= 0)
+               return 0;
+
+       tbuf = kmalloc(len, GFP_KERNEL);
+       if (!tbuf)
                return 0;
 
-       return len;
+       err = ds_read_block(dev, tbuf, len);
+       if (err >= 0)
+               memcpy(buf, tbuf, len);
+
+       kfree(tbuf);
+
+       return err >= 0 ? len : 0;
 }
 
 static u8 ds9490r_reset(void *data)
index bb09de633939222e7f47d1b11908ab09aba0e79b..fb190c2596070eb7042de766198462f69c3cca96 100644 (file)
@@ -715,7 +715,7 @@ static int omap_hdq_probe(struct platform_device *pdev)
        ret = _omap_hdq_reset(hdq_data);
        if (ret) {
                dev_dbg(&pdev->dev, "reset failed\n");
-               return -EINVAL;
+               goto err_irq;
        }
 
        rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
index cfe74d09932e09f441c957a06662d97faccfd967..0ef9f2663dbd1f81b5f08b1b667c06303f7fa46b 100644 (file)
@@ -16,6 +16,14 @@ config W1_SLAVE_SMEM
          Say Y here if you want to connect 1-wire
          simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire.
 
+config W1_SLAVE_DS2405
+       tristate "DS2405 Addressable Switch"
+       help
+         Say Y or M here if you want to use a DS2405 1-wire
+         single-channel addressable switch.
+         This device can also work as a single-channel
+         binary remote sensor.
+
 config W1_SLAVE_DS2408
        tristate "8-Channel Addressable Switch (IO Expander) 0x29 family support (DS2408)"
        help
index 1e9989afe7bf74d72ab93375e71ddb985a3bcb39..b4a358955ef9b89a2bca762be87f3f2f0d78497d 100644 (file)
@@ -4,6 +4,7 @@
 
 obj-$(CONFIG_W1_SLAVE_THERM)   += w1_therm.o
 obj-$(CONFIG_W1_SLAVE_SMEM)    += w1_smem.o
+obj-$(CONFIG_W1_SLAVE_DS2405)  += w1_ds2405.o
 obj-$(CONFIG_W1_SLAVE_DS2408)  += w1_ds2408.o
 obj-$(CONFIG_W1_SLAVE_DS2413)  += w1_ds2413.o
 obj-$(CONFIG_W1_SLAVE_DS2406)  += w1_ds2406.o
diff --git a/drivers/w1/slaves/w1_ds2405.c b/drivers/w1/slaves/w1_ds2405.c
new file mode 100644 (file)
index 0000000..d5d5487
--- /dev/null
@@ -0,0 +1,227 @@
+/*
+ *     w1_ds2405.c
+ *
+ * Copyright (c) 2017 Maciej S. Szmigiero <mail@maciej.szmigiero.name>
+ * Based on w1_therm.c copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "../w1.h"
+#include "../w1_family.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maciej S. Szmigiero <mail@maciej.szmigiero.name>");
+MODULE_DESCRIPTION("Driver for 1-wire Dallas DS2405 PIO.");
+MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2405));
+
+static int w1_ds2405_select(struct w1_slave *sl, bool only_active)
+{
+       struct w1_master *dev = sl->master;
+
+       u64 dev_addr = le64_to_cpu(*(u64 *)&sl->reg_num);
+       unsigned int bit_ctr;
+
+       if (w1_reset_bus(dev) != 0)
+               return 0;
+
+       /*
+        * We cannot use a normal Match ROM command
+        * since doing so would toggle PIO state
+        */
+       w1_write_8(dev, only_active ? W1_ALARM_SEARCH : W1_SEARCH);
+
+       for (bit_ctr = 0; bit_ctr < 64; bit_ctr++) {
+               int bit2send = !!(dev_addr & BIT(bit_ctr));
+               u8 ret;
+
+               ret = w1_triplet(dev, bit2send);
+
+               if ((ret & (BIT(0) | BIT(1))) ==
+                   (BIT(0) | BIT(1))) /* no devices found */
+                       return 0;
+
+               if (!!(ret & BIT(2)) != bit2send)
+                       /* wrong direction taken - no such device */
+                       return 0;
+       }
+
+       return 1;
+}
+
+static int w1_ds2405_read_pio(struct w1_slave *sl)
+{
+       if (w1_ds2405_select(sl, true))
+               return 0; /* "active" means PIO is low */
+
+       if (w1_ds2405_select(sl, false))
+               return 1;
+
+       return -ENODEV;
+}
+
+static ssize_t state_show(struct device *device,
+                         struct device_attribute *attr, char *buf)
+{
+       struct w1_slave *sl = dev_to_w1_slave(device);
+       struct w1_master *dev = sl->master;
+
+       int ret;
+       ssize_t f_retval;
+       u8 state;
+
+       ret = mutex_lock_interruptible(&dev->bus_mutex);
+       if (ret)
+               return ret;
+
+       if (!w1_ds2405_select(sl, false)) {
+               f_retval = -ENODEV;
+               goto out_unlock;
+       }
+
+       state = w1_read_8(dev);
+       if (state != 0 &&
+           state != 0xff) {
+               dev_err(device, "non-consistent state %x\n", state);
+               f_retval = -EIO;
+               goto out_unlock;
+       }
+
+       *buf = state ? '1' : '0';
+       f_retval = 1;
+
+out_unlock:
+       w1_reset_bus(dev);
+       mutex_unlock(&dev->bus_mutex);
+
+       return f_retval;
+}
+
+static ssize_t output_show(struct device *device,
+                          struct device_attribute *attr, char *buf)
+{
+       struct w1_slave *sl = dev_to_w1_slave(device);
+       struct w1_master *dev = sl->master;
+
+       int ret;
+       ssize_t f_retval;
+
+       ret = mutex_lock_interruptible(&dev->bus_mutex);
+       if (ret)
+               return ret;
+
+       ret = w1_ds2405_read_pio(sl);
+       if (ret < 0) {
+               f_retval = ret;
+               goto out_unlock;
+       }
+
+       *buf = ret ? '1' : '0';
+       f_retval = 1;
+
+out_unlock:
+       w1_reset_bus(dev);
+       mutex_unlock(&dev->bus_mutex);
+
+       return f_retval;
+}
+
+static ssize_t output_store(struct device *device,
+                           struct device_attribute *attr,
+                           const char *buf, size_t count)
+{
+       struct w1_slave *sl = dev_to_w1_slave(device);
+       struct w1_master *dev = sl->master;
+
+       int ret, current_pio;
+       unsigned int val;
+       ssize_t f_retval;
+
+       if (count < 1)
+               return -EINVAL;
+
+       if (sscanf(buf, " %u%n", &val, &ret) < 1)
+               return -EINVAL;
+
+       if (val != 0 && val != 1)
+               return -EINVAL;
+
+       f_retval = ret;
+
+       ret = mutex_lock_interruptible(&dev->bus_mutex);
+       if (ret)
+               return ret;
+
+       current_pio = w1_ds2405_read_pio(sl);
+       if (current_pio < 0) {
+               f_retval = current_pio;
+               goto out_unlock;
+       }
+
+       if (current_pio == val)
+               goto out_unlock;
+
+       if (w1_reset_bus(dev) != 0) {
+               f_retval = -ENODEV;
+               goto out_unlock;
+       }
+
+       /*
+        * can't use w1_reset_select_slave() here since it uses Skip ROM if
+        * there is only one device on bus
+        */
+       do {
+               u64 dev_addr = le64_to_cpu(*(u64 *)&sl->reg_num);
+               u8 cmd[9];
+
+               cmd[0] = W1_MATCH_ROM;
+               memcpy(&cmd[1], &dev_addr, sizeof(dev_addr));
+
+               w1_write_block(dev, cmd, sizeof(cmd));
+       } while (0);
+
+out_unlock:
+       w1_reset_bus(dev);
+       mutex_unlock(&dev->bus_mutex);
+
+       return f_retval;
+}
+
+static DEVICE_ATTR_RO(state);
+static DEVICE_ATTR_RW(output);
+
+static struct attribute *w1_ds2405_attrs[] = {
+       &dev_attr_state.attr,
+       &dev_attr_output.attr,
+       NULL
+};
+
+ATTRIBUTE_GROUPS(w1_ds2405);
+
+static struct w1_family_ops w1_ds2405_fops = {
+       .groups = w1_ds2405_groups
+};
+
+static struct w1_family w1_family_ds2405 = {
+       .fid = W1_FAMILY_DS2405,
+       .fops = &w1_ds2405_fops
+};
+
+module_w1_family(w1_family_ds2405);
index e213c678bbfe0cee97afe47a6f082ca4b7ed9f2f..90a3d9338fd282a5bc1c8a678c4a505544b86f70 100644 (file)
@@ -1,9 +1,6 @@
 /*
- *     w1.c
- *
  * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
  *
- *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #include <linux/delay.h>
@@ -763,6 +756,7 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
                dev_err(&dev->dev, "%s: Attaching %s failed.\n", __func__,
                         sl->name);
                w1_family_put(sl->family);
+               atomic_dec(&sl->master->refcnt);
                kfree(sl);
                return err;
        }
index 129895f562b06630b464f2579dc9e5bdc105f35e..758a7a6322e983d527c1f26e4262a5f853b877bc 100644 (file)
@@ -1,9 +1,6 @@
 /*
- *     w1.h
- *
  * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
  *
- *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #ifndef __W1_H
index 1dc3051f7d76171e0b9b7b8a3879899086cf5ac6..df1c9bb90eb50b4b4863da49e3a40d38442713a9 100644 (file)
@@ -1,9 +1,6 @@
 /*
- *     w1_family.c
- *
  * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
  *
- *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #include <linux/spinlock.h>
index 10a7a0767187c52d67a41c3fcffa7ecd38c80e63..c4a6b257a367ffc9e4ce0cf108623ca052c4d84e 100644 (file)
@@ -1,9 +1,6 @@
 /*
- *     w1_family.h
- *
  * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
  *
- *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #ifndef __W1_FAMILY_H
@@ -30,6 +23,7 @@
 #define W1_FAMILY_BQ27000      0x01
 #define W1_FAMILY_SMEM_01      0x01
 #define W1_FAMILY_SMEM_81      0x81
+#define W1_FAMILY_DS2405       0x05
 #define W1_THERM_DS18S20       0x10
 #define W1_FAMILY_DS28E04      0x1C
 #define W1_COUNTER_DS2423      0x1D
index 20f766afa4c7d7b8116565687541f84184fe61f4..4ce1b66d5092fb2fd96c145b4bc7a3b81b3dc4db 100644 (file)
@@ -1,9 +1,6 @@
 /*
- *     w1_int.c
- *
  * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
  *
- *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #include <linux/kernel.h>
index 2ad7d4414bed8b9052eb44aac75d43b04a9a9c81..371989159216456e7f7b76895490f81b42cae424 100644 (file)
@@ -1,9 +1,6 @@
 /*
- *     w1_int.h
- *
  * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
  *
- *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #ifndef __W1_INT_H
index f4bc8c100a01b1f04a62ebca2528c51c05f40520..de8bebc278967552222d025dc143054af040a8af 100644 (file)
@@ -1,9 +1,6 @@
 /*
- *     w1_io.c
- *
  * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
  *
- *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #include <asm/io.h>
@@ -233,6 +226,7 @@ u8 w1_triplet(struct w1_master *dev, int bdir)
                return retval;
        }
 }
+EXPORT_SYMBOL_GPL(w1_triplet);
 
 /**
  * w1_read_8() - Reads 8 bits.
index f9eecff23b8d088e9496984abf0c6fd89c12fce0..dd1422b6afbb11a9fb2376621dff049378c4fc42 100644 (file)
@@ -1,9 +1,6 @@
 /*
- *     w1_log.h
- *
  * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
  *
- *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #ifndef __W1_LOG_H
index 881597a191b89c3ba4a1482f5a581efe00cdd668..49e520ca79c5395215701d507c8323391cc6ce93 100644 (file)
@@ -1,9 +1,6 @@
 /*
- * w1_netlink.c
- *
  * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net>
  *
- *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #include <linux/slab.h>
index c99a9ce05e62110069c78adaa5b1066244086c29..b389e5ff5fa503435b9ac5dd21c01dee6e51a4e5 100644 (file)
@@ -1,9 +1,6 @@
 /*
- * w1_netlink.h
- *
  * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net>
  *
- *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #ifndef __W1_NETLINK_H
index b871c0cb1f02b277df53f835a1d51fe3c99fff28..7010fb01a81a342e20abd9ac917d5c3f7aa3de14 100644 (file)
 #define EXTCON_USB             1
 #define EXTCON_USB_HOST                2
 
-/* Charging external connector */
+/*
+ * Charging external connector
+ *
+ * When an SDP charger connector is reported, the USB connector should be
+ * reported as well, i.e. EXTCON_CHG_USB_SDP should always appear together
+ * with EXTCON_USB. Likewise, the ACA charger connector EXTCON_CHG_USB_ACA
+ * would normally appear together with EXTCON_USB_HOST.
+ *
+ * The EXTCON_CHG_USB_SLOW connector can provide at least 500mA of
+ * current at 5V. The EXTCON_CHG_USB_FAST connector can provide at
+ * least 1A of current at 5V.
+ */
 #define EXTCON_CHG_USB_SDP     5       /* Standard Downstream Port */
 #define EXTCON_CHG_USB_DCP     6       /* Dedicated Charging Port */
 #define EXTCON_CHG_USB_CDP     7       /* Charging Downstream Port */
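
A minimal sketch of the pairing rule documented in the comment above, assuming a provider driver that registered its extcon_dev with both EXTCON_USB and EXTCON_CHG_USB_SDP in its supported_cable array; report_sdp() is a hypothetical helper called from the detection path.

#include <linux/extcon.h>

static void report_sdp(struct extcon_dev *edev, bool attached)
{
        /* an SDP charger implies a USB data connection, so report both */
        extcon_set_state_sync(edev, EXTCON_CHG_USB_SDP, attached);
        extcon_set_state_sync(edev, EXTCON_USB, attached);
}
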
@@ -54,6 +65,7 @@
 #define EXTCON_CHG_USB_FAST    9
 #define EXTCON_CHG_USB_SLOW    10
 #define EXTCON_CHG_WPT         11      /* Wireless Power Transfer */
+#define EXTCON_CHG_USB_PD      12      /* USB Power Delivery */
 
 /* Jack external connector */
 #define EXTCON_JACK_MICROPHONE 20
@@ -160,62 +172,7 @@ union extcon_property_value {
 };
 
 struct extcon_cable;
-
-/**
- * struct extcon_dev - An extcon device represents one external connector.
- * @name:              The name of this extcon device. Parent device name is
- *                     used if NULL.
- * @supported_cable:   Array of supported cable names ending with EXTCON_NONE.
- *                     If supported_cable is NULL, cable name related APIs
- *                     are disabled.
- * @mutually_exclusive:        Array of mutually exclusive set of cables that cannot
- *                     be attached simultaneously. The array should be
- *                     ending with NULL or be NULL (no mutually exclusive
- *                     cables). For example, if it is { 0x7, 0x30, 0}, then,
- *                     {0, 1}, {0, 1, 2}, {0, 2}, {1, 2}, or {4, 5} cannot
- *                     be attached simulataneously. {0x7, 0} is equivalent to
- *                     {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
- *                     can be no simultaneous connections.
- * @dev:               Device of this extcon.
- * @state:             Attach/detach state of this extcon. Do not provide at
- *                     register-time.
- * @nh:                        Notifier for the state change events from this extcon
- * @entry:             To support list of extcon devices so that users can
- *                     search for extcon devices based on the extcon name.
- * @lock:
- * @max_supported:     Internal value to store the number of cables.
- * @extcon_dev_type:   Device_type struct to provide attribute_groups
- *                     customized for each extcon device.
- * @cables:            Sysfs subdirectories. Each represents one cable.
- *
- * In most cases, users only need to provide "User initializing data" of
- * this struct when registering an extcon. In some exceptional cases,
- * optional callbacks may be needed. However, the values in "internal data"
- * are overwritten by register function.
- */
-struct extcon_dev {
-       /* Optional user initializing data */
-       const char *name;
-       const unsigned int *supported_cable;
-       const u32 *mutually_exclusive;
-
-       /* Internal data. Please do not set. */
-       struct device dev;
-       struct raw_notifier_head *nh;
-       struct list_head entry;
-       int max_supported;
-       spinlock_t lock;        /* could be called by irq handler */
-       u32 state;
-
-       /* /sys/class/extcon/.../cable.n/... */
-       struct device_type extcon_dev_type;
-       struct extcon_cable *cables;
-
-       /* /sys/class/extcon/.../mutually_exclusive/... */
-       struct attribute_group attr_g_muex;
-       struct attribute **attrs_muex;
-       struct device_attribute *d_attrs_muex;
-};
+struct extcon_dev;
 
 #if IS_ENABLED(CONFIG_EXTCON)
 
index a0e03b13b449dbaa721abae03977470e571afc39..2aa32075bca15bc29302d02836057ebc0880512d 100644 (file)
@@ -59,7 +59,7 @@ struct adc_jack_pdata {
        const char *name;
        const char *consumer_channel;
 
-       const enum extcon *cable_names;
+       const unsigned int *cable_names;
 
        /* The last entry's state should be 0 */
        struct adc_jack_cond *adc_conditions;
index 16551d5eac36a70a1fefd1a0ee4d997df18d15c1..57beb5d09bfcb2b876266ba89c25e7a9ed81734b 100644 (file)
@@ -22,6 +22,7 @@
 #define _LINUX_FPGA_MGR_H
 
 struct fpga_manager;
+struct sg_table;
 
 /**
  * enum fpga_mgr_states - fpga framework states
@@ -88,6 +89,7 @@ struct fpga_image_info {
  * @state: returns an enum value of the FPGA's state
 * @write_init: prepare the FPGA to receive configuration data
  * @write: write count bytes of configuration data to the FPGA
+ * @write_sg: write the scatter list of configuration data to the FPGA
  * @write_complete: set FPGA to operating state after writing is done
  * @fpga_remove: optional: Set FPGA into a specific state during driver remove
  *
@@ -102,6 +104,7 @@ struct fpga_manager_ops {
                          struct fpga_image_info *info,
                          const char *buf, size_t count);
        int (*write)(struct fpga_manager *mgr, const char *buf, size_t count);
+       int (*write_sg)(struct fpga_manager *mgr, struct sg_table *sgt);
        int (*write_complete)(struct fpga_manager *mgr,
                              struct fpga_image_info *info);
        void (*fpga_remove)(struct fpga_manager *mgr);
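
A sketch of a low-level driver supplying the new write_sg callback, assuming hardware that consumes the bitstream through a byte-wide FIFO register; struct my_fpga_priv, MY_FPGA_FIFO and my_fpga_write_sg() are hypothetical. The function would be wired up as .write_sg in the driver's fpga_manager_ops next to its existing state/write_init/write_complete callbacks.

#include <linux/fpga/fpga-mgr.h>
#include <linux/io.h>
#include <linux/scatterlist.h>

struct my_fpga_priv {                   /* hypothetical driver state */
        void __iomem *base;
};

#define MY_FPGA_FIFO    0x10            /* hypothetical data FIFO offset */

static int my_fpga_write_sg(struct fpga_manager *mgr, struct sg_table *sgt)
{
        struct my_fpga_priv *priv = mgr->priv;
        struct sg_mapping_iter miter;
        size_t i;

        /* walk the scatterlist one mapped chunk at a time, no bounce buffer */
        sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG);
        while (sg_miter_next(&miter)) {
                const u8 *buf = miter.addr;

                for (i = 0; i < miter.length; i++)
                        writeb(buf[i], priv->base + MY_FPGA_FIFO);
        }
        sg_miter_stop(&miter);

        return 0;
}
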
@@ -129,6 +132,8 @@ struct fpga_manager {
 
 int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
                      const char *buf, size_t count);
+int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, struct fpga_image_info *info,
+                        struct sg_table *sgt);
 
 int fpga_mgr_firmware_load(struct fpga_manager *mgr,
                           struct fpga_image_info *info,
diff --git a/include/linux/fsi.h b/include/linux/fsi.h
new file mode 100644 (file)
index 0000000..273cbf6
--- /dev/null
@@ -0,0 +1,50 @@
+/* FSI device & driver interfaces
+ *
+ * Copyright (C) IBM Corporation 2016
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef LINUX_FSI_H
+#define LINUX_FSI_H
+
+#include <linux/device.h>
+
+struct fsi_device {
+       struct device           dev;
+       u8                      engine_type;
+       u8                      version;
+};
+
+struct fsi_device_id {
+       u8      engine_type;
+       u8      version;
+};
+
+#define FSI_VERSION_ANY                0
+
+#define FSI_DEVICE(t) \
+       .engine_type = (t), .version = FSI_VERSION_ANY,
+
+#define FSI_DEVICE_VERSIONED(t, v) \
+       .engine_type = (t), .version = (v),
+
+
+struct fsi_driver {
+       struct device_driver            drv;
+       const struct fsi_device_id      *id_table;
+};
+
+#define to_fsi_dev(devp) container_of(devp, struct fsi_device, dev)
+#define to_fsi_drv(drvp) container_of(drvp, struct fsi_driver, drv)
+
+extern struct bus_type fsi_bus_type;
+
+#endif /* LINUX_FSI_H */
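
A hypothetical client driver bound through the new FSI bus type; with only the interfaces shown in this header, plain driver_register() on fsi_bus_type is assumed (id_table matching is left to the FSI core added elsewhere in this series), and engine type 0x02 plus all "my_" names are illustrative.

#include <linux/fsi.h>
#include <linux/module.h>

static const struct fsi_device_id my_fsi_ids[] = {
        { FSI_DEVICE(0x02) },
        { },
};

static int my_fsi_probe(struct device *dev)
{
        struct fsi_device *fsi_dev = to_fsi_dev(dev);

        dev_info(dev, "engine type %#x, version %#x\n",
                 fsi_dev->engine_type, fsi_dev->version);
        return 0;
}

static struct fsi_driver my_fsi_driver = {
        .id_table = my_fsi_ids,
        .drv = {
                .name   = "my-fsi-engine",
                .bus    = &fsi_bus_type,
                .probe  = my_fsi_probe,
        },
};

static int __init my_fsi_init(void)
{
        return driver_register(&my_fsi_driver.drv);
}
module_init(my_fsi_init);

static void __exit my_fsi_exit(void)
{
        driver_unregister(&my_fsi_driver.drv);
}
module_exit(my_fsi_exit);

MODULE_LICENSE("GPL");
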
index 183efde54269e18c5d4d1eda7dc448717fe85800..62bbf3c1aa4a04409fac1a001b03a87cf0162fd1 100644 (file)
 #include <linux/scatterlist.h>
 #include <linux/list.h>
 #include <linux/timer.h>
-#include <linux/workqueue.h>
 #include <linux/completion.h>
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
-
+#include <linux/interrupt.h>
 
 #define MAX_PAGE_BUFFER_COUNT                          32
 #define MAX_MULTIPAGE_BUFFER_COUNT                     32 /* 128K */
@@ -139,8 +138,8 @@ struct hv_ring_buffer_info {
  * for the specified ring buffer
  */
 static inline void
-hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
-                         u32 *read, u32 *write)
+hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
+                            u32 *read, u32 *write)
 {
        u32 read_loc, write_loc, dsize;
 
@@ -154,7 +153,7 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
        *read = dsize - *write;
 }
 
-static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi)
+static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
 {
        u32 read_loc, write_loc, dsize, read;
 
@@ -168,7 +167,7 @@ static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi)
        return read;
 }
 
-static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi)
+static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
 {
        u32 read_loc, write_loc, dsize, write;
 
@@ -641,6 +640,7 @@ struct vmbus_channel_msginfo {
 
        /* Synchronize the request/response if needed */
        struct completion  waitevent;
+       struct vmbus_channel *waiting_channel;
        union {
                struct vmbus_channel_version_supported version_supported;
                struct vmbus_channel_open_result open_result;
@@ -683,11 +683,6 @@ struct hv_input_signal_event_buffer {
        struct hv_input_signal_event event;
 };
 
-enum hv_signal_policy {
-       HV_SIGNAL_POLICY_DEFAULT = 0,
-       HV_SIGNAL_POLICY_EXPLICIT,
-};
-
 enum hv_numa_policy {
        HV_BALANCED = 0,
        HV_LOCALIZED,
@@ -747,26 +742,27 @@ struct vmbus_channel {
 
        struct vmbus_close_msg close_msg;
 
-       /* Channel callback are invoked in this workqueue context */
-       /* HANDLE dataWorkQueue; */
-
+       /* Channel callback is invoked in softirq context */
+       struct tasklet_struct callback_event;
        void (*onchannel_callback)(void *context);
        void *channel_callback_context;
 
        /*
-        * A channel can be marked for efficient (batched)
-        * reading:
-        * If batched_reading is set to "true", we read until the
-        * channel is empty and hold off interrupts from the host
-        * during the entire read process.
-        * If batched_reading is set to "false", the client is not
-        * going to perform batched reading.
-        *
-        * By default we will enable batched reading; specific
-        * drivers that don't want this behavior can turn it off.
+        * A channel can be marked for one of three modes of reading:
+        *   BATCHED - callback called from tasklet and should read
+        *            the channel until empty. Interrupts from the host
+        *            are masked while the read is in progress (default).
+        *   DIRECT - callback called from tasklet (softirq).
+        *   ISR - callback called in interrupt context and must
+        *         invoke its own deferred processing.
+        *         Host interrupts are disabled and must be re-enabled
+        *         when ring is empty.
         */
-
-       bool batched_reading;
+       enum hv_callback_mode {
+               HV_CALL_BATCHED,
+               HV_CALL_DIRECT,
+               HV_CALL_ISR
+       } callback_mode;
 
        bool is_dedicated_interrupt;
        struct hv_input_signal_event_buffer sig_buf;
@@ -849,23 +845,6 @@ struct vmbus_channel {
         * link up channels based on their CPU affinity.
         */
        struct list_head percpu_list;
-       /*
-        * Host signaling policy: The default policy will be
-        * based on the ring buffer state. We will also support
-        * a policy where the client driver can have explicit
-        * signaling control.
-        */
-       enum hv_signal_policy  signal_policy;
-       /*
-        * On the channel send side, many of the VMBUS
-        * device drivers explicity serialize access to the
-        * outgoing ring buffer. Give more control to the
-        * VMBUS device drivers in terms how to serialize
-        * accesss to the outgoing ring buffer.
-        * The default behavior will be to aquire the
-        * ring lock to preserve the current behavior.
-        */
-       bool acquire_ring_lock;
        /*
         * For performance critical channels (storage, networking
         * etc.), Hyper-V has a mechanism to enhance the throughput
@@ -906,32 +885,22 @@ struct vmbus_channel {
 
 };
 
-static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
-{
-       c->acquire_ring_lock = state;
-}
-
 static inline bool is_hvsock_channel(const struct vmbus_channel *c)
 {
        return !!(c->offermsg.offer.chn_flags &
                  VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
 }
 
-static inline void set_channel_signal_state(struct vmbus_channel *c,
-                                           enum hv_signal_policy policy)
-{
-       c->signal_policy = policy;
-}
-
 static inline void set_channel_affinity_state(struct vmbus_channel *c,
                                              enum hv_numa_policy policy)
 {
        c->affinity_policy = policy;
 }
 
-static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
+static inline void set_channel_read_mode(struct vmbus_channel *c,
+                                       enum hv_callback_mode mode)
 {
-       c->batched_reading = state;
+       c->callback_mode = mode;
 }
 
 static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
@@ -1054,8 +1023,7 @@ extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
                                  u32 bufferLen,
                                  u64 requestid,
                                  enum vmbus_packet_type type,
-                                 u32 flags,
-                                 bool kick_q);
+                                 u32 flags);
 
 extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
                                            struct hv_page_buffer pagebuffers[],
@@ -1070,8 +1038,7 @@ extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
                                           void *buffer,
                                           u32 bufferlen,
                                           u64 requestid,
-                                          u32 flags,
-                                          bool kick_q);
+                                          u32 flags);
 
 extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
                                        struct hv_multipage_buffer *mpb,
@@ -1458,9 +1425,10 @@ struct hyperv_service_callback {
 };
 
 #define MAX_SRV_VER    0x7ffffff
-extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
-                                       struct icmsg_negotiate *, u8 *, int,
-                                       int);
+extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
+                               const int *fw_version, int fw_vercnt,
+                               const int *srv_version, int srv_vercnt,
+                               int *nego_fw_version, int *nego_srv_version);
 
 void hv_event_tasklet_disable(struct vmbus_channel *channel);
 void hv_event_tasklet_enable(struct vmbus_channel *channel);
@@ -1480,9 +1448,9 @@ void vmbus_set_event(struct vmbus_channel *channel);
 
 /* Get the start of the ring buffer. */
 static inline void *
-hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
+hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
 {
-       return (void *)ring_info->ring_buffer->buffer;
+       return ring_info->ring_buffer->buffer;
 }
 
 /*
@@ -1544,6 +1512,36 @@ init_cached_read_index(struct vmbus_channel *channel)
        rbi->cached_read_index = rbi->ring_buffer->read_index;
 }
 
+/*
+ * Mask off host interrupt callback notifications
+ */
+static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
+{
+       rbi->ring_buffer->interrupt_mask = 1;
+
+       /* make sure mask update is not reordered */
+       virt_mb();
+}
+
+/*
+ * Re-enable host callback and return number of outstanding bytes
+ */
+static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
+{
+
+       rbi->ring_buffer->interrupt_mask = 0;
+
+       /* make sure mask update is not reordered */
+       virt_mb();
+
+       /*
+        * Now check to see if the ring buffer is still empty.
+        * If it is not, we raced and we need to process new
+        * incoming messages.
+        */
+       return hv_get_bytes_to_read(rbi);
+}
+
 /*
  * An API to support in-place processing of incoming VMBUS packets.
  */
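
Taken together, the new callback_mode, set_channel_read_mode() and the hv_begin_read()/hv_end_read() helpers let a VMBus driver decide how its callback runs and mask host notifications itself while draining the ring. The sketch below is not from this commit; the channel->inbound ring-buffer-info field used here is an assumption about the surrounding struct vmbus_channel, and the drain step is left as a comment.

#include <linux/hyperv.h>

static void demo_onchannel_callback(void *context)
{
	struct vmbus_channel *channel = context;
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	for (;;) {
		hv_begin_read(rbi);	/* mask host notifications */

		/* drain all packets currently in the ring buffer here */

		if (!hv_end_read(rbi))	/* unmask; non-zero means new data raced in */
			break;
	}
}

static void demo_configure_channel(struct vmbus_channel *channel)
{
	/* BATCHED stays the default; DIRECT runs the callback straight from the tasklet */
	set_channel_read_mode(channel, HV_CALL_DIRECT);
}
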
index ed30d5d713e3400944e6247a7079ee4840a1ae94..0590263c462ceeb71124b3e5339ea68756bbdef2 100644 (file)
@@ -22,6 +22,7 @@
 /*#define ADB_MOUSE_MINOR      10      FIXME OBSOLETE */
 #define WATCHDOG_MINOR         130     /* Watchdog timer     */
 #define TEMP_MINOR             131     /* Temperature Sensor */
+#define APM_MINOR_DEV          134
 #define RTC_MINOR              135
 #define EFI_RTC_MINOR          136     /* EFI Time services */
 #define VHCI_MINOR             137
diff --git a/include/linux/platform_data/ti-aemif.h b/include/linux/platform_data/ti-aemif.h
new file mode 100644 (file)
index 0000000..ac72e11
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * TI DaVinci AEMIF platform glue.
+ *
+ * Copyright (C) 2017 BayLibre SAS
+ *
+ * Author:
+ *   Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __TI_DAVINCI_AEMIF_DATA_H__
+#define __TI_DAVINCI_AEMIF_DATA_H__
+
+#include <linux/of_platform.h>
+
+struct aemif_platform_data {
+       struct of_dev_auxdata *dev_lookup;
+};
+
+#endif /* __TI_DAVINCI_AEMIF_DATA_H__ */
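
A hypothetical example of how machine code might use this platform data to keep legacy platform_data attached to AEMIF child devices probed from the device tree; the compatible string, address and device name below are placeholders only.

#include <linux/platform_data/ti-aemif.h>

static struct of_dev_auxdata demo_aemif_lookup[] = {
	/* attach legacy pdata to a child node behind the AEMIF controller */
	OF_DEV_AUXDATA("ti,davinci-nand", 0x62000000, "davinci_nand.0", NULL),
	{ /* sentinel */ }
};

static struct aemif_platform_data demo_aemif_pdata = {
	.dev_lookup = demo_aemif_lookup,
};
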
diff --git a/include/linux/sram.h b/include/linux/sram.h
new file mode 100644 (file)
index 0000000..c97dcbe
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Generic SRAM Driver Interface
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __LINUX_SRAM_H__
+#define __LINUX_SRAM_H__
+
+struct gen_pool;
+
+#ifdef CONFIG_SRAM_EXEC
+int sram_exec_copy(struct gen_pool *pool, void *dst, void *src, size_t size);
+#else
+static inline int sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
+                                size_t size)
+{
+       return -ENODEV;
+}
+#endif /* CONFIG_SRAM_EXEC */
+#endif /* __LINUX_SRAM_H__ */
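
A minimal usage sketch, assuming the caller has already obtained the SRAM gen_pool (for example via of_gen_pool_get()) and that CONFIG_SRAM_EXEC is enabled; the helper name and error handling are illustrative only.

#include <linux/genalloc.h>
#include <linux/sram.h>

static void *demo_copy_to_sram(struct gen_pool *pool, void *fn, size_t size)
{
	void *dst;
	int ret;

	/* Reserve space in the executable SRAM pool. */
	dst = (void *)gen_pool_alloc(pool, size);
	if (!dst)
		return NULL;

	/* Copy the code into SRAM; the helper handles write/exec protections. */
	ret = sram_exec_copy(pool, dst, fn, size);
	if (ret) {
		gen_pool_free(pool, (unsigned long)dst, size);
		return NULL;
	}
	return dst;
}
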
index 8c589176c2f86a7241a37de8eb86498cec0e0577..ec5e8bf6118e7b1d99be2a5d0293c9a9babe6b1e 100644 (file)
@@ -108,7 +108,6 @@ struct vme_dev {
 };
 
 struct vme_driver {
-       struct list_head node;
        const char *name;
        int (*match)(struct vme_dev *);
        int (*probe)(struct vme_dev *);
index 1bd31a38c51edfe699f4e26a0607a882cd31f828..b724ef7005de6745bdd8856a8ab50d68bbef06ed 100644 (file)
 #define VMCI_IMR_DATAGRAM      0x1
 #define VMCI_IMR_NOTIFICATION  0x2
 
-/* Interrupt type. */
-enum {
-       VMCI_INTR_TYPE_INTX = 0,
-       VMCI_INTR_TYPE_MSI = 1,
-       VMCI_INTR_TYPE_MSIX = 2,
-};
-
 /* Maximum MSI/MSI-X interrupt vectors in the device. */
 #define VMCI_MAX_INTRS 2
 
index 41420e341e75de5f2e46cd4fac5eab9e15f68168..51f891fb1b18ad9b14713fe3a4f2a541c9ca5442 100644 (file)
@@ -33,6 +33,8 @@ enum {
        BINDER_TYPE_HANDLE      = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
        BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
        BINDER_TYPE_FD          = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+       BINDER_TYPE_FDA         = B_PACK_CHARS('f', 'd', 'a', B_TYPE_LARGE),
+       BINDER_TYPE_PTR         = B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE),
 };
 
 enum {
@@ -48,6 +50,14 @@ typedef __u64 binder_size_t;
 typedef __u64 binder_uintptr_t;
 #endif
 
+/**
+ * struct binder_object_header - header shared by all binder metadata objects.
+ * @type:      type of the object
+ */
+struct binder_object_header {
+       __u32        type;
+};
+
 /*
  * This is the flattened representation of a Binder object for transfer
  * between processes.  The 'offsets' supplied as part of a binder transaction
@@ -56,9 +66,8 @@ typedef __u64 binder_uintptr_t;
  * between processes.
  */
 struct flat_binder_object {
-       /* 8 bytes for large_flat_header. */
-       __u32           type;
-       __u32           flags;
+       struct binder_object_header     hdr;
+       __u32                           flags;
 
        /* 8 bytes of data. */
        union {
@@ -70,6 +79,84 @@ struct flat_binder_object {
        binder_uintptr_t        cookie;
 };
 
+/**
+ * struct binder_fd_object - describes a file descriptor to be fixed up.
+ * @hdr:       common header structure
+ * @pad_flags: padding to remain compatible with old userspace code
+ * @pad_binder:        padding to remain compatible with old userspace code
+ * @fd:                file descriptor
+ * @cookie:    opaque data, used by user-space
+ */
+struct binder_fd_object {
+       struct binder_object_header     hdr;
+       __u32                           pad_flags;
+       union {
+               binder_uintptr_t        pad_binder;
+               __u32                   fd;
+       };
+
+       binder_uintptr_t                cookie;
+};
+
+/* struct binder_buffer_object - object describing a userspace buffer
+ * @hdr:               common header structure
+ * @flags:             one or more BINDER_BUFFER_* flags
+ * @buffer:            address of the buffer
+ * @length:            length of the buffer
+ * @parent:            index in offset array pointing to parent buffer
+ * @parent_offset:     offset in @parent pointing to this buffer
+ *
+ * A binder_buffer_object describes a buffer that the
+ * binder kernel driver can copy verbatim to the target
+ * address space. A buffer itself may be pointed to from
+ * within another buffer, meaning that the pointer inside
+ * that other buffer needs to be fixed up as well. This
+ * can be done by setting the BINDER_BUFFER_FLAG_HAS_PARENT
+ * flag in @flags, by setting @parent to the index
+ * in the offset array pointing to the parent binder_buffer_object,
+ * and by setting @parent_offset to the offset in the parent buffer
+ * at which the pointer to this buffer is located.
+ */
+struct binder_buffer_object {
+       struct binder_object_header     hdr;
+       __u32                           flags;
+       binder_uintptr_t                buffer;
+       binder_size_t                   length;
+       binder_size_t                   parent;
+       binder_size_t                   parent_offset;
+};
+
+enum {
+       BINDER_BUFFER_FLAG_HAS_PARENT = 0x01,
+};
+
+/* struct binder_fd_array_object - object describing an array of fds in a buffer
+ * @hdr:               common header structure
+ * @num_fds:           number of file descriptors in the buffer
+ * @parent:            index in offset array to buffer holding the fd array
+ * @parent_offset:     start offset of fd array in the buffer
+ *
+ * A binder_fd_array object represents an array of file
+ * descriptors embedded in a binder_buffer_object. It is
+ * different from a regular binder_buffer_object because it
+ * describes a list of file descriptors to fix up, not an opaque
+ * blob of memory, and hence the kernel needs to treat it differently.
+ *
+ * An example of how this would be used is with Android's
+ * native_handle_t object, which is a struct with a list of integers
+ * and a list of file descriptors. The native_handle_t struct itself
+ * will be represented by a struct binder_buffer_object, whereas the
+ * embedded list of file descriptors is represented by a
+ * struct binder_fd_array_object with that binder_buffer_object as
+ * a parent.
+ */
+struct binder_fd_array_object {
+       struct binder_object_header     hdr;
+       binder_size_t                   num_fds;
+       binder_size_t                   parent;
+       binder_size_t                   parent_offset;
+};
+
 /*
  * On 64-bit platforms where user code may run in 32-bits the driver must
  * translate the buffer (and local binder) addresses appropriately.
@@ -162,6 +249,11 @@ struct binder_transaction_data {
        } data;
 };
 
+struct binder_transaction_data_sg {
+       struct binder_transaction_data transaction_data;
+       binder_size_t buffers_size;
+};
+
 struct binder_ptr_cookie {
        binder_uintptr_t ptr;
        binder_uintptr_t cookie;
@@ -346,6 +438,12 @@ enum binder_driver_command_protocol {
        /*
         * void *: cookie
         */
+
+       BC_TRANSACTION_SG = _IOW('c', 17, struct binder_transaction_data_sg),
+       BC_REPLY_SG = _IOW('c', 18, struct binder_transaction_data_sg),
+       /*
+        * binder_transaction_data_sg: the sent command.
+        */
 };
 
 #endif /* _UAPI_LINUX_BINDER_H */
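
The scatter-gather additions boil down to describing extra buffers as BINDER_TYPE_PTR objects whose pointers inside a parent buffer the kernel fixes up. The userspace-side sketch below is illustrative only: the demo structures and offsets are invented, and a real transaction would still wrap everything in a binder_transaction_data_sg (with buffers_size covering the extra buffers) sent as BC_TRANSACTION_SG through the usual binder write path.

#include <stddef.h>
#include <stdint.h>
#include <linux/android/binder.h>

struct demo_parent {
	uint64_t child_ptr;	/* pointer the kernel will fix up */
	int32_t  value;
};

static void demo_fill_sg_objects(struct demo_parent *parent, void *child,
				 size_t child_len,
				 struct binder_buffer_object *objs)
{
	/* Object 0: the parent buffer, copied verbatim to the target. */
	objs[0].hdr.type = BINDER_TYPE_PTR;
	objs[0].flags = 0;
	objs[0].buffer = (binder_uintptr_t)(uintptr_t)parent;
	objs[0].length = sizeof(*parent);

	/* Object 1: the child buffer; its pointer inside the parent gets fixed up. */
	objs[1].hdr.type = BINDER_TYPE_PTR;
	objs[1].flags = BINDER_BUFFER_FLAG_HAS_PARENT;
	objs[1].buffer = (binder_uintptr_t)(uintptr_t)child;
	objs[1].length = child_len;
	objs[1].parent = 0;	/* index of the parent object in the offsets array */
	objs[1].parent_offset = offsetof(struct demo_parent, child_ptr);
}
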
index 2655abb8f310318b39565a08dea727186747daf5..55bb6fbc294eeca90b936cd50bf1da4b7e487fb1 100644 (file)
@@ -1706,6 +1706,13 @@ config PERF_USE_VMALLOC
        help
          See tools/perf/design.txt for details
 
+config PC104
+       bool "PC/104 support"
+       help
+         Expose PC/104 form factor device drivers and options available for
+         selection and configuration. Enable this option if your target
+         machine has a PC/104 bus.
+
 menu "Kernel Performance Events And Counters"
 
 config PERF_EVENTS
index a3e8ec3fb1c50c59192abd8402601960e78d4001..09371b0a9bafb5443de9664c767d62315bdcdd1b 100644 (file)
@@ -42,12 +42,6 @@ static const struct file_operations test_fw_fops = {
        .read           = test_fw_misc_read,
 };
 
-static struct miscdevice test_fw_misc_device = {
-       .minor          = MISC_DYNAMIC_MINOR,
-       .name           = "test_firmware",
-       .fops           = &test_fw_fops,
-};
-
 static ssize_t trigger_request_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
@@ -132,39 +126,81 @@ out:
 }
 static DEVICE_ATTR_WO(trigger_async_request);
 
-static int __init test_firmware_init(void)
+static ssize_t trigger_custom_fallback_store(struct device *dev,
+                                            struct device_attribute *attr,
+                                            const char *buf, size_t count)
 {
        int rc;
+       char *name;
 
-       rc = misc_register(&test_fw_misc_device);
+       name = kstrndup(buf, count, GFP_KERNEL);
+       if (!name)
+               return -ENOSPC;
+
+       pr_info("loading '%s' using custom fallback mechanism\n", name);
+
+       mutex_lock(&test_fw_mutex);
+       release_firmware(test_firmware);
+       test_firmware = NULL;
+       rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, name,
+                                    dev, GFP_KERNEL, NULL,
+                                    trigger_async_request_cb);
        if (rc) {
-               pr_err("could not register misc device: %d\n", rc);
-               return rc;
+               pr_info("async load of '%s' failed: %d\n", name, rc);
+               kfree(name);
+               goto out;
        }
-       rc = device_create_file(test_fw_misc_device.this_device,
-                               &dev_attr_trigger_request);
-       if (rc) {
-               pr_err("could not create sysfs interface: %d\n", rc);
-               goto dereg;
+       /* Free 'name' ASAP, to test for race conditions */
+       kfree(name);
+
+       wait_for_completion(&async_fw_done);
+
+       if (test_firmware) {
+               pr_info("loaded: %zu\n", test_firmware->size);
+               rc = count;
+       } else {
+               pr_err("failed to async load firmware\n");
+               rc = -ENODEV;
        }
 
-       rc = device_create_file(test_fw_misc_device.this_device,
-                               &dev_attr_trigger_async_request);
+out:
+       mutex_unlock(&test_fw_mutex);
+
+       return rc;
+}
+static DEVICE_ATTR_WO(trigger_custom_fallback);
+
+#define TEST_FW_DEV_ATTR(name)          &dev_attr_##name.attr
+
+static struct attribute *test_dev_attrs[] = {
+       TEST_FW_DEV_ATTR(trigger_request),
+       TEST_FW_DEV_ATTR(trigger_async_request),
+       TEST_FW_DEV_ATTR(trigger_custom_fallback),
+       NULL,
+};
+
+ATTRIBUTE_GROUPS(test_dev);
+
+static struct miscdevice test_fw_misc_device = {
+       .minor          = MISC_DYNAMIC_MINOR,
+       .name           = "test_firmware",
+       .fops           = &test_fw_fops,
+       .groups         = test_dev_groups,
+};
+
+static int __init test_firmware_init(void)
+{
+       int rc;
+
+       rc = misc_register(&test_fw_misc_device);
        if (rc) {
-               pr_err("could not create async sysfs interface: %d\n", rc);
-               goto remove_file;
+               pr_err("could not register misc device: %d\n", rc);
+               return rc;
        }
 
        pr_warn("interface ready\n");
 
        return 0;
-
-remove_file:
-       device_remove_file(test_fw_misc_device.this_device,
-                          &dev_attr_trigger_async_request);
-dereg:
-       misc_deregister(&test_fw_misc_device);
-       return rc;
 }
 
 module_init(test_firmware_init);
@@ -172,10 +208,6 @@ module_init(test_firmware_init);
 static void __exit test_firmware_exit(void)
 {
        release_firmware(test_firmware);
-       device_remove_file(test_fw_misc_device.this_device,
-                          &dev_attr_trigger_async_request);
-       device_remove_file(test_fw_misc_device.this_device,
-                          &dev_attr_trigger_request);
        misc_deregister(&test_fw_misc_device);
        pr_warn("removed interface\n");
 }
index 3820f00b066a73f6ab23e68b869a2f42e46c4e89..8cd16c65d3c55ef5b504504f976597c2f6d040c6 100755 (executable)
@@ -2,7 +2,7 @@
 
 """Find Kconfig symbols that are referenced but not defined."""
 
-# (c) 2014-2016 Valentin Rothberg <valentinrothberg@gmail.com>
+# (c) 2014-2017 Valentin Rothberg <valentinrothberg@gmail.com>
 # (c) 2014 Stefan Hengelein <stefan.hengelein@fau.de>
 #
 # Licensed under the terms of the GNU GPL License version 2
@@ -24,7 +24,7 @@ SYMBOL = r"(?:\w*[A-Z0-9]\w*){2,}"
 DEF = r"^\s*(?:menu){,1}config\s+(" + SYMBOL + r")\s*"
 EXPR = r"(?:" + OPERATORS + r"|\s|" + SYMBOL + r")+"
 DEFAULT = r"default\s+.*?(?:if\s.+){,1}"
-STMT = r"^\s*(?:if|select|depends\s+on|(?:" + DEFAULT + r"))\s+" + EXPR
+STMT = r"^\s*(?:if|select|imply|depends\s+on|(?:" + DEFAULT + r"))\s+" + EXPR
 SOURCE_SYMBOL = r"(?:\W|\b)+[D]{,1}CONFIG_(" + SYMBOL + r")"
 
 # regex objects
@@ -269,7 +269,7 @@ def find_sims(symbol, ignore, defined=[]):
     """Return a list of max. ten Kconfig symbols that are string-similar to
     @symbol."""
     if defined:
-        return sorted(difflib.get_close_matches(symbol, set(defined), 10))
+        return difflib.get_close_matches(symbol, set(defined), 10)
 
     pool = Pool(cpu_count(), init_worker)
     kfiles = []
@@ -284,7 +284,7 @@ def find_sims(symbol, ignore, defined=[]):
     for res in pool.map(parse_kconfig_files, arglist):
         defined.extend(res[0])
 
-    return sorted(difflib.get_close_matches(symbol, set(defined), 10))
+    return difflib.get_close_matches(symbol, set(defined), 10)
 
 
 def get_files():
index 9bf82234855b8f5b0a831ecc97385a5dc5ef4728..1894d625af2da540803c0ef70b3502db4f1c190d 100644 (file)
@@ -3,7 +3,7 @@
 # No binaries, but make sure arg-less "make" doesn't trigger "run_tests"
 all:
 
-TEST_PROGS := fw_filesystem.sh fw_userhelper.sh
+TEST_PROGS := fw_filesystem.sh fw_fallback.sh
 
 include ../lib.mk
 
diff --git a/tools/testing/selftests/firmware/fw_fallback.sh b/tools/testing/selftests/firmware/fw_fallback.sh
new file mode 100755 (executable)
index 0000000..2e4c22d
--- /dev/null
@@ -0,0 +1,224 @@
+#!/bin/sh
+# This validates that the kernel will fall back to using the fallback mechanism
+# to load firmware it can't find on disk itself. We must request a firmware
+# that the kernel won't find, and any installed helper (e.g. udev) also
+# won't find so that we can do the load ourselves manually.
+set -e
+
+modprobe test_firmware
+
+DIR=/sys/devices/virtual/misc/test_firmware
+
+# CONFIG_FW_LOADER_USER_HELPER has a sysfs class under /sys/class/firmware/.
+# These days few kernels enable CONFIG_FW_LOADER_USER_HELPER, so check for
+# that directory as an indicator that CONFIG_FW_LOADER_USER_HELPER is set.
+HAS_FW_LOADER_USER_HELPER=$(if [ -d /sys/class/firmware/ ]; then echo yes; else echo no; fi)
+
+if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
+       OLD_TIMEOUT=$(cat /sys/class/firmware/timeout)
+else
+       echo "usermode helper disabled so ignoring test"
+       exit 0
+fi
+
+FWPATH=$(mktemp -d)
+FW="$FWPATH/test-firmware.bin"
+
+test_finish()
+{
+       echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout
+       rm -f "$FW"
+       rmdir "$FWPATH"
+}
+
+load_fw()
+{
+       local name="$1"
+       local file="$2"
+
+       # This will block until our load (below) has finished.
+       echo -n "$name" >"$DIR"/trigger_request &
+
+       # Give kernel a chance to react.
+       local timeout=10
+       while [ ! -e "$DIR"/"$name"/loading ]; do
+               sleep 0.1
+               timeout=$(( $timeout - 1 ))
+               if [ "$timeout" -eq 0 ]; then
+                       echo "$0: firmware interface never appeared" >&2
+                       exit 1
+               fi
+       done
+
+       echo 1 >"$DIR"/"$name"/loading
+       cat "$file" >"$DIR"/"$name"/data
+       echo 0 >"$DIR"/"$name"/loading
+
+       # Wait for request to finish.
+       wait
+}
+
+load_fw_cancel()
+{
+       local name="$1"
+       local file="$2"
+
+       # This will block until our load (below) has finished.
+       echo -n "$name" >"$DIR"/trigger_request 2>/dev/null &
+
+       # Give kernel a chance to react.
+       local timeout=10
+       while [ ! -e "$DIR"/"$name"/loading ]; do
+               sleep 0.1
+               timeout=$(( $timeout - 1 ))
+               if [ "$timeout" -eq 0 ]; then
+                       echo "$0: firmware interface never appeared" >&2
+                       exit 1
+               fi
+       done
+
+       echo -1 >"$DIR"/"$name"/loading
+
+       # Wait for request to finish.
+       wait
+}
+
+load_fw_custom()
+{
+       local name="$1"
+       local file="$2"
+
+       echo -n "$name" >"$DIR"/trigger_custom_fallback 2>/dev/null &
+
+       # Give kernel a chance to react.
+       local timeout=10
+       while [ ! -e "$DIR"/"$name"/loading ]; do
+               sleep 0.1
+               timeout=$(( $timeout - 1 ))
+               if [ "$timeout" -eq 0 ]; then
+                       echo "$0: firmware interface never appeared" >&2
+                       exit 1
+               fi
+       done
+
+       echo 1 >"$DIR"/"$name"/loading
+       cat "$file" >"$DIR"/"$name"/data
+       echo 0 >"$DIR"/"$name"/loading
+
+       # Wait for request to finish.
+       wait
+}
+
+
+load_fw_custom_cancel()
+{
+       local name="$1"
+       local file="$2"
+
+       echo -n "$name" >"$DIR"/trigger_custom_fallback 2>/dev/null &
+
+       # Give kernel a chance to react.
+       local timeout=10
+       while [ ! -e "$DIR"/"$name"/loading ]; do
+               sleep 0.1
+               timeout=$(( $timeout - 1 ))
+               if [ "$timeout" -eq 0 ]; then
+                       echo "$0: firmware interface never appeared" >&2
+                       exit 1
+               fi
+       done
+
+       echo -1 >"$DIR"/"$name"/loading
+
+       # Wait for request to finish.
+       wait
+}
+
+
+trap "test_finish" EXIT
+
+# This is unlikely to be real-world firmware content. :)
+echo "ABCD0123" >"$FW"
+NAME=$(basename "$FW")
+
+DEVPATH="$DIR"/"nope-$NAME"/loading
+
+# Test failure when doing nothing (timeout works).
+echo -n 2 >/sys/class/firmware/timeout
+echo -n "nope-$NAME" >"$DIR"/trigger_request 2>/dev/null &
+
+# Give the kernel some time to create the loading file; this wait must be
+# less than the timeout above.
+sleep 1
+if [ ! -f $DEVPATH ]; then
+       echo "$0: fallback mechanism immediately cancelled"
+       echo ""
+       echo "The file never appeared: $DEVPATH"
+       echo ""
+       echo "This might be a udev rule set up by your distribution"
+       echo "to immediately cancel all fallback requests; it must be"
+       echo "removed before running these tests. To confirm, look for"
+       echo "a firmware rule like /lib/udev/rules.d/50-firmware.rules"
+       echo "and see if you have something like this:"
+       echo ""
+       echo "SUBSYSTEM==\"firmware\", ACTION==\"add\", ATTR{loading}=\"-1\""
+       echo ""
+       echo "If you do, remove this file or comment out this line before"
+       echo "proceeding with these tests."
+       exit 1
+fi
+
+if diff -q "$FW" /dev/test_firmware >/dev/null ; then
+       echo "$0: firmware was not expected to match" >&2
+       exit 1
+else
+       echo "$0: timeout works"
+fi
+
+# Put timeout high enough for us to do work but not so long that failures
+# slow down this test too much.
+echo 4 >/sys/class/firmware/timeout
+
+# Load this script instead of the desired firmware.
+load_fw "$NAME" "$0"
+if diff -q "$FW" /dev/test_firmware >/dev/null ; then
+       echo "$0: firmware was not expected to match" >&2
+       exit 1
+else
+       echo "$0: firmware comparison works"
+fi
+
+# Do a proper load, which should work correctly.
+load_fw "$NAME" "$FW"
+if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
+       echo "$0: firmware was not loaded" >&2
+       exit 1
+else
+       echo "$0: fallback mechanism works"
+fi
+
+load_fw_cancel "nope-$NAME" "$FW"
+if diff -q "$FW" /dev/test_firmware >/dev/null ; then
+       echo "$0: firmware was expected to be cancelled" >&2
+       exit 1
+else
+       echo "$0: cancelling fallback mechanism works"
+fi
+
+load_fw_custom "$NAME" "$FW"
+if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
+       echo "$0: firmware was not loaded" >&2
+       exit 1
+else
+       echo "$0: custom fallback loading mechanism works"
+fi
+
+load_fw_custom_cancel "nope-$NAME" "$FW"
+if diff -q "$FW" /dev/test_firmware >/dev/null ; then
+       echo "$0: firmware was expected to be cancelled" >&2
+       exit 1
+else
+       echo "$0: cancelling custom fallback mechanism works"
+fi
+
+exit 0
diff --git a/tools/testing/selftests/firmware/fw_userhelper.sh b/tools/testing/selftests/firmware/fw_userhelper.sh
deleted file mode 100755 (executable)
index b9983f8..0000000
+++ /dev/null
@@ -1,99 +0,0 @@
-#!/bin/sh
-# This validates that the kernel will fall back to using the user helper
-# to load firmware it can't find on disk itself. We must request a firmware
-# that the kernel won't find, and any installed helper (e.g. udev) also
-# won't find so that we can do the load ourself manually.
-set -e
-
-modprobe test_firmware
-
-DIR=/sys/devices/virtual/misc/test_firmware
-
-# CONFIG_FW_LOADER_USER_HELPER has a sysfs class under /sys/class/firmware/
-# These days no one enables CONFIG_FW_LOADER_USER_HELPER so check for that
-# as an indicator for CONFIG_FW_LOADER_USER_HELPER.
-HAS_FW_LOADER_USER_HELPER=$(if [ -d /sys/class/firmware/ ]; then echo yes; else echo no; fi)
-
-if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
-       OLD_TIMEOUT=$(cat /sys/class/firmware/timeout)
-else
-       echo "usermode helper disabled so ignoring test"
-       exit 0
-fi
-
-FWPATH=$(mktemp -d)
-FW="$FWPATH/test-firmware.bin"
-
-test_finish()
-{
-       echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout
-       rm -f "$FW"
-       rmdir "$FWPATH"
-}
-
-load_fw()
-{
-       local name="$1"
-       local file="$2"
-
-       # This will block until our load (below) has finished.
-       echo -n "$name" >"$DIR"/trigger_request &
-
-       # Give kernel a chance to react.
-       local timeout=10
-       while [ ! -e "$DIR"/"$name"/loading ]; do
-               sleep 0.1
-               timeout=$(( $timeout - 1 ))
-               if [ "$timeout" -eq 0 ]; then
-                       echo "$0: firmware interface never appeared" >&2
-                       exit 1
-               fi
-       done
-
-       echo 1 >"$DIR"/"$name"/loading
-       cat "$file" >"$DIR"/"$name"/data
-       echo 0 >"$DIR"/"$name"/loading
-
-       # Wait for request to finish.
-       wait
-}
-
-trap "test_finish" EXIT
-
-# This is an unlikely real-world firmware content. :)
-echo "ABCD0123" >"$FW"
-NAME=$(basename "$FW")
-
-# Test failure when doing nothing (timeout works).
-echo 1 >/sys/class/firmware/timeout
-echo -n "$NAME" >"$DIR"/trigger_request
-if diff -q "$FW" /dev/test_firmware >/dev/null ; then
-       echo "$0: firmware was not expected to match" >&2
-       exit 1
-else
-       echo "$0: timeout works"
-fi
-
-# Put timeout high enough for us to do work but not so long that failures
-# slow down this test too much.
-echo 4 >/sys/class/firmware/timeout
-
-# Load this script instead of the desired firmware.
-load_fw "$NAME" "$0"
-if diff -q "$FW" /dev/test_firmware >/dev/null ; then
-       echo "$0: firmware was not expected to match" >&2
-       exit 1
-else
-       echo "$0: firmware comparison works"
-fi
-
-# Do a proper load, which should work correctly.
-load_fw "$NAME" "$FW"
-if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
-       echo "$0: firmware was not loaded" >&2
-       exit 1
-else
-       echo "$0: user helper firmware loading works"
-fi
-
-exit 0