Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
author     Linus Torvalds <torvalds@linux-foundation.org>
           Tue, 1 Sep 2015 00:38:39 +0000 (17:38 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Tue, 1 Sep 2015 00:38:39 +0000 (17:38 -0700)
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.3:

  API:

   - the AEAD interface transition is now complete.
   - add top-level skcipher interface (see the usage sketch below this message).

  Drivers:

   - x86-64 acceleration for chacha20/poly1305.
   - add sunxi-ss Allwinner Security System crypto accelerator.
   - add RSA algorithm to qat driver.
   - add SRIOV support to qat driver.
   - add LS1021A support to caam.
   - add i.MX6 support to caam"

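The headline API item is the new top-level skcipher interface (crypto/skcipher.c, include/crypto/skcipher.h in the file list below). What follows is a minimal, hedged sketch of a kernel-side caller using the 4.3 entry points; demo_skcipher_encrypt(), the fixed 16-byte buffer and the trimmed error handling are illustrative only, and a real caller must also handle asynchronous completion properly.

/*
 * Sketch: encrypt one 16-byte block with cbc(aes) through the new
 * skcipher API added in this merge.  Not code from the patch.
 */
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int demo_skcipher_encrypt(u8 *buf, const u8 *key, u8 *iv)
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        struct scatterlist sg;
        int err;

        tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_skcipher_setkey(tfm, key, 16);
        if (err)
                goto out_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_tfm;
        }

        sg_init_one(&sg, buf, 16);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      NULL, NULL);
        skcipher_request_set_crypt(req, &sg, &sg, 16, iv);

        /* May return -EINPROGRESS for async tfms; a real caller waits. */
        err = crypto_skcipher_encrypt(req);

        skcipher_request_free(req);
out_tfm:
        crypto_free_skcipher(tfm);
        return err;
}
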
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (163 commits)
  crypto: algif_aead - fix for multiple operations on AF_ALG sockets
  crypto: qat - enable legacy VFs
  MPI: Fix mpi_read_buffer
  crypto: qat - silence a static checker warning
  crypto: vmx - Fixing opcode issue
  crypto: caam - Use the preferred style for memory allocations
  crypto: caam - Propagate the real error code in caam_probe
  crypto: caam - Fix the error handling in caam_probe
  crypto: caam - fix writing to JQCR_MS when using service interface
  crypto: hash - Add AHASH_REQUEST_ON_STACK
  crypto: testmgr - Use new skcipher interface
  crypto: skcipher - Add top-level skcipher interface
  crypto: cmac - allow usage in FIPS mode
  crypto: sahara - Use dmam_alloc_coherent
  crypto: caam - Add support for LS1021A
  crypto: qat - Don't move data inside output buffer
  crypto: vmx - Fixing GHASH Key issue on little endian
  crypto: vmx - Fixing AES-CTR counter bug
  crypto: null - Add missing Kconfig tristate for NULL2
  crypto: nx - Add forward declaration for struct crypto_aead
  ...

156 files changed:
Documentation/DocBook/crypto-API.tmpl
Documentation/devicetree/bindings/crypto/fsl-sec4.txt
Documentation/devicetree/bindings/crypto/sun4i-ss.txt [new file with mode: 0644]
MAINTAINERS
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/imx6sx.dtsi
arch/arm/boot/dts/sun4i-a10.dtsi
arch/arm/boot/dts/sun6i-a31.dtsi
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/crypto/.gitignore
arch/arm64/crypto/aes-ce-ccm-glue.c
arch/powerpc/include/asm/switch_to.h
arch/powerpc/kernel/process.c
arch/x86/crypto/Makefile
arch/x86/crypto/aesni-intel_glue.c
arch/x86/crypto/chacha20-avx2-x86_64.S [new file with mode: 0644]
arch/x86/crypto/chacha20-ssse3-x86_64.S [new file with mode: 0644]
arch/x86/crypto/chacha20_glue.c [new file with mode: 0644]
arch/x86/crypto/poly1305-avx2-x86_64.S [new file with mode: 0644]
arch/x86/crypto/poly1305-sse2-x86_64.S [new file with mode: 0644]
arch/x86/crypto/poly1305_glue.c [new file with mode: 0644]
crypto/Kconfig
crypto/Makefile
crypto/aead.c
crypto/algapi.c
crypto/algboss.c
crypto/algif_aead.c
crypto/authenc.c
crypto/authencesn.c
crypto/ccm.c
crypto/chacha20_generic.c
crypto/chacha20poly1305.c
crypto/cryptd.c
crypto/crypto_user.c
crypto/echainiv.c
crypto/gcm.c
crypto/jitterentropy-kcapi.c
crypto/pcrypt.c
crypto/poly1305_generic.c
crypto/rsa.c
crypto/rsa_helper.c
crypto/seqiv.c
crypto/skcipher.c [new file with mode: 0644]
crypto/tcrypt.c
crypto/tcrypt.h
crypto/testmgr.c
crypto/testmgr.h
drivers/clk/imx/clk-imx6q.c
drivers/crypto/Kconfig
drivers/crypto/Makefile
drivers/crypto/amcc/crypto4xx_core.c
drivers/crypto/caam/Kconfig
drivers/crypto/caam/caamalg.c
drivers/crypto/caam/caamhash.c
drivers/crypto/caam/caamrng.c
drivers/crypto/caam/compat.h
drivers/crypto/caam/ctrl.c
drivers/crypto/caam/desc.h
drivers/crypto/caam/desc_constr.h
drivers/crypto/caam/intern.h
drivers/crypto/caam/jr.c
drivers/crypto/caam/regs.h
drivers/crypto/caam/sg_sw_sec4.h
drivers/crypto/ccp/ccp-platform.c
drivers/crypto/img-hash.c
drivers/crypto/ixp4xx_crypto.c
drivers/crypto/marvell/cesa.c
drivers/crypto/nx/Kconfig
drivers/crypto/nx/Makefile
drivers/crypto/nx/nx-842-crypto.c [deleted file]
drivers/crypto/nx/nx-842-platform.c [deleted file]
drivers/crypto/nx/nx-842-powernv.c
drivers/crypto/nx/nx-842-pseries.c
drivers/crypto/nx/nx-842.c
drivers/crypto/nx/nx-842.h
drivers/crypto/nx/nx-aes-ccm.c
drivers/crypto/nx/nx-aes-ctr.c
drivers/crypto/nx/nx-aes-gcm.c
drivers/crypto/nx/nx.c
drivers/crypto/nx/nx.h
drivers/crypto/omap-aes.c
drivers/crypto/picoxcell_crypto.c
drivers/crypto/qat/Kconfig
drivers/crypto/qat/Makefile
drivers/crypto/qat/qat_common/.gitignore [new file with mode: 0644]
drivers/crypto/qat/qat_common/Makefile
drivers/crypto/qat/qat_common/adf_accel_devices.h
drivers/crypto/qat/qat_common/adf_accel_engine.c
drivers/crypto/qat/qat_common/adf_admin.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_aer.c
drivers/crypto/qat/qat_common/adf_cfg.c
drivers/crypto/qat/qat_common/adf_cfg_common.h
drivers/crypto/qat/qat_common/adf_common_drv.h
drivers/crypto/qat/qat_common/adf_ctl_drv.c
drivers/crypto/qat/qat_common/adf_dev_mgr.c
drivers/crypto/qat/qat_common/adf_hw_arbiter.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_init.c
drivers/crypto/qat/qat_common/adf_pf2vf_msg.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_pf2vf_msg.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_sriov.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_transport.c
drivers/crypto/qat/qat_common/adf_transport_access_macros.h
drivers/crypto/qat/qat_common/icp_qat_fw.h
drivers/crypto/qat/qat_common/icp_qat_fw_pke.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/qat_algs.c
drivers/crypto/qat/qat_common/qat_asym_algs.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/qat_crypto.c
drivers/crypto/qat/qat_common/qat_crypto.h
drivers/crypto/qat/qat_common/qat_hal.c
drivers/crypto/qat/qat_common/qat_rsakey.asn1 [new file with mode: 0644]
drivers/crypto/qat/qat_common/qat_uclo.c
drivers/crypto/qat/qat_dh895xcc/Makefile
drivers/crypto/qat/qat_dh895xcc/adf_admin.c [deleted file]
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
drivers/crypto/qat/qat_dh895xcc/adf_drv.c
drivers/crypto/qat/qat_dh895xcc/adf_drv.h
drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c [deleted file]
drivers/crypto/qat/qat_dh895xcc/adf_isr.c
drivers/crypto/qat/qat_dh895xcc/qat_admin.c [deleted file]
drivers/crypto/qat/qat_dh895xccvf/Makefile [new file with mode: 0644]
drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c [new file with mode: 0644]
drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h [new file with mode: 0644]
drivers/crypto/qat/qat_dh895xccvf/adf_drv.c [new file with mode: 0644]
drivers/crypto/qat/qat_dh895xccvf/adf_drv.h [new file with mode: 0644]
drivers/crypto/qat/qat_dh895xccvf/adf_isr.c [new file with mode: 0644]
drivers/crypto/sahara.c
drivers/crypto/sunxi-ss/Makefile [new file with mode: 0644]
drivers/crypto/sunxi-ss/sun4i-ss-cipher.c [new file with mode: 0644]
drivers/crypto/sunxi-ss/sun4i-ss-core.c [new file with mode: 0644]
drivers/crypto/sunxi-ss/sun4i-ss-hash.c [new file with mode: 0644]
drivers/crypto/sunxi-ss/sun4i-ss.h [new file with mode: 0644]
drivers/crypto/talitos.c
drivers/crypto/talitos.h
drivers/crypto/vmx/aes.c
drivers/crypto/vmx/aes_cbc.c
drivers/crypto/vmx/aes_ctr.c
drivers/crypto/vmx/aesp8-ppc.pl
drivers/crypto/vmx/ghash.c
drivers/crypto/vmx/ghashp8-ppc.pl
drivers/crypto/vmx/ppc-xlate.pl
drivers/pci/quirks.c
include/crypto/aead.h
include/crypto/algapi.h
include/crypto/chacha20.h [new file with mode: 0644]
include/crypto/hash.h
include/crypto/internal/aead.h
include/crypto/internal/geniv.h
include/crypto/internal/skcipher.h
include/crypto/poly1305.h [new file with mode: 0644]
include/crypto/skcipher.h
include/dt-bindings/clock/imx6qdl-clock.h
include/linux/crypto.h
lib/mpi/mpicoder.c
net/xfrm/xfrm_algo.c

index 0992531ffefb761eb047d22003130cf45ede9584..07df23ea06e4936d6de435ba4c862ffdb4b299d1 100644 (file)
@@ -585,7 +585,7 @@ kernel crypto API                                |   IPSEC Layer
 +-----------+                                    |
 |           |            (1)
 |   aead    | <-----------------------------------  esp_output
-| (seqniv)  | ---+
+|  (seqiv)  | ---+
 +-----------+    |
                  | (2)
 +-----------+    |
@@ -1101,7 +1101,7 @@ kernel crypto API            |       Caller
     </para>
 
     <para>
-     [1] http://www.chronox.de/libkcapi.html
+     [1] <ulink url="http://www.chronox.de/libkcapi.html">http://www.chronox.de/libkcapi.html</ulink>
     </para>
 
    </sect1>
@@ -1661,7 +1661,7 @@ read(opfd, out, outlen);
     </para>
 
     <para>
-     [1] http://www.chronox.de/libkcapi.html
+     [1] <ulink url="http://www.chronox.de/libkcapi.html">http://www.chronox.de/libkcapi.html</ulink>
     </para>
 
    </sect1>
@@ -1687,7 +1687,7 @@ read(opfd, out, outlen);
 !Pinclude/linux/crypto.h Block Cipher Algorithm Definitions
 !Finclude/linux/crypto.h crypto_alg
 !Finclude/linux/crypto.h ablkcipher_alg
-!Finclude/linux/crypto.h aead_alg
+!Finclude/crypto/aead.h aead_alg
 !Finclude/linux/crypto.h blkcipher_alg
 !Finclude/linux/crypto.h cipher_alg
 !Finclude/crypto/rng.h rng_alg
index e4022776ac6e361c47a6564b4d03477835571d0d..100307304766e0e6c0e5205824e1b8e031fd2b6f 100644 (file)
@@ -106,6 +106,18 @@ PROPERTIES
           to the interrupt parent to which the child domain
           is being mapped.
 
+   - clocks
+      Usage: required if SEC 4.0 requires explicit enablement of clocks
+      Value type: <prop_encoded-array>
+      Definition:  A list of phandle and clock specifier pairs describing
+          the clocks required for enabling and disabling SEC 4.0.
+
+   - clock-names
+      Usage: required if SEC 4.0 requires explicit enablement of clocks
+      Value type: <string>
+      Definition: A list of clock name strings in the same order as the
+          clocks property.
+
    Note: All other standard properties (see the ePAPR) are allowed
    but are optional.
 
@@ -120,6 +132,11 @@ EXAMPLE
                ranges = <0 0x300000 0x10000>;
                interrupt-parent = <&mpic>;
                interrupts = <92 2>;
+               clocks = <&clks IMX6QDL_CLK_CAAM_MEM>,
+                        <&clks IMX6QDL_CLK_CAAM_ACLK>,
+                        <&clks IMX6QDL_CLK_CAAM_IPG>,
+                        <&clks IMX6QDL_CLK_EIM_SLOW>;
+               clock-names = "mem", "aclk", "ipg", "emi_slow";
        };
 
 =====================================================================
diff --git a/Documentation/devicetree/bindings/crypto/sun4i-ss.txt b/Documentation/devicetree/bindings/crypto/sun4i-ss.txt
new file mode 100644 (file)
index 0000000..5d38e9b
--- /dev/null
@@ -0,0 +1,23 @@
+* Allwinner Security System found on A20 SoC
+
+Required properties:
+- compatible : Should be "allwinner,sun4i-a10-crypto".
+- reg: Should contain the Security System register location and length.
+- interrupts: Should contain the IRQ line for the Security System.
+- clocks : List of clock specifiers, corresponding to ahb and ss.
+- clock-names : Name of the functional clock, should be
+       * "ahb" : AHB gating clock
+       * "mod" : SS controller clock
+
+Optional properties:
+ - resets : phandle + reset specifier pair
+ - reset-names : must contain "ahb"
+
+Example:
+       crypto: crypto-engine@01c15000 {
+               compatible = "allwinner,sun4i-a10-crypto";
+               reg = <0x01c15000 0x1000>;
+               interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&ahb_gates 5>, <&ss_clk>;
+               clock-names = "ahb", "mod";
+       };
index cdcd9c5d3c0e5e0a6194ec87ddddae7533c97705..e16584a5091a417b2b83bafaa01714131aa2d3a1 100644 (file)
@@ -556,6 +556,12 @@ S: Maintained
 F:     Documentation/i2c/busses/i2c-ali1563
 F:     drivers/i2c/busses/i2c-ali1563.c
 
+ALLWINNER SECURITY SYSTEM
+M:     Corentin Labbe <clabbe.montjoie@gmail.com>
+L:     linux-crypto@vger.kernel.org
+S:     Maintained
+F:     drivers/crypto/sunxi-ss/
+
 ALPHA PORT
 M:     Richard Henderson <rth@twiddle.net>
 M:     Ivan Kokshaysky <ink@jurassic.park.msu.ru>
@@ -5078,9 +5084,21 @@ T:       git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git
 S:     Maintained
 F:     arch/ia64/
 
+IBM Power VMX Cryptographic instructions
+M:     Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
+M:     Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
+L:     linux-crypto@vger.kernel.org
+S:     Supported
+F:     drivers/crypto/vmx/Makefile
+F:     drivers/crypto/vmx/Kconfig
+F:     drivers/crypto/vmx/vmx.c
+F:     drivers/crypto/vmx/aes*
+F:     drivers/crypto/vmx/ghash*
+F:     drivers/crypto/vmx/ppc-xlate.pl
+
 IBM Power in-Nest Crypto Acceleration
-M:     Marcelo Henrique Cerri <mhcerri@linux.vnet.ibm.com>
-M:     Fionnuala Gunter <fin@linux.vnet.ibm.com>
+M:     Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
+M:     Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
 L:     linux-crypto@vger.kernel.org
 S:     Supported
 F:     drivers/crypto/nx/Makefile
@@ -5092,7 +5110,7 @@ F:        drivers/crypto/nx/nx_csbcpb.h
 F:     drivers/crypto/nx/nx_debugfs.h
 
 IBM Power 842 compression accelerator
-M:     Dan Streetman <ddstreet@us.ibm.com>
+M:     Dan Streetman <ddstreet@ieee.org>
 S:     Supported
 F:     drivers/crypto/nx/Makefile
 F:     drivers/crypto/nx/Kconfig
index b57033e8c633187a5f52c367a788f46196967fdc..10d0b26c93f1dac3974df73f1ae11e2a1f535484 100644 (file)
                        reg = <0x02100000 0x100000>;
                        ranges;
 
-                       caam@02100000 {
-                               reg = <0x02100000 0x40000>;
-                               interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>,
-                                            <0 106 IRQ_TYPE_LEVEL_HIGH>;
+                       crypto: caam@2100000 {
+                               compatible = "fsl,sec-v4.0";
+                               fsl,sec-era = <4>;
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               reg = <0x2100000 0x10000>;
+                               ranges = <0 0x2100000 0x10000>;
+                               interrupt-parent = <&intc>;
+                               clocks = <&clks IMX6QDL_CLK_CAAM_MEM>,
+                                        <&clks IMX6QDL_CLK_CAAM_ACLK>,
+                                        <&clks IMX6QDL_CLK_CAAM_IPG>,
+                                        <&clks IMX6QDL_CLK_EIM_SLOW>;
+                               clock-names = "mem", "aclk", "ipg", "emi_slow";
+
+                               sec_jr0: jr0@1000 {
+                                       compatible = "fsl,sec-v4.0-job-ring";
+                                       reg = <0x1000 0x1000>;
+                                       interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+                               };
+
+                               sec_jr1: jr1@2000 {
+                                       compatible = "fsl,sec-v4.0-job-ring";
+                                       reg = <0x2000 0x1000>;
+                                       interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+                               };
                        };
 
                        aipstz@0217c000 { /* AIPSTZ2 */
index 708175d59b9c31085877da68ad378765fe701369..e6223d8e79af72d1964cdd09946a77d32dce28fc 100644 (file)
                        reg = <0x02100000 0x100000>;
                        ranges;
 
+                       crypto: caam@2100000 {
+                               compatible = "fsl,sec-v4.0";
+                               fsl,sec-era = <4>;
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               reg = <0x2100000 0x10000>;
+                               ranges = <0 0x2100000 0x10000>;
+                               interrupt-parent = <&intc>;
+                               clocks = <&clks IMX6SX_CLK_CAAM_MEM>,
+                                        <&clks IMX6SX_CLK_CAAM_ACLK>,
+                                        <&clks IMX6SX_CLK_CAAM_IPG>,
+                                        <&clks IMX6SX_CLK_EIM_SLOW>;
+                               clock-names = "mem", "aclk", "ipg", "emi_slow";
+
+                               sec_jr0: jr0@1000 {
+                                       compatible = "fsl,sec-v4.0-job-ring";
+                                       reg = <0x1000 0x1000>;
+                                       interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+                               };
+
+                               sec_jr1: jr1@2000 {
+                                       compatible = "fsl,sec-v4.0-job-ring";
+                                       reg = <0x2000 0x1000>;
+                                       interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+                               };
+                       };
+
                        usbotg1: usb@02184000 {
                                compatible = "fsl,imx6sx-usb", "fsl,imx27-usb";
                                reg = <0x02184000 0x200>;
index ab0e131587bb2dccdfff15c189bbb07771388129..adaa57b7a9437035445bde2e14fe958c85b5ab97 100644 (file)
                        status = "disabled";
                };
 
+               crypto: crypto-engine@01c15000 {
+                       compatible = "allwinner,sun4i-a10-crypto";
+                       reg = <0x01c15000 0x1000>;
+                       interrupts = <86>;
+                       clocks = <&ahb_gates 5>, <&ss_clk>;
+                       clock-names = "ahb", "mod";
+               };
+
                spi2: spi@01c17000 {
                        compatible = "allwinner,sun4i-a10-spi";
                        reg = <0x01c17000 0x1000>;
index 3ec456fa03a4f84c2a1ea6566c9ddf5337a8e3b1..e4d3484d97bd055f2bb1fe848049918f8d3e24e8 100644 (file)
                                             "mmc3_sample";
                };
 
+               ss_clk: clk@01c2009c {
+                       #clock-cells = <0>;
+                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       reg = <0x01c2009c 0x4>;
+                       clocks = <&osc24M>, <&pll6 0>;
+                       clock-output-names = "ss";
+               };
+
                spi0_clk: clk@01c200a0 {
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-mod0-clk";
                        #size-cells = <0>;
                };
 
+               crypto: crypto-engine@01c15000 {
+                       compatible = "allwinner,sun4i-a10-crypto";
+                       reg = <0x01c15000 0x1000>;
+                       interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&ahb1_gates 5>, <&ss_clk>;
+                       clock-names = "ahb", "mod";
+                       resets = <&ahb1_rst 5>;
+                       reset-names = "ahb";
+               };
+
                timer@01c60000 {
                        compatible = "allwinner,sun6i-a31-hstimer",
                                     "allwinner,sun7i-a20-hstimer";
index ca0b01a96c52b5762f9c97a411bc75c107c491a7..d3b2f26417aae78c59d8eeebbf8f4b28c4855d6d 100644 (file)
                        status = "disabled";
                };
 
+               crypto: crypto-engine@01c15000 {
+                       compatible = "allwinner,sun4i-a10-crypto";
+                       reg = <0x01c15000 0x1000>;
+                       interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&ahb_gates 5>, <&ss_clk>;
+                       clock-names = "ahb", "mod";
+               };
+
                spi2: spi@01c17000 {
                        compatible = "allwinner,sun4i-a10-spi";
                        reg = <0x01c17000 0x1000>;
index b47863d49ac6aaf192f4feac7111f0b7e8d21488..7569b391704e0fbd2c3c91a9a7b051a717daf9a4 100644 (file)
@@ -354,8 +354,7 @@ CONFIG_PROVE_LOCKING=y
 # CONFIG_FTRACE is not set
 # CONFIG_ARM_UNWIND is not set
 CONFIG_SECURITYFS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
+CONFIG_CRYPTO_DEV_FSL_CAAM=y
 CONFIG_CRC_CCITT=m
 CONFIG_CRC_T10DIF=y
 CONFIG_CRC7=m
index 6231d36b3635260cf0f2a7185182099ce844ccb9..31e1f538df7dee529af3cf36a30fd941314de95c 100644 (file)
@@ -1 +1,3 @@
 aesbs-core.S
+sha256-core.S
+sha512-core.S
index 3303e8a7b837c9fc033da5a94206af4bb0594ba5..f4bf2f2a014cdccf0838f01f4ba1f452d65b43f6 100644 (file)
@@ -124,7 +124,7 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
 
        ce_aes_ccm_auth_data(mac, (u8 *)&ltag, ltag.len, &macp, ctx->key_enc,
                             num_rounds(ctx));
-       scatterwalk_start(&walk, req->assoc);
+       scatterwalk_start(&walk, req->src);
 
        do {
                u32 n = scatterwalk_clamp(&walk, len);
@@ -151,6 +151,10 @@ static int ccm_encrypt(struct aead_request *req)
        struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
        struct blkcipher_desc desc = { .info = req->iv };
        struct blkcipher_walk walk;
+       struct scatterlist srcbuf[2];
+       struct scatterlist dstbuf[2];
+       struct scatterlist *src;
+       struct scatterlist *dst;
        u8 __aligned(8) mac[AES_BLOCK_SIZE];
        u8 buf[AES_BLOCK_SIZE];
        u32 len = req->cryptlen;
@@ -168,7 +172,12 @@ static int ccm_encrypt(struct aead_request *req)
        /* preserve the original iv for the final round */
        memcpy(buf, req->iv, AES_BLOCK_SIZE);
 
-       blkcipher_walk_init(&walk, req->dst, req->src, len);
+       src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen);
+       dst = src;
+       if (req->src != req->dst)
+               dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen);
+
+       blkcipher_walk_init(&walk, dst, src, len);
        err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
                                             AES_BLOCK_SIZE);
 
@@ -194,7 +203,7 @@ static int ccm_encrypt(struct aead_request *req)
                return err;
 
        /* copy authtag to end of dst */
-       scatterwalk_map_and_copy(mac, req->dst, req->cryptlen,
+       scatterwalk_map_and_copy(mac, dst, req->cryptlen,
                                 crypto_aead_authsize(aead), 1);
 
        return 0;
@@ -207,6 +216,10 @@ static int ccm_decrypt(struct aead_request *req)
        unsigned int authsize = crypto_aead_authsize(aead);
        struct blkcipher_desc desc = { .info = req->iv };
        struct blkcipher_walk walk;
+       struct scatterlist srcbuf[2];
+       struct scatterlist dstbuf[2];
+       struct scatterlist *src;
+       struct scatterlist *dst;
        u8 __aligned(8) mac[AES_BLOCK_SIZE];
        u8 buf[AES_BLOCK_SIZE];
        u32 len = req->cryptlen - authsize;
@@ -224,7 +237,12 @@ static int ccm_decrypt(struct aead_request *req)
        /* preserve the original iv for the final round */
        memcpy(buf, req->iv, AES_BLOCK_SIZE);
 
-       blkcipher_walk_init(&walk, req->dst, req->src, len);
+       src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen);
+       dst = src;
+       if (req->src != req->dst)
+               dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen);
+
+       blkcipher_walk_init(&walk, dst, src, len);
        err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
                                             AES_BLOCK_SIZE);
 
@@ -250,44 +268,42 @@ static int ccm_decrypt(struct aead_request *req)
                return err;
 
        /* compare calculated auth tag with the stored one */
-       scatterwalk_map_and_copy(buf, req->src, req->cryptlen - authsize,
+       scatterwalk_map_and_copy(buf, src, req->cryptlen - authsize,
                                 authsize, 0);
 
-       if (memcmp(mac, buf, authsize))
+       if (crypto_memneq(mac, buf, authsize))
                return -EBADMSG;
        return 0;
 }
 
-static struct crypto_alg ccm_aes_alg = {
-       .cra_name               = "ccm(aes)",
-       .cra_driver_name        = "ccm-aes-ce",
-       .cra_priority           = 300,
-       .cra_flags              = CRYPTO_ALG_TYPE_AEAD,
-       .cra_blocksize          = 1,
-       .cra_ctxsize            = sizeof(struct crypto_aes_ctx),
-       .cra_alignmask          = 7,
-       .cra_type               = &crypto_aead_type,
-       .cra_module             = THIS_MODULE,
-       .cra_aead = {
-               .ivsize         = AES_BLOCK_SIZE,
-               .maxauthsize    = AES_BLOCK_SIZE,
-               .setkey         = ccm_setkey,
-               .setauthsize    = ccm_setauthsize,
-               .encrypt        = ccm_encrypt,
-               .decrypt        = ccm_decrypt,
-       }
+static struct aead_alg ccm_aes_alg = {
+       .base = {
+               .cra_name               = "ccm(aes)",
+               .cra_driver_name        = "ccm-aes-ce",
+               .cra_priority           = 300,
+               .cra_blocksize          = 1,
+               .cra_ctxsize            = sizeof(struct crypto_aes_ctx),
+               .cra_alignmask          = 7,
+               .cra_module             = THIS_MODULE,
+       },
+       .ivsize         = AES_BLOCK_SIZE,
+       .maxauthsize    = AES_BLOCK_SIZE,
+       .setkey         = ccm_setkey,
+       .setauthsize    = ccm_setauthsize,
+       .encrypt        = ccm_encrypt,
+       .decrypt        = ccm_decrypt,
 };
 
 static int __init aes_mod_init(void)
 {
        if (!(elf_hwcap & HWCAP_AES))
                return -ENODEV;
-       return crypto_register_alg(&ccm_aes_alg);
+       return crypto_register_aead(&ccm_aes_alg);
 }
 
 static void __exit aes_mod_exit(void)
 {
-       crypto_unregister_alg(&ccm_aes_alg);
+       crypto_unregister_aead(&ccm_aes_alg);
 }
 
 module_init(aes_mod_init);
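
The ccm conversion above shows the pattern the AEAD transition imposes on every implementation this cycle: under the new API, req->src carries the associated data immediately followed by the payload, so the cipher walk must be fast-forwarded past assoclen, and the tag check moves from memcmp() to the constant-time crypto_memneq(). Condensed from the hunks above (names as in that code):

        /* New-AEAD layout: req->src = [ assoclen bytes of AD | data ]. */
        struct scatterlist srcbuf[2], dstbuf[2];
        struct scatterlist *src, *dst;

        src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen);
        dst = src;
        if (req->src != req->dst)
                dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen);

        blkcipher_walk_init(&walk, dst, src, len);
        /* ... cipher walk ... */

        /* Constant-time tag comparison instead of memcmp(). */
        if (crypto_memneq(mac, buf, authsize))
                return -EBADMSG;
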
index 58abeda64cb7afa271078497f25e3ae5a8a0e26c..15cca17cba4b9fe47c5598006f9e61214e20295c 100644 (file)
@@ -29,6 +29,7 @@ static inline void save_early_sprs(struct thread_struct *prev) {}
 
 extern void enable_kernel_fp(void);
 extern void enable_kernel_altivec(void);
+extern void enable_kernel_vsx(void);
 extern int emulate_altivec(struct pt_regs *);
 extern void __giveup_vsx(struct task_struct *);
 extern void giveup_vsx(struct task_struct *);
index 8005e18d1b40381f6b815890b0d58c99a382b4e8..64e6e9d9e656280d819da7f0c9725026b4bf83a9 100644 (file)
@@ -204,8 +204,6 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
-#if 0
-/* not currently used, but some crazy RAID module might want to later */
 void enable_kernel_vsx(void)
 {
        WARN_ON(preemptible());
@@ -220,7 +218,6 @@ void enable_kernel_vsx(void)
 #endif /* CONFIG_SMP */
 }
 EXPORT_SYMBOL(enable_kernel_vsx);
-#endif
 
 void giveup_vsx(struct task_struct *tsk)
 {
index 5a4a089e8b1fd7166e396b52917424e1d9a421b5..9a2838cf05916a132ee573ad4c0061138fda716a 100644 (file)
@@ -20,6 +20,7 @@ obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o
 obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
 obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o
 obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o
+obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha20-x86_64.o
 obj-$(CONFIG_CRYPTO_SERPENT_SSE2_X86_64) += serpent-sse2-x86_64.o
 obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
 obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
@@ -30,6 +31,7 @@ obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
 obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
 obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
 obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o
+obj-$(CONFIG_CRYPTO_POLY1305_X86_64) += poly1305-x86_64.o
 
 # These modules require assembler to support AVX.
 ifeq ($(avx_supported),yes)
@@ -60,6 +62,7 @@ blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o
 twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
 twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o
 salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
+chacha20-x86_64-y := chacha20-ssse3-x86_64.o chacha20_glue.o
 serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o
 
 ifeq ($(avx_supported),yes)
@@ -75,6 +78,7 @@ endif
 
 ifeq ($(avx2_supported),yes)
        camellia-aesni-avx2-y := camellia-aesni-avx2-asm_64.o camellia_aesni_avx2_glue.o
+       chacha20-x86_64-y += chacha20-avx2-x86_64.o
        serpent-avx2-y := serpent-avx2-asm_64.o serpent_avx2_glue.o
 endif
 
@@ -82,8 +86,10 @@ aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o
 aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o
 ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
 sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
+poly1305-x86_64-y := poly1305-sse2-x86_64.o poly1305_glue.o
 ifeq ($(avx2_supported),yes)
 sha1-ssse3-y += sha1_avx2_x86_64_asm.o
+poly1305-x86_64-y += poly1305-avx2-x86_64.o
 endif
 crc32c-intel-y := crc32c-intel_glue.o
 crc32c-intel-$(CONFIG_64BIT) += crc32c-pcl-intel-asm_64.o
index dccad38b59a8d741fe5f442b558b63de2b4d931d..3633ad6145c5229a57f40ccf30421916872d4f2a 100644 (file)
@@ -803,10 +803,7 @@ static int rfc4106_init(struct crypto_aead *aead)
                return PTR_ERR(cryptd_tfm);
 
        *ctx = cryptd_tfm;
-       crypto_aead_set_reqsize(
-               aead,
-               sizeof(struct aead_request) +
-               crypto_aead_reqsize(&cryptd_tfm->base));
+       crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
        return 0;
 }
 
@@ -955,8 +952,8 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
 
        /* Assuming we are supporting rfc4106 64-bit extended */
        /* sequence numbers We need to have the AAD length equal */
-       /* to 8 or 12 bytes */
-       if (unlikely(req->assoclen != 8 && req->assoclen != 12))
+       /* to 16 or 20 bytes */
+       if (unlikely(req->assoclen != 16 && req->assoclen != 20))
                return -EINVAL;
 
        /* IV below built */
@@ -992,9 +989,9 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
        }
 
        kernel_fpu_begin();
-       aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
-               ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
-               + ((unsigned long)req->cryptlen), auth_tag_len);
+       aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
+                         ctx->hash_subkey, assoc, req->assoclen - 8,
+                         dst + req->cryptlen, auth_tag_len);
        kernel_fpu_end();
 
        /* The authTag (aka the Integrity Check Value) needs to be written
@@ -1033,12 +1030,12 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
        struct scatter_walk dst_sg_walk;
        unsigned int i;
 
-       if (unlikely(req->assoclen != 8 && req->assoclen != 12))
+       if (unlikely(req->assoclen != 16 && req->assoclen != 20))
                return -EINVAL;
 
        /* Assuming we are supporting rfc4106 64-bit extended */
        /* sequence numbers We need to have the AAD length */
-       /* equal to 8 or 12 bytes */
+       /* equal to 16 or 20 bytes */
 
        tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
        /* IV below built */
@@ -1075,8 +1072,8 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
 
        kernel_fpu_begin();
        aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
-               ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
-               authTag, auth_tag_len);
+                         ctx->hash_subkey, assoc, req->assoclen - 8,
+                         authTag, auth_tag_len);
        kernel_fpu_end();
 
        /* Compare generated tag with passed in tag. */
@@ -1105,19 +1102,12 @@ static int rfc4106_encrypt(struct aead_request *req)
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
        struct cryptd_aead *cryptd_tfm = *ctx;
-       struct aead_request *subreq = aead_request_ctx(req);
 
-       aead_request_set_tfm(subreq, irq_fpu_usable() ?
-                                    cryptd_aead_child(cryptd_tfm) :
-                                    &cryptd_tfm->base);
+       aead_request_set_tfm(req, irq_fpu_usable() ?
+                                 cryptd_aead_child(cryptd_tfm) :
+                                 &cryptd_tfm->base);
 
-       aead_request_set_callback(subreq, req->base.flags,
-                                 req->base.complete, req->base.data);
-       aead_request_set_crypt(subreq, req->src, req->dst,
-                              req->cryptlen, req->iv);
-       aead_request_set_ad(subreq, req->assoclen);
-
-       return crypto_aead_encrypt(subreq);
+       return crypto_aead_encrypt(req);
 }
 
 static int rfc4106_decrypt(struct aead_request *req)
@@ -1125,19 +1115,12 @@ static int rfc4106_decrypt(struct aead_request *req)
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
        struct cryptd_aead *cryptd_tfm = *ctx;
-       struct aead_request *subreq = aead_request_ctx(req);
-
-       aead_request_set_tfm(subreq, irq_fpu_usable() ?
-                                    cryptd_aead_child(cryptd_tfm) :
-                                    &cryptd_tfm->base);
 
-       aead_request_set_callback(subreq, req->base.flags,
-                                 req->base.complete, req->base.data);
-       aead_request_set_crypt(subreq, req->src, req->dst,
-                              req->cryptlen, req->iv);
-       aead_request_set_ad(subreq, req->assoclen);
+       aead_request_set_tfm(req, irq_fpu_usable() ?
+                                 cryptd_aead_child(cryptd_tfm) :
+                                 &cryptd_tfm->base);
 
-       return crypto_aead_decrypt(subreq);
+       return crypto_aead_decrypt(req);
 }
 #endif
 
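The assoclen changes above follow from the new AEAD convention for rfc4106: the 8-byte sequence-number IV now travels at the tail of the associated data, so callers pass 16 or 20 bytes (8 or 12 bytes of real AAD plus the IV) and the glue code hands assoclen - 8 to the GCM core. Schematically (a sketch, not code from the patch):

        /*
         * rfc4106 request layout under the converted AEAD API:
         *
         *   req->src      = [ AAD (8 or 12 bytes) | IV (8 bytes) | payload ]
         *   req->assoclen = 16 or 20   (real AAD plus the 8-byte IV)
         *
         * Only the real AAD is fed to GHASH, hence the "req->assoclen - 8"
         * passed to aesni_gcm_enc_tfm()/aesni_gcm_dec_tfm() above.
         */
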
diff --git a/arch/x86/crypto/chacha20-avx2-x86_64.S b/arch/x86/crypto/chacha20-avx2-x86_64.S
new file mode 100644 (file)
index 0000000..16694e6
--- /dev/null
@@ -0,0 +1,443 @@
+/*
+ * ChaCha20 256-bit cipher algorithm, RFC7539, x64 AVX2 functions
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+
+.data
+.align 32
+
+ROT8:  .octa 0x0e0d0c0f0a09080b0605040702010003
+       .octa 0x0e0d0c0f0a09080b0605040702010003
+ROT16: .octa 0x0d0c0f0e09080b0a0504070601000302
+       .octa 0x0d0c0f0e09080b0a0504070601000302
+CTRINC:        .octa 0x00000003000000020000000100000000
+       .octa 0x00000007000000060000000500000004
+
+.text
+
+ENTRY(chacha20_8block_xor_avx2)
+       # %rdi: Input state matrix, s
+       # %rsi: 8 data blocks output, o
+       # %rdx: 8 data blocks input, i
+
+       # This function encrypts eight consecutive ChaCha20 blocks by loading
+       # the state matrix in AVX registers eight times. As we need some
+       # scratch registers, we save the first four registers on the stack. The
+       # algorithm performs each operation on the corresponding word of each
+       # state matrix, hence requires no word shuffling. For final XORing step
+       # we transpose the matrix by interleaving 32-, 64- and then 128-bit
+       # words, which allows us to do XOR in AVX registers. 8/16-bit word
+       # rotation is done with the slightly better performing byte shuffling,
+       # 7/12-bit word rotation uses traditional shift+OR.
+
+       vzeroupper
+       # 4 * 32 byte stack, 32-byte aligned
+       mov             %rsp, %r8
+       and             $~31, %rsp
+       sub             $0x80, %rsp
+
+       # x0..15[0-7] = s[0..15]
+       vpbroadcastd    0x00(%rdi),%ymm0
+       vpbroadcastd    0x04(%rdi),%ymm1
+       vpbroadcastd    0x08(%rdi),%ymm2
+       vpbroadcastd    0x0c(%rdi),%ymm3
+       vpbroadcastd    0x10(%rdi),%ymm4
+       vpbroadcastd    0x14(%rdi),%ymm5
+       vpbroadcastd    0x18(%rdi),%ymm6
+       vpbroadcastd    0x1c(%rdi),%ymm7
+       vpbroadcastd    0x20(%rdi),%ymm8
+       vpbroadcastd    0x24(%rdi),%ymm9
+       vpbroadcastd    0x28(%rdi),%ymm10
+       vpbroadcastd    0x2c(%rdi),%ymm11
+       vpbroadcastd    0x30(%rdi),%ymm12
+       vpbroadcastd    0x34(%rdi),%ymm13
+       vpbroadcastd    0x38(%rdi),%ymm14
+       vpbroadcastd    0x3c(%rdi),%ymm15
+       # x0..3 on stack
+       vmovdqa         %ymm0,0x00(%rsp)
+       vmovdqa         %ymm1,0x20(%rsp)
+       vmovdqa         %ymm2,0x40(%rsp)
+       vmovdqa         %ymm3,0x60(%rsp)
+
+       vmovdqa         CTRINC(%rip),%ymm1
+       vmovdqa         ROT8(%rip),%ymm2
+       vmovdqa         ROT16(%rip),%ymm3
+
+       # x12 += counter values 0-3
+       vpaddd          %ymm1,%ymm12,%ymm12
+
+       mov             $10,%ecx
+
+.Ldoubleround8:
+       # x0 += x4, x12 = rotl32(x12 ^ x0, 16)
+       vpaddd          0x00(%rsp),%ymm4,%ymm0
+       vmovdqa         %ymm0,0x00(%rsp)
+       vpxor           %ymm0,%ymm12,%ymm12
+       vpshufb         %ymm3,%ymm12,%ymm12
+       # x1 += x5, x13 = rotl32(x13 ^ x1, 16)
+       vpaddd          0x20(%rsp),%ymm5,%ymm0
+       vmovdqa         %ymm0,0x20(%rsp)
+       vpxor           %ymm0,%ymm13,%ymm13
+       vpshufb         %ymm3,%ymm13,%ymm13
+       # x2 += x6, x14 = rotl32(x14 ^ x2, 16)
+       vpaddd          0x40(%rsp),%ymm6,%ymm0
+       vmovdqa         %ymm0,0x40(%rsp)
+       vpxor           %ymm0,%ymm14,%ymm14
+       vpshufb         %ymm3,%ymm14,%ymm14
+       # x3 += x7, x15 = rotl32(x15 ^ x3, 16)
+       vpaddd          0x60(%rsp),%ymm7,%ymm0
+       vmovdqa         %ymm0,0x60(%rsp)
+       vpxor           %ymm0,%ymm15,%ymm15
+       vpshufb         %ymm3,%ymm15,%ymm15
+
+       # x8 += x12, x4 = rotl32(x4 ^ x8, 12)
+       vpaddd          %ymm12,%ymm8,%ymm8
+       vpxor           %ymm8,%ymm4,%ymm4
+       vpslld          $12,%ymm4,%ymm0
+       vpsrld          $20,%ymm4,%ymm4
+       vpor            %ymm0,%ymm4,%ymm4
+       # x9 += x13, x5 = rotl32(x5 ^ x9, 12)
+       vpaddd          %ymm13,%ymm9,%ymm9
+       vpxor           %ymm9,%ymm5,%ymm5
+       vpslld          $12,%ymm5,%ymm0
+       vpsrld          $20,%ymm5,%ymm5
+       vpor            %ymm0,%ymm5,%ymm5
+       # x10 += x14, x6 = rotl32(x6 ^ x10, 12)
+       vpaddd          %ymm14,%ymm10,%ymm10
+       vpxor           %ymm10,%ymm6,%ymm6
+       vpslld          $12,%ymm6,%ymm0
+       vpsrld          $20,%ymm6,%ymm6
+       vpor            %ymm0,%ymm6,%ymm6
+       # x11 += x15, x7 = rotl32(x7 ^ x11, 12)
+       vpaddd          %ymm15,%ymm11,%ymm11
+       vpxor           %ymm11,%ymm7,%ymm7
+       vpslld          $12,%ymm7,%ymm0
+       vpsrld          $20,%ymm7,%ymm7
+       vpor            %ymm0,%ymm7,%ymm7
+
+       # x0 += x4, x12 = rotl32(x12 ^ x0, 8)
+       vpaddd          0x00(%rsp),%ymm4,%ymm0
+       vmovdqa         %ymm0,0x00(%rsp)
+       vpxor           %ymm0,%ymm12,%ymm12
+       vpshufb         %ymm2,%ymm12,%ymm12
+       # x1 += x5, x13 = rotl32(x13 ^ x1, 8)
+       vpaddd          0x20(%rsp),%ymm5,%ymm0
+       vmovdqa         %ymm0,0x20(%rsp)
+       vpxor           %ymm0,%ymm13,%ymm13
+       vpshufb         %ymm2,%ymm13,%ymm13
+       # x2 += x6, x14 = rotl32(x14 ^ x2, 8)
+       vpaddd          0x40(%rsp),%ymm6,%ymm0
+       vmovdqa         %ymm0,0x40(%rsp)
+       vpxor           %ymm0,%ymm14,%ymm14
+       vpshufb         %ymm2,%ymm14,%ymm14
+       # x3 += x7, x15 = rotl32(x15 ^ x3, 8)
+       vpaddd          0x60(%rsp),%ymm7,%ymm0
+       vmovdqa         %ymm0,0x60(%rsp)
+       vpxor           %ymm0,%ymm15,%ymm15
+       vpshufb         %ymm2,%ymm15,%ymm15
+
+       # x8 += x12, x4 = rotl32(x4 ^ x8, 7)
+       vpaddd          %ymm12,%ymm8,%ymm8
+       vpxor           %ymm8,%ymm4,%ymm4
+       vpslld          $7,%ymm4,%ymm0
+       vpsrld          $25,%ymm4,%ymm4
+       vpor            %ymm0,%ymm4,%ymm4
+       # x9 += x13, x5 = rotl32(x5 ^ x9, 7)
+       vpaddd          %ymm13,%ymm9,%ymm9
+       vpxor           %ymm9,%ymm5,%ymm5
+       vpslld          $7,%ymm5,%ymm0
+       vpsrld          $25,%ymm5,%ymm5
+       vpor            %ymm0,%ymm5,%ymm5
+       # x10 += x14, x6 = rotl32(x6 ^ x10, 7)
+       vpaddd          %ymm14,%ymm10,%ymm10
+       vpxor           %ymm10,%ymm6,%ymm6
+       vpslld          $7,%ymm6,%ymm0
+       vpsrld          $25,%ymm6,%ymm6
+       vpor            %ymm0,%ymm6,%ymm6
+       # x11 += x15, x7 = rotl32(x7 ^ x11, 7)
+       vpaddd          %ymm15,%ymm11,%ymm11
+       vpxor           %ymm11,%ymm7,%ymm7
+       vpslld          $7,%ymm7,%ymm0
+       vpsrld          $25,%ymm7,%ymm7
+       vpor            %ymm0,%ymm7,%ymm7
+
+       # x0 += x5, x15 = rotl32(x15 ^ x0, 16)
+       vpaddd          0x00(%rsp),%ymm5,%ymm0
+       vmovdqa         %ymm0,0x00(%rsp)
+       vpxor           %ymm0,%ymm15,%ymm15
+       vpshufb         %ymm3,%ymm15,%ymm15
+       # x1 += x6, x12 = rotl32(x12 ^ x1, 16)
+       vpaddd          0x20(%rsp),%ymm6,%ymm0
+       vmovdqa         %ymm0,0x20(%rsp)
+       vpxor           %ymm0,%ymm12,%ymm12
+       vpshufb         %ymm3,%ymm12,%ymm12
+       # x2 += x7, x13 = rotl32(x13 ^ x2, 16)
+       vpaddd          0x40(%rsp),%ymm7,%ymm0
+       vmovdqa         %ymm0,0x40(%rsp)
+       vpxor           %ymm0,%ymm13,%ymm13
+       vpshufb         %ymm3,%ymm13,%ymm13
+       # x3 += x4, x14 = rotl32(x14 ^ x3, 16)
+       vpaddd          0x60(%rsp),%ymm4,%ymm0
+       vmovdqa         %ymm0,0x60(%rsp)
+       vpxor           %ymm0,%ymm14,%ymm14
+       vpshufb         %ymm3,%ymm14,%ymm14
+
+       # x10 += x15, x5 = rotl32(x5 ^ x10, 12)
+       vpaddd          %ymm15,%ymm10,%ymm10
+       vpxor           %ymm10,%ymm5,%ymm5
+       vpslld          $12,%ymm5,%ymm0
+       vpsrld          $20,%ymm5,%ymm5
+       vpor            %ymm0,%ymm5,%ymm5
+       # x11 += x12, x6 = rotl32(x6 ^ x11, 12)
+       vpaddd          %ymm12,%ymm11,%ymm11
+       vpxor           %ymm11,%ymm6,%ymm6
+       vpslld          $12,%ymm6,%ymm0
+       vpsrld          $20,%ymm6,%ymm6
+       vpor            %ymm0,%ymm6,%ymm6
+       # x8 += x13, x7 = rotl32(x7 ^ x8, 12)
+       vpaddd          %ymm13,%ymm8,%ymm8
+       vpxor           %ymm8,%ymm7,%ymm7
+       vpslld          $12,%ymm7,%ymm0
+       vpsrld          $20,%ymm7,%ymm7
+       vpor            %ymm0,%ymm7,%ymm7
+       # x9 += x14, x4 = rotl32(x4 ^ x9, 12)
+       vpaddd          %ymm14,%ymm9,%ymm9
+       vpxor           %ymm9,%ymm4,%ymm4
+       vpslld          $12,%ymm4,%ymm0
+       vpsrld          $20,%ymm4,%ymm4
+       vpor            %ymm0,%ymm4,%ymm4
+
+       # x0 += x5, x15 = rotl32(x15 ^ x0, 8)
+       vpaddd          0x00(%rsp),%ymm5,%ymm0
+       vmovdqa         %ymm0,0x00(%rsp)
+       vpxor           %ymm0,%ymm15,%ymm15
+       vpshufb         %ymm2,%ymm15,%ymm15
+       # x1 += x6, x12 = rotl32(x12 ^ x1, 8)
+       vpaddd          0x20(%rsp),%ymm6,%ymm0
+       vmovdqa         %ymm0,0x20(%rsp)
+       vpxor           %ymm0,%ymm12,%ymm12
+       vpshufb         %ymm2,%ymm12,%ymm12
+       # x2 += x7, x13 = rotl32(x13 ^ x2, 8)
+       vpaddd          0x40(%rsp),%ymm7,%ymm0
+       vmovdqa         %ymm0,0x40(%rsp)
+       vpxor           %ymm0,%ymm13,%ymm13
+       vpshufb         %ymm2,%ymm13,%ymm13
+       # x3 += x4, x14 = rotl32(x14 ^ x3, 8)
+       vpaddd          0x60(%rsp),%ymm4,%ymm0
+       vmovdqa         %ymm0,0x60(%rsp)
+       vpxor           %ymm0,%ymm14,%ymm14
+       vpshufb         %ymm2,%ymm14,%ymm14
+
+       # x10 += x15, x5 = rotl32(x5 ^ x10, 7)
+       vpaddd          %ymm15,%ymm10,%ymm10
+       vpxor           %ymm10,%ymm5,%ymm5
+       vpslld          $7,%ymm5,%ymm0
+       vpsrld          $25,%ymm5,%ymm5
+       vpor            %ymm0,%ymm5,%ymm5
+       # x11 += x12, x6 = rotl32(x6 ^ x11, 7)
+       vpaddd          %ymm12,%ymm11,%ymm11
+       vpxor           %ymm11,%ymm6,%ymm6
+       vpslld          $7,%ymm6,%ymm0
+       vpsrld          $25,%ymm6,%ymm6
+       vpor            %ymm0,%ymm6,%ymm6
+       # x8 += x13, x7 = rotl32(x7 ^ x8, 7)
+       vpaddd          %ymm13,%ymm8,%ymm8
+       vpxor           %ymm8,%ymm7,%ymm7
+       vpslld          $7,%ymm7,%ymm0
+       vpsrld          $25,%ymm7,%ymm7
+       vpor            %ymm0,%ymm7,%ymm7
+       # x9 += x14, x4 = rotl32(x4 ^ x9, 7)
+       vpaddd          %ymm14,%ymm9,%ymm9
+       vpxor           %ymm9,%ymm4,%ymm4
+       vpslld          $7,%ymm4,%ymm0
+       vpsrld          $25,%ymm4,%ymm4
+       vpor            %ymm0,%ymm4,%ymm4
+
+       dec             %ecx
+       jnz             .Ldoubleround8
+
+       # x0..15[0-3] += s[0..15]
+       vpbroadcastd    0x00(%rdi),%ymm0
+       vpaddd          0x00(%rsp),%ymm0,%ymm0
+       vmovdqa         %ymm0,0x00(%rsp)
+       vpbroadcastd    0x04(%rdi),%ymm0
+       vpaddd          0x20(%rsp),%ymm0,%ymm0
+       vmovdqa         %ymm0,0x20(%rsp)
+       vpbroadcastd    0x08(%rdi),%ymm0
+       vpaddd          0x40(%rsp),%ymm0,%ymm0
+       vmovdqa         %ymm0,0x40(%rsp)
+       vpbroadcastd    0x0c(%rdi),%ymm0
+       vpaddd          0x60(%rsp),%ymm0,%ymm0
+       vmovdqa         %ymm0,0x60(%rsp)
+       vpbroadcastd    0x10(%rdi),%ymm0
+       vpaddd          %ymm0,%ymm4,%ymm4
+       vpbroadcastd    0x14(%rdi),%ymm0
+       vpaddd          %ymm0,%ymm5,%ymm5
+       vpbroadcastd    0x18(%rdi),%ymm0
+       vpaddd          %ymm0,%ymm6,%ymm6
+       vpbroadcastd    0x1c(%rdi),%ymm0
+       vpaddd          %ymm0,%ymm7,%ymm7
+       vpbroadcastd    0x20(%rdi),%ymm0
+       vpaddd          %ymm0,%ymm8,%ymm8
+       vpbroadcastd    0x24(%rdi),%ymm0
+       vpaddd          %ymm0,%ymm9,%ymm9
+       vpbroadcastd    0x28(%rdi),%ymm0
+       vpaddd          %ymm0,%ymm10,%ymm10
+       vpbroadcastd    0x2c(%rdi),%ymm0
+       vpaddd          %ymm0,%ymm11,%ymm11
+       vpbroadcastd    0x30(%rdi),%ymm0
+       vpaddd          %ymm0,%ymm12,%ymm12
+       vpbroadcastd    0x34(%rdi),%ymm0
+       vpaddd          %ymm0,%ymm13,%ymm13
+       vpbroadcastd    0x38(%rdi),%ymm0
+       vpaddd          %ymm0,%ymm14,%ymm14
+       vpbroadcastd    0x3c(%rdi),%ymm0
+       vpaddd          %ymm0,%ymm15,%ymm15
+
+       # x12 += counter values 0-3
+       vpaddd          %ymm1,%ymm12,%ymm12
+
+       # interleave 32-bit words in state n, n+1
+       vmovdqa         0x00(%rsp),%ymm0
+       vmovdqa         0x20(%rsp),%ymm1
+       vpunpckldq      %ymm1,%ymm0,%ymm2
+       vpunpckhdq      %ymm1,%ymm0,%ymm1
+       vmovdqa         %ymm2,0x00(%rsp)
+       vmovdqa         %ymm1,0x20(%rsp)
+       vmovdqa         0x40(%rsp),%ymm0
+       vmovdqa         0x60(%rsp),%ymm1
+       vpunpckldq      %ymm1,%ymm0,%ymm2
+       vpunpckhdq      %ymm1,%ymm0,%ymm1
+       vmovdqa         %ymm2,0x40(%rsp)
+       vmovdqa         %ymm1,0x60(%rsp)
+       vmovdqa         %ymm4,%ymm0
+       vpunpckldq      %ymm5,%ymm0,%ymm4
+       vpunpckhdq      %ymm5,%ymm0,%ymm5
+       vmovdqa         %ymm6,%ymm0
+       vpunpckldq      %ymm7,%ymm0,%ymm6
+       vpunpckhdq      %ymm7,%ymm0,%ymm7
+       vmovdqa         %ymm8,%ymm0
+       vpunpckldq      %ymm9,%ymm0,%ymm8
+       vpunpckhdq      %ymm9,%ymm0,%ymm9
+       vmovdqa         %ymm10,%ymm0
+       vpunpckldq      %ymm11,%ymm0,%ymm10
+       vpunpckhdq      %ymm11,%ymm0,%ymm11
+       vmovdqa         %ymm12,%ymm0
+       vpunpckldq      %ymm13,%ymm0,%ymm12
+       vpunpckhdq      %ymm13,%ymm0,%ymm13
+       vmovdqa         %ymm14,%ymm0
+       vpunpckldq      %ymm15,%ymm0,%ymm14
+       vpunpckhdq      %ymm15,%ymm0,%ymm15
+
+       # interleave 64-bit words in state n, n+2
+       vmovdqa         0x00(%rsp),%ymm0
+       vmovdqa         0x40(%rsp),%ymm2
+       vpunpcklqdq     %ymm2,%ymm0,%ymm1
+       vpunpckhqdq     %ymm2,%ymm0,%ymm2
+       vmovdqa         %ymm1,0x00(%rsp)
+       vmovdqa         %ymm2,0x40(%rsp)
+       vmovdqa         0x20(%rsp),%ymm0
+       vmovdqa         0x60(%rsp),%ymm2
+       vpunpcklqdq     %ymm2,%ymm0,%ymm1
+       vpunpckhqdq     %ymm2,%ymm0,%ymm2
+       vmovdqa         %ymm1,0x20(%rsp)
+       vmovdqa         %ymm2,0x60(%rsp)
+       vmovdqa         %ymm4,%ymm0
+       vpunpcklqdq     %ymm6,%ymm0,%ymm4
+       vpunpckhqdq     %ymm6,%ymm0,%ymm6
+       vmovdqa         %ymm5,%ymm0
+       vpunpcklqdq     %ymm7,%ymm0,%ymm5
+       vpunpckhqdq     %ymm7,%ymm0,%ymm7
+       vmovdqa         %ymm8,%ymm0
+       vpunpcklqdq     %ymm10,%ymm0,%ymm8
+       vpunpckhqdq     %ymm10,%ymm0,%ymm10
+       vmovdqa         %ymm9,%ymm0
+       vpunpcklqdq     %ymm11,%ymm0,%ymm9
+       vpunpckhqdq     %ymm11,%ymm0,%ymm11
+       vmovdqa         %ymm12,%ymm0
+       vpunpcklqdq     %ymm14,%ymm0,%ymm12
+       vpunpckhqdq     %ymm14,%ymm0,%ymm14
+       vmovdqa         %ymm13,%ymm0
+       vpunpcklqdq     %ymm15,%ymm0,%ymm13
+       vpunpckhqdq     %ymm15,%ymm0,%ymm15
+
+       # interleave 128-bit words in state n, n+4
+       vmovdqa         0x00(%rsp),%ymm0
+       vperm2i128      $0x20,%ymm4,%ymm0,%ymm1
+       vperm2i128      $0x31,%ymm4,%ymm0,%ymm4
+       vmovdqa         %ymm1,0x00(%rsp)
+       vmovdqa         0x20(%rsp),%ymm0
+       vperm2i128      $0x20,%ymm5,%ymm0,%ymm1
+       vperm2i128      $0x31,%ymm5,%ymm0,%ymm5
+       vmovdqa         %ymm1,0x20(%rsp)
+       vmovdqa         0x40(%rsp),%ymm0
+       vperm2i128      $0x20,%ymm6,%ymm0,%ymm1
+       vperm2i128      $0x31,%ymm6,%ymm0,%ymm6
+       vmovdqa         %ymm1,0x40(%rsp)
+       vmovdqa         0x60(%rsp),%ymm0
+       vperm2i128      $0x20,%ymm7,%ymm0,%ymm1
+       vperm2i128      $0x31,%ymm7,%ymm0,%ymm7
+       vmovdqa         %ymm1,0x60(%rsp)
+       vperm2i128      $0x20,%ymm12,%ymm8,%ymm0
+       vperm2i128      $0x31,%ymm12,%ymm8,%ymm12
+       vmovdqa         %ymm0,%ymm8
+       vperm2i128      $0x20,%ymm13,%ymm9,%ymm0
+       vperm2i128      $0x31,%ymm13,%ymm9,%ymm13
+       vmovdqa         %ymm0,%ymm9
+       vperm2i128      $0x20,%ymm14,%ymm10,%ymm0
+       vperm2i128      $0x31,%ymm14,%ymm10,%ymm14
+       vmovdqa         %ymm0,%ymm10
+       vperm2i128      $0x20,%ymm15,%ymm11,%ymm0
+       vperm2i128      $0x31,%ymm15,%ymm11,%ymm15
+       vmovdqa         %ymm0,%ymm11
+
+       # xor with corresponding input, write to output
+       vmovdqa         0x00(%rsp),%ymm0
+       vpxor           0x0000(%rdx),%ymm0,%ymm0
+       vmovdqu         %ymm0,0x0000(%rsi)
+       vmovdqa         0x20(%rsp),%ymm0
+       vpxor           0x0080(%rdx),%ymm0,%ymm0
+       vmovdqu         %ymm0,0x0080(%rsi)
+       vmovdqa         0x40(%rsp),%ymm0
+       vpxor           0x0040(%rdx),%ymm0,%ymm0
+       vmovdqu         %ymm0,0x0040(%rsi)
+       vmovdqa         0x60(%rsp),%ymm0
+       vpxor           0x00c0(%rdx),%ymm0,%ymm0
+       vmovdqu         %ymm0,0x00c0(%rsi)
+       vpxor           0x0100(%rdx),%ymm4,%ymm4
+       vmovdqu         %ymm4,0x0100(%rsi)
+       vpxor           0x0180(%rdx),%ymm5,%ymm5
+       vmovdqu         %ymm5,0x0180(%rsi)
+       vpxor           0x0140(%rdx),%ymm6,%ymm6
+       vmovdqu         %ymm6,0x0140(%rsi)
+       vpxor           0x01c0(%rdx),%ymm7,%ymm7
+       vmovdqu         %ymm7,0x01c0(%rsi)
+       vpxor           0x0020(%rdx),%ymm8,%ymm8
+       vmovdqu         %ymm8,0x0020(%rsi)
+       vpxor           0x00a0(%rdx),%ymm9,%ymm9
+       vmovdqu         %ymm9,0x00a0(%rsi)
+       vpxor           0x0060(%rdx),%ymm10,%ymm10
+       vmovdqu         %ymm10,0x0060(%rsi)
+       vpxor           0x00e0(%rdx),%ymm11,%ymm11
+       vmovdqu         %ymm11,0x00e0(%rsi)
+       vpxor           0x0120(%rdx),%ymm12,%ymm12
+       vmovdqu         %ymm12,0x0120(%rsi)
+       vpxor           0x01a0(%rdx),%ymm13,%ymm13
+       vmovdqu         %ymm13,0x01a0(%rsi)
+       vpxor           0x0160(%rdx),%ymm14,%ymm14
+       vmovdqu         %ymm14,0x0160(%rsi)
+       vpxor           0x01e0(%rdx),%ymm15,%ymm15
+       vmovdqu         %ymm15,0x01e0(%rsi)
+
+       vzeroupper
+       mov             %r8,%rsp
+       ret
+ENDPROC(chacha20_8block_xor_avx2)
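
The AVX2 kernel above is an eight-way vectorisation of the ChaCha20 double round from RFC 7539: each ymm register holds the same state word taken from eight independent blocks, so no in-register shuffling is needed until the final transpose. For reference, the scalar operation being vectorised (plain C sketch; rotl32() is the usual rotate helper, not a name from the patch):

        static inline u32 rotl32(u32 v, int n)
        {
                return (v << n) | (v >> (32 - n));
        }

        /* One ChaCha20 quarter round; rotation counts 16/12/8/7 as above. */
        #define QUARTERROUND(a, b, c, d) do {           \
                a += b; d = rotl32(d ^ a, 16);          \
                c += d; b = rotl32(b ^ c, 12);          \
                a += b; d = rotl32(d ^ a,  8);          \
                c += d; b = rotl32(b ^ c,  7);          \
        } while (0)

        /*
         * A double round is four column quarter rounds followed by four
         * diagonal quarter rounds; the .Ldoubleround8 loop runs it 10 times.
         */
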
diff --git a/arch/x86/crypto/chacha20-ssse3-x86_64.S b/arch/x86/crypto/chacha20-ssse3-x86_64.S
new file mode 100644 (file)
index 0000000..712b130
--- /dev/null
@@ -0,0 +1,625 @@
+/*
+ * ChaCha20 256-bit cipher algorithm, RFC7539, x64 SSSE3 functions
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+
+.data
+.align 16
+
+ROT8:  .octa 0x0e0d0c0f0a09080b0605040702010003
+ROT16: .octa 0x0d0c0f0e09080b0a0504070601000302
+CTRINC:        .octa 0x00000003000000020000000100000000
+
+.text
+
+ENTRY(chacha20_block_xor_ssse3)
+       # %rdi: Input state matrix, s
+       # %rsi: 1 data block output, o
+       # %rdx: 1 data block input, i
+
+       # This function encrypts one ChaCha20 block by loading the state matrix
+       # in four SSE registers. It performs matrix operation on four words in
+       # parallel, but requires shuffling to rearrange the words after each
+       # round. 8/16-bit word rotation is done with the slightly better
+       # performing SSSE3 byte shuffling, 7/12-bit word rotation uses
+       # traditional shift+OR.
+
+       # x0..3 = s0..3
+       movdqa          0x00(%rdi),%xmm0
+       movdqa          0x10(%rdi),%xmm1
+       movdqa          0x20(%rdi),%xmm2
+       movdqa          0x30(%rdi),%xmm3
+       movdqa          %xmm0,%xmm8
+       movdqa          %xmm1,%xmm9
+       movdqa          %xmm2,%xmm10
+       movdqa          %xmm3,%xmm11
+
+       movdqa          ROT8(%rip),%xmm4
+       movdqa          ROT16(%rip),%xmm5
+
+       mov     $10,%ecx
+
+.Ldoubleround:
+
+       # x0 += x1, x3 = rotl32(x3 ^ x0, 16)
+       paddd           %xmm1,%xmm0
+       pxor            %xmm0,%xmm3
+       pshufb          %xmm5,%xmm3
+
+       # x2 += x3, x1 = rotl32(x1 ^ x2, 12)
+       paddd           %xmm3,%xmm2
+       pxor            %xmm2,%xmm1
+       movdqa          %xmm1,%xmm6
+       pslld           $12,%xmm6
+       psrld           $20,%xmm1
+       por             %xmm6,%xmm1
+
+       # x0 += x1, x3 = rotl32(x3 ^ x0, 8)
+       paddd           %xmm1,%xmm0
+       pxor            %xmm0,%xmm3
+       pshufb          %xmm4,%xmm3
+
+       # x2 += x3, x1 = rotl32(x1 ^ x2, 7)
+       paddd           %xmm3,%xmm2
+       pxor            %xmm2,%xmm1
+       movdqa          %xmm1,%xmm7
+       pslld           $7,%xmm7
+       psrld           $25,%xmm1
+       por             %xmm7,%xmm1
+
+       # x1 = shuffle32(x1, MASK(0, 3, 2, 1))
+       pshufd          $0x39,%xmm1,%xmm1
+       # x2 = shuffle32(x2, MASK(1, 0, 3, 2))
+       pshufd          $0x4e,%xmm2,%xmm2
+       # x3 = shuffle32(x3, MASK(2, 1, 0, 3))
+       pshufd          $0x93,%xmm3,%xmm3
+
+       # x0 += x1, x3 = rotl32(x3 ^ x0, 16)
+       paddd           %xmm1,%xmm0
+       pxor            %xmm0,%xmm3
+       pshufb          %xmm5,%xmm3
+
+       # x2 += x3, x1 = rotl32(x1 ^ x2, 12)
+       paddd           %xmm3,%xmm2
+       pxor            %xmm2,%xmm1
+       movdqa          %xmm1,%xmm6
+       pslld           $12,%xmm6
+       psrld           $20,%xmm1
+       por             %xmm6,%xmm1
+
+       # x0 += x1, x3 = rotl32(x3 ^ x0, 8)
+       paddd           %xmm1,%xmm0
+       pxor            %xmm0,%xmm3
+       pshufb          %xmm4,%xmm3
+
+       # x2 += x3, x1 = rotl32(x1 ^ x2, 7)
+       paddd           %xmm3,%xmm2
+       pxor            %xmm2,%xmm1
+       movdqa          %xmm1,%xmm7
+       pslld           $7,%xmm7
+       psrld           $25,%xmm1
+       por             %xmm7,%xmm1
+
+       # x1 = shuffle32(x1, MASK(2, 1, 0, 3))
+       pshufd          $0x93,%xmm1,%xmm1
+       # x2 = shuffle32(x2, MASK(1, 0, 3, 2))
+       pshufd          $0x4e,%xmm2,%xmm2
+       # x3 = shuffle32(x3, MASK(0, 3, 2, 1))
+       pshufd          $0x39,%xmm3,%xmm3
+
+       dec             %ecx
+       jnz             .Ldoubleround
+
+       # o0 = i0 ^ (x0 + s0)
+       movdqu          0x00(%rdx),%xmm4
+       paddd           %xmm8,%xmm0
+       pxor            %xmm4,%xmm0
+       movdqu          %xmm0,0x00(%rsi)
+       # o1 = i1 ^ (x1 + s1)
+       movdqu          0x10(%rdx),%xmm5
+       paddd           %xmm9,%xmm1
+       pxor            %xmm5,%xmm1
+       movdqu          %xmm1,0x10(%rsi)
+       # o2 = i2 ^ (x2 + s2)
+       movdqu          0x20(%rdx),%xmm6
+       paddd           %xmm10,%xmm2
+       pxor            %xmm6,%xmm2
+       movdqu          %xmm2,0x20(%rsi)
+       # o3 = i3 ^ (x3 + s3)
+       movdqu          0x30(%rdx),%xmm7
+       paddd           %xmm11,%xmm3
+       pxor            %xmm7,%xmm3
+       movdqu          %xmm3,0x30(%rsi)
+
+       ret
+ENDPROC(chacha20_block_xor_ssse3)
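For reference, a minimal scalar C sketch (not part of the patch) of the ChaCha20 quarter-round that the vector code above performs on four words at a time. The generic rotl32 below is the shift+OR form; in the assembly the 16- and 8-bit cases are instead a single pshufb, since a rotation by a multiple of 8 bits is a pure byte shuffle.

    #include <stdint.h>

    /* shift+OR rotation; for n == 8 or n == 16 the assembly above
     * uses one pshufb byte shuffle instead */
    static inline uint32_t rotl32(uint32_t v, int n)
    {
            return (v << n) | (v >> (32 - n));
    }

    /* one ChaCha20 quarter-round (RFC 7539) on four state words */
    static void quarterround(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
    {
            *a += *b; *d = rotl32(*d ^ *a, 16);
            *c += *d; *b = rotl32(*b ^ *c, 12);
            *a += *b; *d = rotl32(*d ^ *a, 8);
            *c += *d; *b = rotl32(*b ^ *c, 7);
    }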
+
+ENTRY(chacha20_4block_xor_ssse3)
+       # %rdi: Input state matrix, s
+       # %rsi: 4 data blocks output, o
+       # %rdx: 4 data blocks input, i
+
+       # This function encrypts four consecutive ChaCha20 blocks by loading
+       # the state matrix into SSE registers four times. As we need some
+       # scratch registers, we keep the first four state vectors on the
+       # stack. The algorithm performs each operation on the corresponding
+       # word of each state matrix, hence requires no word shuffling. For
+       # the final XOR step we transpose the matrix by interleaving 32- and
+       # then 64-bit words, which allows us to do XOR in SSE registers.
+       # 8/16-bit word rotation is done with the slightly better performing
+       # SSSE3 byte shuffling; 7/12-bit word rotation uses the traditional
+       # shift+OR.
+
+       sub             $0x40,%rsp
+
+       # x0..15[0-3] = s0..3[0-3]
+       movq            0x00(%rdi),%xmm1
+       pshufd          $0x00,%xmm1,%xmm0
+       pshufd          $0x55,%xmm1,%xmm1
+       movq            0x08(%rdi),%xmm3
+       pshufd          $0x00,%xmm3,%xmm2
+       pshufd          $0x55,%xmm3,%xmm3
+       movq            0x10(%rdi),%xmm5
+       pshufd          $0x00,%xmm5,%xmm4
+       pshufd          $0x55,%xmm5,%xmm5
+       movq            0x18(%rdi),%xmm7
+       pshufd          $0x00,%xmm7,%xmm6
+       pshufd          $0x55,%xmm7,%xmm7
+       movq            0x20(%rdi),%xmm9
+       pshufd          $0x00,%xmm9,%xmm8
+       pshufd          $0x55,%xmm9,%xmm9
+       movq            0x28(%rdi),%xmm11
+       pshufd          $0x00,%xmm11,%xmm10
+       pshufd          $0x55,%xmm11,%xmm11
+       movq            0x30(%rdi),%xmm13
+       pshufd          $0x00,%xmm13,%xmm12
+       pshufd          $0x55,%xmm13,%xmm13
+       movq            0x38(%rdi),%xmm15
+       pshufd          $0x00,%xmm15,%xmm14
+       pshufd          $0x55,%xmm15,%xmm15
+       # x0..3 on stack
+       movdqa          %xmm0,0x00(%rsp)
+       movdqa          %xmm1,0x10(%rsp)
+       movdqa          %xmm2,0x20(%rsp)
+       movdqa          %xmm3,0x30(%rsp)
+
+       movdqa          CTRINC(%rip),%xmm1
+       movdqa          ROT8(%rip),%xmm2
+       movdqa          ROT16(%rip),%xmm3
+
+       # x12 += counter values 0-3
+       paddd           %xmm1,%xmm12
+
+       mov             $10,%ecx
+
+.Ldoubleround4:
+       # x0 += x4, x12 = rotl32(x12 ^ x0, 16)
+       movdqa          0x00(%rsp),%xmm0
+       paddd           %xmm4,%xmm0
+       movdqa          %xmm0,0x00(%rsp)
+       pxor            %xmm0,%xmm12
+       pshufb          %xmm3,%xmm12
+       # x1 += x5, x13 = rotl32(x13 ^ x1, 16)
+       movdqa          0x10(%rsp),%xmm0
+       paddd           %xmm5,%xmm0
+       movdqa          %xmm0,0x10(%rsp)
+       pxor            %xmm0,%xmm13
+       pshufb          %xmm3,%xmm13
+       # x2 += x6, x14 = rotl32(x14 ^ x2, 16)
+       movdqa          0x20(%rsp),%xmm0
+       paddd           %xmm6,%xmm0
+       movdqa          %xmm0,0x20(%rsp)
+       pxor            %xmm0,%xmm14
+       pshufb          %xmm3,%xmm14
+       # x3 += x7, x15 = rotl32(x15 ^ x3, 16)
+       movdqa          0x30(%rsp),%xmm0
+       paddd           %xmm7,%xmm0
+       movdqa          %xmm0,0x30(%rsp)
+       pxor            %xmm0,%xmm15
+       pshufb          %xmm3,%xmm15
+
+       # x8 += x12, x4 = rotl32(x4 ^ x8, 12)
+       paddd           %xmm12,%xmm8
+       pxor            %xmm8,%xmm4
+       movdqa          %xmm4,%xmm0
+       pslld           $12,%xmm0
+       psrld           $20,%xmm4
+       por             %xmm0,%xmm4
+       # x9 += x13, x5 = rotl32(x5 ^ x9, 12)
+       paddd           %xmm13,%xmm9
+       pxor            %xmm9,%xmm5
+       movdqa          %xmm5,%xmm0
+       pslld           $12,%xmm0
+       psrld           $20,%xmm5
+       por             %xmm0,%xmm5
+       # x10 += x14, x6 = rotl32(x6 ^ x10, 12)
+       paddd           %xmm14,%xmm10
+       pxor            %xmm10,%xmm6
+       movdqa          %xmm6,%xmm0
+       pslld           $12,%xmm0
+       psrld           $20,%xmm6
+       por             %xmm0,%xmm6
+       # x11 += x15, x7 = rotl32(x7 ^ x11, 12)
+       paddd           %xmm15,%xmm11
+       pxor            %xmm11,%xmm7
+       movdqa          %xmm7,%xmm0
+       pslld           $12,%xmm0
+       psrld           $20,%xmm7
+       por             %xmm0,%xmm7
+
+       # x0 += x4, x12 = rotl32(x12 ^ x0, 8)
+       movdqa          0x00(%rsp),%xmm0
+       paddd           %xmm4,%xmm0
+       movdqa          %xmm0,0x00(%rsp)
+       pxor            %xmm0,%xmm12
+       pshufb          %xmm2,%xmm12
+       # x1 += x5, x13 = rotl32(x13 ^ x1, 8)
+       movdqa          0x10(%rsp),%xmm0
+       paddd           %xmm5,%xmm0
+       movdqa          %xmm0,0x10(%rsp)
+       pxor            %xmm0,%xmm13
+       pshufb          %xmm2,%xmm13
+       # x2 += x6, x14 = rotl32(x14 ^ x2, 8)
+       movdqa          0x20(%rsp),%xmm0
+       paddd           %xmm6,%xmm0
+       movdqa          %xmm0,0x20(%rsp)
+       pxor            %xmm0,%xmm14
+       pshufb          %xmm2,%xmm14
+       # x3 += x7, x15 = rotl32(x15 ^ x3, 8)
+       movdqa          0x30(%rsp),%xmm0
+       paddd           %xmm7,%xmm0
+       movdqa          %xmm0,0x30(%rsp)
+       pxor            %xmm0,%xmm15
+       pshufb          %xmm2,%xmm15
+
+       # x8 += x12, x4 = rotl32(x4 ^ x8, 7)
+       paddd           %xmm12,%xmm8
+       pxor            %xmm8,%xmm4
+       movdqa          %xmm4,%xmm0
+       pslld           $7,%xmm0
+       psrld           $25,%xmm4
+       por             %xmm0,%xmm4
+       # x9 += x13, x5 = rotl32(x5 ^ x9, 7)
+       paddd           %xmm13,%xmm9
+       pxor            %xmm9,%xmm5
+       movdqa          %xmm5,%xmm0
+       pslld           $7,%xmm0
+       psrld           $25,%xmm5
+       por             %xmm0,%xmm5
+       # x10 += x14, x6 = rotl32(x6 ^ x10, 7)
+       paddd           %xmm14,%xmm10
+       pxor            %xmm10,%xmm6
+       movdqa          %xmm6,%xmm0
+       pslld           $7,%xmm0
+       psrld           $25,%xmm6
+       por             %xmm0,%xmm6
+       # x11 += x15, x7 = rotl32(x7 ^ x11, 7)
+       paddd           %xmm15,%xmm11
+       pxor            %xmm11,%xmm7
+       movdqa          %xmm7,%xmm0
+       pslld           $7,%xmm0
+       psrld           $25,%xmm7
+       por             %xmm0,%xmm7
+
+       # x0 += x5, x15 = rotl32(x15 ^ x0, 16)
+       movdqa          0x00(%rsp),%xmm0
+       paddd           %xmm5,%xmm0
+       movdqa          %xmm0,0x00(%rsp)
+       pxor            %xmm0,%xmm15
+       pshufb          %xmm3,%xmm15
+       # x1 += x6, x12 = rotl32(x12 ^ x1, 16)
+       movdqa          0x10(%rsp),%xmm0
+       paddd           %xmm6,%xmm0
+       movdqa          %xmm0,0x10(%rsp)
+       pxor            %xmm0,%xmm12
+       pshufb          %xmm3,%xmm12
+       # x2 += x7, x13 = rotl32(x13 ^ x2, 16)
+       movdqa          0x20(%rsp),%xmm0
+       paddd           %xmm7,%xmm0
+       movdqa          %xmm0,0x20(%rsp)
+       pxor            %xmm0,%xmm13
+       pshufb          %xmm3,%xmm13
+       # x3 += x4, x14 = rotl32(x14 ^ x3, 16)
+       movdqa          0x30(%rsp),%xmm0
+       paddd           %xmm4,%xmm0
+       movdqa          %xmm0,0x30(%rsp)
+       pxor            %xmm0,%xmm14
+       pshufb          %xmm3,%xmm14
+
+       # x10 += x15, x5 = rotl32(x5 ^ x10, 12)
+       paddd           %xmm15,%xmm10
+       pxor            %xmm10,%xmm5
+       movdqa          %xmm5,%xmm0
+       pslld           $12,%xmm0
+       psrld           $20,%xmm5
+       por             %xmm0,%xmm5
+       # x11 += x12, x6 = rotl32(x6 ^ x11, 12)
+       paddd           %xmm12,%xmm11
+       pxor            %xmm11,%xmm6
+       movdqa          %xmm6,%xmm0
+       pslld           $12,%xmm0
+       psrld           $20,%xmm6
+       por             %xmm0,%xmm6
+       # x8 += x13, x7 = rotl32(x7 ^ x8, 12)
+       paddd           %xmm13,%xmm8
+       pxor            %xmm8,%xmm7
+       movdqa          %xmm7,%xmm0
+       pslld           $12,%xmm0
+       psrld           $20,%xmm7
+       por             %xmm0,%xmm7
+       # x9 += x14, x4 = rotl32(x4 ^ x9, 12)
+       paddd           %xmm14,%xmm9
+       pxor            %xmm9,%xmm4
+       movdqa          %xmm4,%xmm0
+       pslld           $12,%xmm0
+       psrld           $20,%xmm4
+       por             %xmm0,%xmm4
+
+       # x0 += x5, x15 = rotl32(x15 ^ x0, 8)
+       movdqa          0x00(%rsp),%xmm0
+       paddd           %xmm5,%xmm0
+       movdqa          %xmm0,0x00(%rsp)
+       pxor            %xmm0,%xmm15
+       pshufb          %xmm2,%xmm15
+       # x1 += x6, x12 = rotl32(x12 ^ x1, 8)
+       movdqa          0x10(%rsp),%xmm0
+       paddd           %xmm6,%xmm0
+       movdqa          %xmm0,0x10(%rsp)
+       pxor            %xmm0,%xmm12
+       pshufb          %xmm2,%xmm12
+       # x2 += x7, x13 = rotl32(x13 ^ x2, 8)
+       movdqa          0x20(%rsp),%xmm0
+       paddd           %xmm7,%xmm0
+       movdqa          %xmm0,0x20(%rsp)
+       pxor            %xmm0,%xmm13
+       pshufb          %xmm2,%xmm13
+       # x3 += x4, x14 = rotl32(x14 ^ x3, 8)
+       movdqa          0x30(%rsp),%xmm0
+       paddd           %xmm4,%xmm0
+       movdqa          %xmm0,0x30(%rsp)
+       pxor            %xmm0,%xmm14
+       pshufb          %xmm2,%xmm14
+
+       # x10 += x15, x5 = rotl32(x5 ^ x10, 7)
+       paddd           %xmm15,%xmm10
+       pxor            %xmm10,%xmm5
+       movdqa          %xmm5,%xmm0
+       pslld           $7,%xmm0
+       psrld           $25,%xmm5
+       por             %xmm0,%xmm5
+       # x11 += x12, x6 = rotl32(x6 ^ x11, 7)
+       paddd           %xmm12,%xmm11
+       pxor            %xmm11,%xmm6
+       movdqa          %xmm6,%xmm0
+       pslld           $7,%xmm0
+       psrld           $25,%xmm6
+       por             %xmm0,%xmm6
+       # x8 += x13, x7 = rotl32(x7 ^ x8, 7)
+       paddd           %xmm13,%xmm8
+       pxor            %xmm8,%xmm7
+       movdqa          %xmm7,%xmm0
+       pslld           $7,%xmm0
+       psrld           $25,%xmm7
+       por             %xmm0,%xmm7
+       # x9 += x14, x4 = rotl32(x4 ^ x9, 7)
+       paddd           %xmm14,%xmm9
+       pxor            %xmm9,%xmm4
+       movdqa          %xmm4,%xmm0
+       pslld           $7,%xmm0
+       psrld           $25,%xmm4
+       por             %xmm0,%xmm4
+
+       dec             %ecx
+       jnz             .Ldoubleround4
+
+       # x0[0-3] += s0[0]
+       # x1[0-3] += s0[1]
+       movq            0x00(%rdi),%xmm3
+       pshufd          $0x00,%xmm3,%xmm2
+       pshufd          $0x55,%xmm3,%xmm3
+       paddd           0x00(%rsp),%xmm2
+       movdqa          %xmm2,0x00(%rsp)
+       paddd           0x10(%rsp),%xmm3
+       movdqa          %xmm3,0x10(%rsp)
+       # x2[0-3] += s0[2]
+       # x3[0-3] += s0[3]
+       movq            0x08(%rdi),%xmm3
+       pshufd          $0x00,%xmm3,%xmm2
+       pshufd          $0x55,%xmm3,%xmm3
+       paddd           0x20(%rsp),%xmm2
+       movdqa          %xmm2,0x20(%rsp)
+       paddd           0x30(%rsp),%xmm3
+       movdqa          %xmm3,0x30(%rsp)
+
+       # x4[0-3] += s1[0]
+       # x5[0-3] += s1[1]
+       movq            0x10(%rdi),%xmm3
+       pshufd          $0x00,%xmm3,%xmm2
+       pshufd          $0x55,%xmm3,%xmm3
+       paddd           %xmm2,%xmm4
+       paddd           %xmm3,%xmm5
+       # x6[0-3] += s1[2]
+       # x7[0-3] += s1[3]
+       movq            0x18(%rdi),%xmm3
+       pshufd          $0x00,%xmm3,%xmm2
+       pshufd          $0x55,%xmm3,%xmm3
+       paddd           %xmm2,%xmm6
+       paddd           %xmm3,%xmm7
+
+       # x8[0-3] += s2[0]
+       # x9[0-3] += s2[1]
+       movq            0x20(%rdi),%xmm3
+       pshufd          $0x00,%xmm3,%xmm2
+       pshufd          $0x55,%xmm3,%xmm3
+       paddd           %xmm2,%xmm8
+       paddd           %xmm3,%xmm9
+       # x10[0-3] += s2[2]
+       # x11[0-3] += s2[3]
+       movq            0x28(%rdi),%xmm3
+       pshufd          $0x00,%xmm3,%xmm2
+       pshufd          $0x55,%xmm3,%xmm3
+       paddd           %xmm2,%xmm10
+       paddd           %xmm3,%xmm11
+
+       # x12[0-3] += s3[0]
+       # x13[0-3] += s3[1]
+       movq            0x30(%rdi),%xmm3
+       pshufd          $0x00,%xmm3,%xmm2
+       pshufd          $0x55,%xmm3,%xmm3
+       paddd           %xmm2,%xmm12
+       paddd           %xmm3,%xmm13
+       # x14[0-3] += s3[2]
+       # x15[0-3] += s3[3]
+       movq            0x38(%rdi),%xmm3
+       pshufd          $0x00,%xmm3,%xmm2
+       pshufd          $0x55,%xmm3,%xmm3
+       paddd           %xmm2,%xmm14
+       paddd           %xmm3,%xmm15
+
+       # x12 += counter values 0-3
+       paddd           %xmm1,%xmm12
+
+       # interleave 32-bit words in state n, n+1
+       movdqa          0x00(%rsp),%xmm0
+       movdqa          0x10(%rsp),%xmm1
+       movdqa          %xmm0,%xmm2
+       punpckldq       %xmm1,%xmm2
+       punpckhdq       %xmm1,%xmm0
+       movdqa          %xmm2,0x00(%rsp)
+       movdqa          %xmm0,0x10(%rsp)
+       movdqa          0x20(%rsp),%xmm0
+       movdqa          0x30(%rsp),%xmm1
+       movdqa          %xmm0,%xmm2
+       punpckldq       %xmm1,%xmm2
+       punpckhdq       %xmm1,%xmm0
+       movdqa          %xmm2,0x20(%rsp)
+       movdqa          %xmm0,0x30(%rsp)
+       movdqa          %xmm4,%xmm0
+       punpckldq       %xmm5,%xmm4
+       punpckhdq       %xmm5,%xmm0
+       movdqa          %xmm0,%xmm5
+       movdqa          %xmm6,%xmm0
+       punpckldq       %xmm7,%xmm6
+       punpckhdq       %xmm7,%xmm0
+       movdqa          %xmm0,%xmm7
+       movdqa          %xmm8,%xmm0
+       punpckldq       %xmm9,%xmm8
+       punpckhdq       %xmm9,%xmm0
+       movdqa          %xmm0,%xmm9
+       movdqa          %xmm10,%xmm0
+       punpckldq       %xmm11,%xmm10
+       punpckhdq       %xmm11,%xmm0
+       movdqa          %xmm0,%xmm11
+       movdqa          %xmm12,%xmm0
+       punpckldq       %xmm13,%xmm12
+       punpckhdq       %xmm13,%xmm0
+       movdqa          %xmm0,%xmm13
+       movdqa          %xmm14,%xmm0
+       punpckldq       %xmm15,%xmm14
+       punpckhdq       %xmm15,%xmm0
+       movdqa          %xmm0,%xmm15
+
+       # interleave 64-bit words in state n, n+2
+       movdqa          0x00(%rsp),%xmm0
+       movdqa          0x20(%rsp),%xmm1
+       movdqa          %xmm0,%xmm2
+       punpcklqdq      %xmm1,%xmm2
+       punpckhqdq      %xmm1,%xmm0
+       movdqa          %xmm2,0x00(%rsp)
+       movdqa          %xmm0,0x20(%rsp)
+       movdqa          0x10(%rsp),%xmm0
+       movdqa          0x30(%rsp),%xmm1
+       movdqa          %xmm0,%xmm2
+       punpcklqdq      %xmm1,%xmm2
+       punpckhqdq      %xmm1,%xmm0
+       movdqa          %xmm2,0x10(%rsp)
+       movdqa          %xmm0,0x30(%rsp)
+       movdqa          %xmm4,%xmm0
+       punpcklqdq      %xmm6,%xmm4
+       punpckhqdq      %xmm6,%xmm0
+       movdqa          %xmm0,%xmm6
+       movdqa          %xmm5,%xmm0
+       punpcklqdq      %xmm7,%xmm5
+       punpckhqdq      %xmm7,%xmm0
+       movdqa          %xmm0,%xmm7
+       movdqa          %xmm8,%xmm0
+       punpcklqdq      %xmm10,%xmm8
+       punpckhqdq      %xmm10,%xmm0
+       movdqa          %xmm0,%xmm10
+       movdqa          %xmm9,%xmm0
+       punpcklqdq      %xmm11,%xmm9
+       punpckhqdq      %xmm11,%xmm0
+       movdqa          %xmm0,%xmm11
+       movdqa          %xmm12,%xmm0
+       punpcklqdq      %xmm14,%xmm12
+       punpckhqdq      %xmm14,%xmm0
+       movdqa          %xmm0,%xmm14
+       movdqa          %xmm13,%xmm0
+       punpcklqdq      %xmm15,%xmm13
+       punpckhqdq      %xmm15,%xmm0
+       movdqa          %xmm0,%xmm15
+
+       # xor with corresponding input, write to output
+       movdqa          0x00(%rsp),%xmm0
+       movdqu          0x00(%rdx),%xmm1
+       pxor            %xmm1,%xmm0
+       movdqu          %xmm0,0x00(%rsi)
+       movdqa          0x10(%rsp),%xmm0
+       movdqu          0x80(%rdx),%xmm1
+       pxor            %xmm1,%xmm0
+       movdqu          %xmm0,0x80(%rsi)
+       movdqa          0x20(%rsp),%xmm0
+       movdqu          0x40(%rdx),%xmm1
+       pxor            %xmm1,%xmm0
+       movdqu          %xmm0,0x40(%rsi)
+       movdqa          0x30(%rsp),%xmm0
+       movdqu          0xc0(%rdx),%xmm1
+       pxor            %xmm1,%xmm0
+       movdqu          %xmm0,0xc0(%rsi)
+       movdqu          0x10(%rdx),%xmm1
+       pxor            %xmm1,%xmm4
+       movdqu          %xmm4,0x10(%rsi)
+       movdqu          0x90(%rdx),%xmm1
+       pxor            %xmm1,%xmm5
+       movdqu          %xmm5,0x90(%rsi)
+       movdqu          0x50(%rdx),%xmm1
+       pxor            %xmm1,%xmm6
+       movdqu          %xmm6,0x50(%rsi)
+       movdqu          0xd0(%rdx),%xmm1
+       pxor            %xmm1,%xmm7
+       movdqu          %xmm7,0xd0(%rsi)
+       movdqu          0x20(%rdx),%xmm1
+       pxor            %xmm1,%xmm8
+       movdqu          %xmm8,0x20(%rsi)
+       movdqu          0xa0(%rdx),%xmm1
+       pxor            %xmm1,%xmm9
+       movdqu          %xmm9,0xa0(%rsi)
+       movdqu          0x60(%rdx),%xmm1
+       pxor            %xmm1,%xmm10
+       movdqu          %xmm10,0x60(%rsi)
+       movdqu          0xe0(%rdx),%xmm1
+       pxor            %xmm1,%xmm11
+       movdqu          %xmm11,0xe0(%rsi)
+       movdqu          0x30(%rdx),%xmm1
+       pxor            %xmm1,%xmm12
+       movdqu          %xmm12,0x30(%rsi)
+       movdqu          0xb0(%rdx),%xmm1
+       pxor            %xmm1,%xmm13
+       movdqu          %xmm13,0xb0(%rsi)
+       movdqu          0x70(%rdx),%xmm1
+       pxor            %xmm1,%xmm14
+       movdqu          %xmm14,0x70(%rsi)
+       movdqu          0xf0(%rdx),%xmm1
+       pxor            %xmm1,%xmm15
+       movdqu          %xmm15,0xf0(%rsi)
+
+       add             $0x40,%rsp
+       ret
+ENDPROC(chacha20_4block_xor_ssse3)
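The interleave sequence before the final XOR above is a standard two-pass 4x4 transpose: first interleave 32-bit words of adjacent rows (punpckldq/punpckhdq), then 64-bit halves of the results (punpcklqdq/punpckhqdq). A self-contained C sketch of the same scheme, with plain array stores standing in for the unpack instructions (illustrative only):

    #include <stdint.h>
    #include <string.h>

    /* transpose a 4x4 matrix of 32-bit words in place, using the same
     * two interleave passes as the assembly above */
    static void transpose4x4(uint32_t m[4][4])
    {
            uint32_t t[4][4];

            /* pass 1: interleave 32-bit words of row pairs (0,1), (2,3) */
            t[0][0] = m[0][0]; t[0][1] = m[1][0]; t[0][2] = m[0][1]; t[0][3] = m[1][1];
            t[1][0] = m[0][2]; t[1][1] = m[1][2]; t[1][2] = m[0][3]; t[1][3] = m[1][3];
            t[2][0] = m[2][0]; t[2][1] = m[3][0]; t[2][2] = m[2][1]; t[2][3] = m[3][1];
            t[3][0] = m[2][2]; t[3][1] = m[3][2]; t[3][2] = m[2][3]; t[3][3] = m[3][3];

            /* pass 2: interleave 64-bit halves of pairs (0,2), (1,3) */
            memcpy(&m[0][0], &t[0][0], 8); memcpy(&m[0][2], &t[2][0], 8);
            memcpy(&m[1][0], &t[0][2], 8); memcpy(&m[1][2], &t[2][2], 8);
            memcpy(&m[2][0], &t[1][0], 8); memcpy(&m[2][2], &t[3][0], 8);
            memcpy(&m[3][0], &t[1][2], 8); memcpy(&m[3][2], &t[3][2], 8);
    }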
diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c
new file mode 100644 (file)
index 0000000..effe216
--- /dev/null
@@ -0,0 +1,150 @@
+/*
+ * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/chacha20.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/fpu/api.h>
+#include <asm/simd.h>
+
+#define CHACHA20_STATE_ALIGN 16
+
+asmlinkage void chacha20_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src);
+asmlinkage void chacha20_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src);
+#ifdef CONFIG_AS_AVX2
+asmlinkage void chacha20_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src);
+static bool chacha20_use_avx2;
+#endif
+
+static void chacha20_dosimd(u32 *state, u8 *dst, const u8 *src,
+                           unsigned int bytes)
+{
+       u8 buf[CHACHA20_BLOCK_SIZE];
+
+#ifdef CONFIG_AS_AVX2
+       if (chacha20_use_avx2) {
+               while (bytes >= CHACHA20_BLOCK_SIZE * 8) {
+                       chacha20_8block_xor_avx2(state, dst, src);
+                       bytes -= CHACHA20_BLOCK_SIZE * 8;
+                       src += CHACHA20_BLOCK_SIZE * 8;
+                       dst += CHACHA20_BLOCK_SIZE * 8;
+                       state[12] += 8;
+               }
+       }
+#endif
+       while (bytes >= CHACHA20_BLOCK_SIZE * 4) {
+               chacha20_4block_xor_ssse3(state, dst, src);
+               bytes -= CHACHA20_BLOCK_SIZE * 4;
+               src += CHACHA20_BLOCK_SIZE * 4;
+               dst += CHACHA20_BLOCK_SIZE * 4;
+               state[12] += 4;
+       }
+       while (bytes >= CHACHA20_BLOCK_SIZE) {
+               chacha20_block_xor_ssse3(state, dst, src);
+               bytes -= CHACHA20_BLOCK_SIZE;
+               src += CHACHA20_BLOCK_SIZE;
+               dst += CHACHA20_BLOCK_SIZE;
+               state[12]++;
+       }
+       if (bytes) {
+               memcpy(buf, src, bytes);
+               chacha20_block_xor_ssse3(state, buf, buf);
+               memcpy(dst, buf, bytes);
+       }
+}
+
+static int chacha20_simd(struct blkcipher_desc *desc, struct scatterlist *dst,
+                        struct scatterlist *src, unsigned int nbytes)
+{
+       u32 *state, state_buf[16 + (CHACHA20_STATE_ALIGN / sizeof(u32)) - 1];
+       struct blkcipher_walk walk;
+       int err;
+
+       if (!may_use_simd())
+               return crypto_chacha20_crypt(desc, dst, src, nbytes);
+
+       state = (u32 *)roundup((uintptr_t)state_buf, CHACHA20_STATE_ALIGN);
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE);
+
+       crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);
+
+       kernel_fpu_begin();
+
+       while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
+               chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
+                               rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE));
+               err = blkcipher_walk_done(desc, &walk,
+                                         walk.nbytes % CHACHA20_BLOCK_SIZE);
+       }
+
+       if (walk.nbytes) {
+               chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
+                               walk.nbytes);
+               err = blkcipher_walk_done(desc, &walk, 0);
+       }
+
+       kernel_fpu_end();
+
+       return err;
+}
+
+static struct crypto_alg alg = {
+       .cra_name               = "chacha20",
+       .cra_driver_name        = "chacha20-simd",
+       .cra_priority           = 300,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = 1,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_ctxsize            = sizeof(struct chacha20_ctx),
+       .cra_alignmask          = sizeof(u32) - 1,
+       .cra_module             = THIS_MODULE,
+       .cra_u                  = {
+               .blkcipher = {
+                       .min_keysize    = CHACHA20_KEY_SIZE,
+                       .max_keysize    = CHACHA20_KEY_SIZE,
+                       .ivsize         = CHACHA20_IV_SIZE,
+                       .geniv          = "seqiv",
+                       .setkey         = crypto_chacha20_setkey,
+                       .encrypt        = chacha20_simd,
+                       .decrypt        = chacha20_simd,
+               },
+       },
+};
+
+static int __init chacha20_simd_mod_init(void)
+{
+       if (!cpu_has_ssse3)
+               return -ENODEV;
+
+#ifdef CONFIG_AS_AVX2
+       chacha20_use_avx2 = cpu_has_avx && cpu_has_avx2 &&
+                           cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL);
+#endif
+       return crypto_register_alg(&alg);
+}
+
+static void __exit chacha20_simd_mod_fini(void)
+{
+       crypto_unregister_alg(&alg);
+}
+
+module_init(chacha20_simd_mod_init);
+module_exit(chacha20_simd_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
+MODULE_DESCRIPTION("chacha20 cipher algorithm, SIMD accelerated");
+MODULE_ALIAS_CRYPTO("chacha20");
+MODULE_ALIAS_CRYPTO("chacha20-simd");
diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S
new file mode 100644 (file)
index 0000000..eff2f41
--- /dev/null
@@ -0,0 +1,386 @@
+/*
+ * Poly1305 authenticator algorithm, RFC7539, x64 AVX2 functions
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+
+.data
+.align 32
+
+ANMASK:        .octa 0x0000000003ffffff0000000003ffffff
+       .octa 0x0000000003ffffff0000000003ffffff
+ORMASK:        .octa 0x00000000010000000000000001000000
+       .octa 0x00000000010000000000000001000000
+
+.text
+
+#define h0 0x00(%rdi)
+#define h1 0x04(%rdi)
+#define h2 0x08(%rdi)
+#define h3 0x0c(%rdi)
+#define h4 0x10(%rdi)
+#define r0 0x00(%rdx)
+#define r1 0x04(%rdx)
+#define r2 0x08(%rdx)
+#define r3 0x0c(%rdx)
+#define r4 0x10(%rdx)
+#define u0 0x00(%r8)
+#define u1 0x04(%r8)
+#define u2 0x08(%r8)
+#define u3 0x0c(%r8)
+#define u4 0x10(%r8)
+#define w0 0x14(%r8)
+#define w1 0x18(%r8)
+#define w2 0x1c(%r8)
+#define w3 0x20(%r8)
+#define w4 0x24(%r8)
+#define y0 0x28(%r8)
+#define y1 0x2c(%r8)
+#define y2 0x30(%r8)
+#define y3 0x34(%r8)
+#define y4 0x38(%r8)
+#define m %rsi
+#define hc0 %ymm0
+#define hc1 %ymm1
+#define hc2 %ymm2
+#define hc3 %ymm3
+#define hc4 %ymm4
+#define hc0x %xmm0
+#define hc1x %xmm1
+#define hc2x %xmm2
+#define hc3x %xmm3
+#define hc4x %xmm4
+#define t1 %ymm5
+#define t2 %ymm6
+#define t1x %xmm5
+#define t2x %xmm6
+#define ruwy0 %ymm7
+#define ruwy1 %ymm8
+#define ruwy2 %ymm9
+#define ruwy3 %ymm10
+#define ruwy4 %ymm11
+#define ruwy0x %xmm7
+#define ruwy1x %xmm8
+#define ruwy2x %xmm9
+#define ruwy3x %xmm10
+#define ruwy4x %xmm11
+#define svxz1 %ymm12
+#define svxz2 %ymm13
+#define svxz3 %ymm14
+#define svxz4 %ymm15
+#define d0 %r9
+#define d1 %r10
+#define d2 %r11
+#define d3 %r12
+#define d4 %r13
+
+ENTRY(poly1305_4block_avx2)
+       # %rdi: Accumulator h[5]
+       # %rsi: 64 byte input block m
+       # %rdx: Poly1305 key r[5]
+       # %rcx: Quadblock count
+       # %r8:  Poly1305 derived key r^2 u[5], r^3 w[5], r^4 y[5]
+
+       # This four-block variant uses loop-unrolled block processing. It
+       # requires 4 Poly1305 keys: r, r^2, r^3 and r^4:
+       # h = (h + m) * r  =>  h = (h + m1) * r^4 + m2 * r^3 + m3 * r^2 + m4 * r
+
+       vzeroupper
+       push            %rbx
+       push            %r12
+       push            %r13
+
+       # combine r0,u0,w0,y0
+       vmovd           y0,ruwy0x
+       vmovd           w0,t1x
+       vpunpcklqdq     t1,ruwy0,ruwy0
+       vmovd           u0,t1x
+       vmovd           r0,t2x
+       vpunpcklqdq     t2,t1,t1
+       vperm2i128      $0x20,t1,ruwy0,ruwy0
+
+       # combine r1,u1,w1,y1 and s1=r1*5,v1=u1*5,x1=w1*5,z1=y1*5
+       vmovd           y1,ruwy1x
+       vmovd           w1,t1x
+       vpunpcklqdq     t1,ruwy1,ruwy1
+       vmovd           u1,t1x
+       vmovd           r1,t2x
+       vpunpcklqdq     t2,t1,t1
+       vperm2i128      $0x20,t1,ruwy1,ruwy1
+       vpslld          $2,ruwy1,svxz1
+       vpaddd          ruwy1,svxz1,svxz1
+
+       # combine r2,u2,w2,y2 and s2=r2*5,v2=u2*5,x2=w2*5,z2=y2*5
+       vmovd           y2,ruwy2x
+       vmovd           w2,t1x
+       vpunpcklqdq     t1,ruwy2,ruwy2
+       vmovd           u2,t1x
+       vmovd           r2,t2x
+       vpunpcklqdq     t2,t1,t1
+       vperm2i128      $0x20,t1,ruwy2,ruwy2
+       vpslld          $2,ruwy2,svxz2
+       vpaddd          ruwy2,svxz2,svxz2
+
+       # combine r3,u3,w3,y3 and s3=r3*5,v3=u3*5,x3=w3*5,z3=y3*5
+       vmovd           y3,ruwy3x
+       vmovd           w3,t1x
+       vpunpcklqdq     t1,ruwy3,ruwy3
+       vmovd           u3,t1x
+       vmovd           r3,t2x
+       vpunpcklqdq     t2,t1,t1
+       vperm2i128      $0x20,t1,ruwy3,ruwy3
+       vpslld          $2,ruwy3,svxz3
+       vpaddd          ruwy3,svxz3,svxz3
+
+       # combine r4,u4,w4,y4 and s4=r4*5,v4=u4*5,x4=w4*5,z4=y4*5
+       vmovd           y4,ruwy4x
+       vmovd           w4,t1x
+       vpunpcklqdq     t1,ruwy4,ruwy4
+       vmovd           u4,t1x
+       vmovd           r4,t2x
+       vpunpcklqdq     t2,t1,t1
+       vperm2i128      $0x20,t1,ruwy4,ruwy4
+       vpslld          $2,ruwy4,svxz4
+       vpaddd          ruwy4,svxz4,svxz4
+
+.Ldoblock4:
+       # hc0 = [m[48-51] & 0x3ffffff, m[32-35] & 0x3ffffff,
+       #        m[16-19] & 0x3ffffff, m[ 0- 3] & 0x3ffffff + h0]
+       vmovd           0x00(m),hc0x
+       vmovd           0x10(m),t1x
+       vpunpcklqdq     t1,hc0,hc0
+       vmovd           0x20(m),t1x
+       vmovd           0x30(m),t2x
+       vpunpcklqdq     t2,t1,t1
+       vperm2i128      $0x20,t1,hc0,hc0
+       vpand           ANMASK(%rip),hc0,hc0
+       vmovd           h0,t1x
+       vpaddd          t1,hc0,hc0
+       # hc1 = [(m[51-54] >> 2) & 0x3ffffff, (m[35-38] >> 2) & 0x3ffffff,
+       #        (m[19-22] >> 2) & 0x3ffffff, (m[ 3- 6] >> 2) & 0x3ffffff + h1]
+       vmovd           0x03(m),hc1x
+       vmovd           0x13(m),t1x
+       vpunpcklqdq     t1,hc1,hc1
+       vmovd           0x23(m),t1x
+       vmovd           0x33(m),t2x
+       vpunpcklqdq     t2,t1,t1
+       vperm2i128      $0x20,t1,hc1,hc1
+       vpsrld          $2,hc1,hc1
+       vpand           ANMASK(%rip),hc1,hc1
+       vmovd           h1,t1x
+       vpaddd          t1,hc1,hc1
+       # hc2 = [(m[54-57] >> 4) & 0x3ffffff, (m[38-41] >> 4) & 0x3ffffff,
+       #        (m[22-25] >> 4) & 0x3ffffff, (m[ 6- 9] >> 4) & 0x3ffffff + h2]
+       vmovd           0x06(m),hc2x
+       vmovd           0x16(m),t1x
+       vpunpcklqdq     t1,hc2,hc2
+       vmovd           0x26(m),t1x
+       vmovd           0x36(m),t2x
+       vpunpcklqdq     t2,t1,t1
+       vperm2i128      $0x20,t1,hc2,hc2
+       vpsrld          $4,hc2,hc2
+       vpand           ANMASK(%rip),hc2,hc2
+       vmovd           h2,t1x
+       vpaddd          t1,hc2,hc2
+       # hc3 = [(m[57-60] >> 6) & 0x3ffffff, (m[41-44] >> 6) & 0x3ffffff,
+       #        (m[25-28] >> 6) & 0x3ffffff, (m[ 9-12] >> 6) & 0x3ffffff + h3]
+       vmovd           0x09(m),hc3x
+       vmovd           0x19(m),t1x
+       vpunpcklqdq     t1,hc3,hc3
+       vmovd           0x29(m),t1x
+       vmovd           0x39(m),t2x
+       vpunpcklqdq     t2,t1,t1
+       vperm2i128      $0x20,t1,hc3,hc3
+       vpsrld          $6,hc3,hc3
+       vpand           ANMASK(%rip),hc3,hc3
+       vmovd           h3,t1x
+       vpaddd          t1,hc3,hc3
+       # hc4 = [(m[60-63] >> 8) | (1<<24), (m[44-47] >> 8) | (1<<24),
+       #        (m[28-31] >> 8) | (1<<24), (m[12-15] >> 8) | (1<<24) + h4]
+       vmovd           0x0c(m),hc4x
+       vmovd           0x1c(m),t1x
+       vpunpcklqdq     t1,hc4,hc4
+       vmovd           0x2c(m),t1x
+       vmovd           0x3c(m),t2x
+       vpunpcklqdq     t2,t1,t1
+       vperm2i128      $0x20,t1,hc4,hc4
+       vpsrld          $8,hc4,hc4
+       vpor            ORMASK(%rip),hc4,hc4
+       vmovd           h4,t1x
+       vpaddd          t1,hc4,hc4
+
+       # t1 = [ hc0[3] * r0, hc0[2] * u0, hc0[1] * w0, hc0[0] * y0 ]
+       vpmuludq        hc0,ruwy0,t1
+       # t1 += [ hc1[3] * s4, hc1[2] * v4, hc1[1] * x4, hc1[0] * z4 ]
+       vpmuludq        hc1,svxz4,t2
+       vpaddq          t2,t1,t1
+       # t1 += [ hc2[3] * s3, hc2[2] * v3, hc2[1] * x3, hc2[0] * z3 ]
+       vpmuludq        hc2,svxz3,t2
+       vpaddq          t2,t1,t1
+       # t1 += [ hc3[3] * s2, hc3[2] * v2, hc3[1] * x2, hc3[0] * z2 ]
+       vpmuludq        hc3,svxz2,t2
+       vpaddq          t2,t1,t1
+       # t1 += [ hc4[3] * s1, hc4[2] * v1, hc4[1] * x1, hc4[0] * z1 ]
+       vpmuludq        hc4,svxz1,t2
+       vpaddq          t2,t1,t1
+       # d0 = t1[0] + t1[1] + t1[2] + t1[3]
+       vpermq          $0xee,t1,t2
+       vpaddq          t2,t1,t1
+       vpsrldq         $8,t1,t2
+       vpaddq          t2,t1,t1
+       vmovq           t1x,d0
+
+       # t1 = [ hc0[3] * r1, hc0[2] * u1, hc0[1] * w1, hc0[0] * y1 ]
+       vpmuludq        hc0,ruwy1,t1
+       # t1 += [ hc1[3] * r0, hc1[2] * u0, hc1[1] * w0, hc1[0] * y0 ]
+       vpmuludq        hc1,ruwy0,t2
+       vpaddq          t2,t1,t1
+       # t1 += [ hc2[3] * s4, hc2[2] * v4, hc2[1] * x4, hc2[0] * z4 ]
+       vpmuludq        hc2,svxz4,t2
+       vpaddq          t2,t1,t1
+       # t1 += [ hc3[3] * s3, hc3[2] * v3, hc3[1] * x3, hc3[0] * z3 ]
+       vpmuludq        hc3,svxz3,t2
+       vpaddq          t2,t1,t1
+       # t1 += [ hc4[3] * s2, hc4[2] * v2, hc4[1] * x2, hc4[0] * z2 ]
+       vpmuludq        hc4,svxz2,t2
+       vpaddq          t2,t1,t1
+       # d1 = t1[0] + t1[1] + t1[2] + t1[3]
+       vpermq          $0xee,t1,t2
+       vpaddq          t2,t1,t1
+       vpsrldq         $8,t1,t2
+       vpaddq          t2,t1,t1
+       vmovq           t1x,d1
+
+       # t1 = [ hc0[3] * r2, hc0[2] * u2, hc0[1] * w2, hc0[0] * y2 ]
+       vpmuludq        hc0,ruwy2,t1
+       # t1 += [ hc1[3] * r1, hc1[2] * u1, hc1[1] * w1, hc1[0] * y1 ]
+       vpmuludq        hc1,ruwy1,t2
+       vpaddq          t2,t1,t1
+       # t1 += [ hc2[3] * r0, hc2[2] * u0, hc2[1] * w0, hc2[0] * y0 ]
+       vpmuludq        hc2,ruwy0,t2
+       vpaddq          t2,t1,t1
+       # t1 += [ hc3[3] * s4, hc3[2] * v4, hc3[1] * x4, hc3[0] * z4 ]
+       vpmuludq        hc3,svxz4,t2
+       vpaddq          t2,t1,t1
+       # t1 += [ hc4[3] * s3, hc4[2] * v3, hc4[1] * x3, hc4[0] * z3 ]
+       vpmuludq        hc4,svxz3,t2
+       vpaddq          t2,t1,t1
+       # d2 = t1[0] + t1[1] + t1[2] + t1[3]
+       vpermq          $0xee,t1,t2
+       vpaddq          t2,t1,t1
+       vpsrldq         $8,t1,t2
+       vpaddq          t2,t1,t1
+       vmovq           t1x,d2
+
+       # t1 = [ hc0[3] * r3, hc0[2] * u3, hc0[1] * w3, hc0[0] * y3 ]
+       vpmuludq        hc0,ruwy3,t1
+       # t1 += [ hc1[3] * r2, hc1[2] * u2, hc1[1] * w2, hc1[0] * y2 ]
+       vpmuludq        hc1,ruwy2,t2
+       vpaddq          t2,t1,t1
+       # t1 += [ hc2[3] * r1, hc2[2] * u1, hc2[1] * w1, hc2[0] * y1 ]
+       vpmuludq        hc2,ruwy1,t2
+       vpaddq          t2,t1,t1
+       # t1 += [ hc3[3] * r0, hc3[2] * u0, hc3[1] * w0, hc3[0] * y0 ]
+       vpmuludq        hc3,ruwy0,t2
+       vpaddq          t2,t1,t1
+       # t1 += [ hc4[3] * s4, hc4[2] * v4, hc4[1] * x4, hc4[0] * z4 ]
+       vpmuludq        hc4,svxz4,t2
+       vpaddq          t2,t1,t1
+       # d3 = t1[0] + t1[1] + t1[2] + t1[3]
+       vpermq          $0xee,t1,t2
+       vpaddq          t2,t1,t1
+       vpsrldq         $8,t1,t2
+       vpaddq          t2,t1,t1
+       vmovq           t1x,d3
+
+       # t1 = [ hc0[3] * r4, hc0[2] * u4, hc0[1] * w4, hc0[0] * y4 ]
+       vpmuludq        hc0,ruwy4,t1
+       # t1 += [ hc1[3] * r3, hc1[2] * u3, hc1[1] * w3, hc1[0] * y3 ]
+       vpmuludq        hc1,ruwy3,t2
+       vpaddq          t2,t1,t1
+       # t1 += [ hc2[3] * r2, hc2[2] * u2, hc2[1] * w2, hc2[0] * y2 ]
+       vpmuludq        hc2,ruwy2,t2
+       vpaddq          t2,t1,t1
+       # t1 += [ hc3[3] * r1, hc3[2] * u1, hc3[1] * w1, hc3[0] * y1 ]
+       vpmuludq        hc3,ruwy1,t2
+       vpaddq          t2,t1,t1
+       # t1 += [ hc4[3] * r0, hc4[2] * u0, hc4[1] * w0, hc4[0] * y0 ]
+       vpmuludq        hc4,ruwy0,t2
+       vpaddq          t2,t1,t1
+       # d4 = t1[0] + t1[1] + t1[2] + t1[3]
+       vpermq          $0xee,t1,t2
+       vpaddq          t2,t1,t1
+       vpsrldq         $8,t1,t2
+       vpaddq          t2,t1,t1
+       vmovq           t1x,d4
+
+       # d1 += d0 >> 26
+       mov             d0,%rax
+       shr             $26,%rax
+       add             %rax,d1
+       # h0 = d0 & 0x3ffffff
+       mov             d0,%rbx
+       and             $0x3ffffff,%ebx
+
+       # d2 += d1 >> 26
+       mov             d1,%rax
+       shr             $26,%rax
+       add             %rax,d2
+       # h1 = d1 & 0x3ffffff
+       mov             d1,%rax
+       and             $0x3ffffff,%eax
+       mov             %eax,h1
+
+       # d3 += d2 >> 26
+       mov             d2,%rax
+       shr             $26,%rax
+       add             %rax,d3
+       # h2 = d2 & 0x3ffffff
+       mov             d2,%rax
+       and             $0x3ffffff,%eax
+       mov             %eax,h2
+
+       # d4 += d3 >> 26
+       mov             d3,%rax
+       shr             $26,%rax
+       add             %rax,d4
+       # h3 = d3 & 0x3ffffff
+       mov             d3,%rax
+       and             $0x3ffffff,%eax
+       mov             %eax,h3
+
+       # h0 += (d4 >> 26) * 5
+       mov             d4,%rax
+       shr             $26,%rax
+       lea             (%eax,%eax,4),%eax
+       add             %eax,%ebx
+       # h4 = d4 & 0x3ffffff
+       mov             d4,%rax
+       and             $0x3ffffff,%eax
+       mov             %eax,h4
+
+       # h1 += h0 >> 26
+       mov             %ebx,%eax
+       shr             $26,%eax
+       add             %eax,h1
+       # h0 = h0 & 0x3ffffff
+       andl            $0x3ffffff,%ebx
+       mov             %ebx,h0
+
+       add             $0x40,m
+       dec             %rcx
+       jnz             .Ldoblock4
+
+       vzeroupper
+       pop             %r13
+       pop             %r12
+       pop             %rbx
+       ret
+ENDPROC(poly1305_4block_avx2)
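The identity in the header comment is Horner's rule unrolled: four sequential h = (h + m) * r steps collapse into one step over precomputed powers of r, which is what lets the four blocks live in parallel vector lanes. A toy C check of the identity over a small prime (arbitrary numbers, not part of the patch; real Poly1305 reduces mod 2^130 - 5):

    #include <stdint.h>
    #include <stdio.h>

    #define P 1000003ULL    /* toy prime standing in for 2^130 - 5 */

    int main(void)
    {
            uint64_t r = 12345, h = 0, m[4] = { 11, 22, 33, 44 };
            uint64_t r2 = r * r % P, r3 = r2 * r % P, r4 = r3 * r % P;
            uint64_t seq = h, par;
            int i;

            /* sequential: h = (h + m) * r, four times */
            for (i = 0; i < 4; i++)
                    seq = (seq + m[i]) * r % P;

            /* unrolled: h = (h + m1)*r^4 + m2*r^3 + m3*r^2 + m4*r */
            par = ((h + m[0]) * r4 % P + m[1] * r3 % P +
                   m[2] * r2 % P + m[3] * r % P) % P;

            printf("%llu == %llu\n", (unsigned long long)seq,
                   (unsigned long long)par);
            return 0;
    }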
diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S
new file mode 100644 (file)
index 0000000..338c748
--- /dev/null
@@ -0,0 +1,582 @@
+/*
+ * Poly1305 authenticator algorithm, RFC7539, x64 SSE2 functions
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+
+.data
+.align 16
+
+ANMASK:        .octa 0x0000000003ffffff0000000003ffffff
+ORMASK:        .octa 0x00000000010000000000000001000000
+
+.text
+
+#define h0 0x00(%rdi)
+#define h1 0x04(%rdi)
+#define h2 0x08(%rdi)
+#define h3 0x0c(%rdi)
+#define h4 0x10(%rdi)
+#define r0 0x00(%rdx)
+#define r1 0x04(%rdx)
+#define r2 0x08(%rdx)
+#define r3 0x0c(%rdx)
+#define r4 0x10(%rdx)
+#define s1 0x00(%rsp)
+#define s2 0x04(%rsp)
+#define s3 0x08(%rsp)
+#define s4 0x0c(%rsp)
+#define m %rsi
+#define h01 %xmm0
+#define h23 %xmm1
+#define h44 %xmm2
+#define t1 %xmm3
+#define t2 %xmm4
+#define t3 %xmm5
+#define t4 %xmm6
+#define mask %xmm7
+#define d0 %r8
+#define d1 %r9
+#define d2 %r10
+#define d3 %r11
+#define d4 %r12
+
+ENTRY(poly1305_block_sse2)
+       # %rdi: Accumulator h[5]
+       # %rsi: 16 byte input block m
+       # %rdx: Poly1305 key r[5]
+       # %rcx: Block count
+
+       # This single-block variant tries to improve performance by doing two
+       # multiplications in parallel using SSE instructions. There is quite
+       # some quadword packing involved, hence the speedup is marginal.
+
+       push            %rbx
+       push            %r12
+       sub             $0x10,%rsp
+
+       # s1..s4 = r1..r4 * 5
+       mov             r1,%eax
+       lea             (%eax,%eax,4),%eax
+       mov             %eax,s1
+       mov             r2,%eax
+       lea             (%eax,%eax,4),%eax
+       mov             %eax,s2
+       mov             r3,%eax
+       lea             (%eax,%eax,4),%eax
+       mov             %eax,s3
+       mov             r4,%eax
+       lea             (%eax,%eax,4),%eax
+       mov             %eax,s4
+
+       movdqa          ANMASK(%rip),mask
+
+.Ldoblock:
+       # h01 = [0, h1, 0, h0]
+       # h23 = [0, h3, 0, h2]
+       # h44 = [0, h4, 0, h4]
+       movd            h0,h01
+       movd            h1,t1
+       movd            h2,h23
+       movd            h3,t2
+       movd            h4,h44
+       punpcklqdq      t1,h01
+       punpcklqdq      t2,h23
+       punpcklqdq      h44,h44
+
+       # h01 += [ (m[3-6] >> 2) & 0x3ffffff, m[0-3] & 0x3ffffff ]
+       movd            0x00(m),t1
+       movd            0x03(m),t2
+       psrld           $2,t2
+       punpcklqdq      t2,t1
+       pand            mask,t1
+       paddd           t1,h01
+       # h23 += [ (m[9-12] >> 6) & 0x3ffffff, (m[6-9] >> 4) & 0x3ffffff ]
+       movd            0x06(m),t1
+       movd            0x09(m),t2
+       psrld           $4,t1
+       psrld           $6,t2
+       punpcklqdq      t2,t1
+       pand            mask,t1
+       paddd           t1,h23
+       # h44 += [ (m[12-15] >> 8) | (1 << 24), (m[12-15] >> 8) | (1 << 24) ]
+       mov             0x0c(m),%eax
+       shr             $8,%eax
+       or              $0x01000000,%eax
+       movd            %eax,t1
+       pshufd          $0xc4,t1,t1
+       paddd           t1,h44
+
+       # t1[0] = h0 * r0 + h2 * s3
+       # t1[1] = h1 * s4 + h3 * s2
+       movd            r0,t1
+       movd            s4,t2
+       punpcklqdq      t2,t1
+       pmuludq         h01,t1
+       movd            s3,t2
+       movd            s2,t3
+       punpcklqdq      t3,t2
+       pmuludq         h23,t2
+       paddq           t2,t1
+       # t2[0] = h0 * r1 + h2 * s4
+       # t2[1] = h1 * r0 + h3 * s3
+       movd            r1,t2
+       movd            r0,t3
+       punpcklqdq      t3,t2
+       pmuludq         h01,t2
+       movd            s4,t3
+       movd            s3,t4
+       punpcklqdq      t4,t3
+       pmuludq         h23,t3
+       paddq           t3,t2
+       # t3[0] = h4 * s1
+       # t3[1] = h4 * s2
+       movd            s1,t3
+       movd            s2,t4
+       punpcklqdq      t4,t3
+       pmuludq         h44,t3
+       # d0 = t1[0] + t1[1] + t3[0]
+       # d1 = t2[0] + t2[1] + t3[1]
+       movdqa          t1,t4
+       punpcklqdq      t2,t4
+       punpckhqdq      t2,t1
+       paddq           t4,t1
+       paddq           t3,t1
+       movq            t1,d0
+       psrldq          $8,t1
+       movq            t1,d1
+
+       # t1[0] = h0 * r2 + h2 * r0
+       # t1[1] = h1 * r1 + h3 * s4
+       movd            r2,t1
+       movd            r1,t2
+       punpcklqdq      t2,t1
+       pmuludq         h01,t1
+       movd            r0,t2
+       movd            s4,t3
+       punpcklqdq      t3,t2
+       pmuludq         h23,t2
+       paddq           t2,t1
+       # t2[0] = h0 * r3 + h2 * r1
+       # t2[1] = h1 * r2 + h3 * r0
+       movd            r3,t2
+       movd            r2,t3
+       punpcklqdq      t3,t2
+       pmuludq         h01,t2
+       movd            r1,t3
+       movd            r0,t4
+       punpcklqdq      t4,t3
+       pmuludq         h23,t3
+       paddq           t3,t2
+       # t3[0] = h4 * s3
+       # t3[1] = h4 * s4
+       movd            s3,t3
+       movd            s4,t4
+       punpcklqdq      t4,t3
+       pmuludq         h44,t3
+       # d2 = t1[0] + t1[1] + t3[0]
+       # d3 = t2[0] + t2[1] + t3[1]
+       movdqa          t1,t4
+       punpcklqdq      t2,t4
+       punpckhqdq      t2,t1
+       paddq           t4,t1
+       paddq           t3,t1
+       movq            t1,d2
+       psrldq          $8,t1
+       movq            t1,d3
+
+       # t1[0] = h0 * r4 + h2 * r2
+       # t1[1] = h1 * r3 + h3 * r1
+       movd            r4,t1
+       movd            r3,t2
+       punpcklqdq      t2,t1
+       pmuludq         h01,t1
+       movd            r2,t2
+       movd            r1,t3
+       punpcklqdq      t3,t2
+       pmuludq         h23,t2
+       paddq           t2,t1
+       # t3[0] = h4 * r0
+       movd            r0,t3
+       pmuludq         h44,t3
+       # d4 = t1[0] + t1[1] + t3[0]
+       movdqa          t1,t4
+       psrldq          $8,t4
+       paddq           t4,t1
+       paddq           t3,t1
+       movq            t1,d4
+
+       # d1 += d0 >> 26
+       mov             d0,%rax
+       shr             $26,%rax
+       add             %rax,d1
+       # h0 = d0 & 0x3ffffff
+       mov             d0,%rbx
+       and             $0x3ffffff,%ebx
+
+       # d2 += d1 >> 26
+       mov             d1,%rax
+       shr             $26,%rax
+       add             %rax,d2
+       # h1 = d1 & 0x3ffffff
+       mov             d1,%rax
+       and             $0x3ffffff,%eax
+       mov             %eax,h1
+
+       # d3 += d2 >> 26
+       mov             d2,%rax
+       shr             $26,%rax
+       add             %rax,d3
+       # h2 = d2 & 0x3ffffff
+       mov             d2,%rax
+       and             $0x3ffffff,%eax
+       mov             %eax,h2
+
+       # d4 += d3 >> 26
+       mov             d3,%rax
+       shr             $26,%rax
+       add             %rax,d4
+       # h3 = d3 & 0x3ffffff
+       mov             d3,%rax
+       and             $0x3ffffff,%eax
+       mov             %eax,h3
+
+       # h0 += (d4 >> 26) * 5
+       mov             d4,%rax
+       shr             $26,%rax
+       lea             (%eax,%eax,4),%eax
+       add             %eax,%ebx
+       # h4 = d4 & 0x3ffffff
+       mov             d4,%rax
+       and             $0x3ffffff,%eax
+       mov             %eax,h4
+
+       # h1 += h0 >> 26
+       mov             %ebx,%eax
+       shr             $26,%eax
+       add             %eax,h1
+       # h0 = h0 & 0x3ffffff
+       andl            $0x3ffffff,%ebx
+       mov             %ebx,h0
+
+       add             $0x10,m
+       dec             %rcx
+       jnz             .Ldoblock
+
+       add             $0x10,%rsp
+       pop             %r12
+       pop             %rbx
+       ret
+ENDPROC(poly1305_block_sse2)
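The scalar tail of each block (d0..d4 down to h0..h4) is the standard radix-2^26 carry propagation: every limb keeps its low 26 bits and passes the rest up, and the carry out of the top limb wraps around multiplied by 5 because 2^130 is congruent to 5 mod 2^130 - 5. A C sketch of the same reduction, mirroring the assembly including the final partial h0 -> h1 carry:

    #include <stdint.h>

    /* reduce five 64-bit limb accumulators d[0..4] (radix 2^26) into
     * five 26-bit limbs h[0..4]; the carry * 5 wraparound comes from
     * 2^130 == 5 (mod 2^130 - 5), as in the d4 -> h0 step above */
    static void poly1305_reduce(uint32_t h[5], uint64_t d[5])
    {
            uint64_t carry;
            int i;

            for (i = 0; i < 4; i++) {
                    d[i + 1] += d[i] >> 26;
                    h[i] = d[i] & 0x3ffffff;
            }
            carry = d[4] >> 26;
            h[4] = d[4] & 0x3ffffff;
            h[0] += (uint32_t)(carry * 5);
            /* one more partial carry keeps h0 within 26 bits */
            h[1] += h[0] >> 26;
            h[0] &= 0x3ffffff;
    }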
+
+
+#define u0 0x00(%r8)
+#define u1 0x04(%r8)
+#define u2 0x08(%r8)
+#define u3 0x0c(%r8)
+#define u4 0x10(%r8)
+#define hc0 %xmm0
+#define hc1 %xmm1
+#define hc2 %xmm2
+#define hc3 %xmm5
+#define hc4 %xmm6
+#define ru0 %xmm7
+#define ru1 %xmm8
+#define ru2 %xmm9
+#define ru3 %xmm10
+#define ru4 %xmm11
+#define sv1 %xmm12
+#define sv2 %xmm13
+#define sv3 %xmm14
+#define sv4 %xmm15
+#undef d0
+#define d0 %r13
+
+ENTRY(poly1305_2block_sse2)
+       # %rdi: Accumulator h[5]
+       # %rsi: 16 byte input block m
+       # %rdx: Poly1305 key r[5]
+       # %rcx: Doubleblock count
+       # %r8:  Poly1305 derived key r^2 u[5]
+
+       # This two-block variant further improves performance by using
+       # loop-unrolled block processing. This is more straightforward and
+       # does less byte shuffling, but requires a second Poly1305 key r^2:
+       # h = (h + m) * r    =>    h = (h + m1) * r^2 + m2 * r
+
+       push            %rbx
+       push            %r12
+       push            %r13
+
+       # combine r0,u0
+       movd            u0,ru0
+       movd            r0,t1
+       punpcklqdq      t1,ru0
+
+       # combine r1,u1 and s1=r1*5,v1=u1*5
+       movd            u1,ru1
+       movd            r1,t1
+       punpcklqdq      t1,ru1
+       movdqa          ru1,sv1
+       pslld           $2,sv1
+       paddd           ru1,sv1
+
+       # combine r2,u2 and s2=r2*5,v2=u2*5
+       movd            u2,ru2
+       movd            r2,t1
+       punpcklqdq      t1,ru2
+       movdqa          ru2,sv2
+       pslld           $2,sv2
+       paddd           ru2,sv2
+
+       # combine r3,u3 and s3=r3*5,v3=u3*5
+       movd            u3,ru3
+       movd            r3,t1
+       punpcklqdq      t1,ru3
+       movdqa          ru3,sv3
+       pslld           $2,sv3
+       paddd           ru3,sv3
+
+       # combine r4,u4 and s4=r4*5,v4=u4*5
+       movd            u4,ru4
+       movd            r4,t1
+       punpcklqdq      t1,ru4
+       movdqa          ru4,sv4
+       pslld           $2,sv4
+       paddd           ru4,sv4
+
+.Ldoblock2:
+       # hc0 = [ m[16-19] & 0x3ffffff, h0 + m[0-3] & 0x3ffffff ]
+       movd            0x00(m),hc0
+       movd            0x10(m),t1
+       punpcklqdq      t1,hc0
+       pand            ANMASK(%rip),hc0
+       movd            h0,t1
+       paddd           t1,hc0
+       # hc1 = [ (m[19-22] >> 2) & 0x3ffffff, h1 + (m[3-6] >> 2) & 0x3ffffff ]
+       movd            0x03(m),hc1
+       movd            0x13(m),t1
+       punpcklqdq      t1,hc1
+       psrld           $2,hc1
+       pand            ANMASK(%rip),hc1
+       movd            h1,t1
+       paddd           t1,hc1
+       # hc2 = [ (m[22-25] >> 4) & 0x3ffffff, h2 + (m[6-9] >> 4) & 0x3ffffff ]
+       movd            0x06(m),hc2
+       movd            0x16(m),t1
+       punpcklqdq      t1,hc2
+       psrld           $4,hc2
+       pand            ANMASK(%rip),hc2
+       movd            h2,t1
+       paddd           t1,hc2
+       # hc3 = [ (m[25-28] >> 6) & 0x3ffffff, h3 + (m[9-12] >> 6) & 0x3ffffff ]
+       movd            0x09(m),hc3
+       movd            0x19(m),t1
+       punpcklqdq      t1,hc3
+       psrld           $6,hc3
+       pand            ANMASK(%rip),hc3
+       movd            h3,t1
+       paddd           t1,hc3
+       # hc4 = [ (m[28-31] >> 8) | (1<<24), h4 + (m[12-15] >> 8) | (1<<24) ]
+       movd            0x0c(m),hc4
+       movd            0x1c(m),t1
+       punpcklqdq      t1,hc4
+       psrld           $8,hc4
+       por             ORMASK(%rip),hc4
+       movd            h4,t1
+       paddd           t1,hc4
+
+       # t1 = [ hc0[1] * r0, hc0[0] * u0 ]
+       movdqa          ru0,t1
+       pmuludq         hc0,t1
+       # t1 += [ hc1[1] * s4, hc1[0] * v4 ]
+       movdqa          sv4,t2
+       pmuludq         hc1,t2
+       paddq           t2,t1
+       # t1 += [ hc2[1] * s3, hc2[0] * v3 ]
+       movdqa          sv3,t2
+       pmuludq         hc2,t2
+       paddq           t2,t1
+       # t1 += [ hc3[1] * s2, hc3[0] * v2 ]
+       movdqa          sv2,t2
+       pmuludq         hc3,t2
+       paddq           t2,t1
+       # t1 += [ hc4[1] * s1, hc4[0] * v1 ]
+       movdqa          sv1,t2
+       pmuludq         hc4,t2
+       paddq           t2,t1
+       # d0 = t1[0] + t1[1]
+       movdqa          t1,t2
+       psrldq          $8,t2
+       paddq           t2,t1
+       movq            t1,d0
+
+       # t1 = [ hc0[1] * r1, hc0[0] * u1 ]
+       movdqa          ru1,t1
+       pmuludq         hc0,t1
+       # t1 += [ hc1[1] * r0, hc1[0] * u0 ]
+       movdqa          ru0,t2
+       pmuludq         hc1,t2
+       paddq           t2,t1
+       # t1 += [ hc2[1] * s4, hc2[0] * v4 ]
+       movdqa          sv4,t2
+       pmuludq         hc2,t2
+       paddq           t2,t1
+       # t1 += [ hc3[1] * s3, hc3[0] * v3 ]
+       movdqa          sv3,t2
+       pmuludq         hc3,t2
+       paddq           t2,t1
+       # t1 += [ hc4[1] * s2, hc4[0] * v2 ]
+       movdqa          sv2,t2
+       pmuludq         hc4,t2
+       paddq           t2,t1
+       # d1 = t1[0] + t1[1]
+       movdqa          t1,t2
+       psrldq          $8,t2
+       paddq           t2,t1
+       movq            t1,d1
+
+       # t1 = [ hc0[1] * r2, hc0[0] * u2 ]
+       movdqa          ru2,t1
+       pmuludq         hc0,t1
+       # t1 += [ hc1[1] * r1, hc1[0] * u1 ]
+       movdqa          ru1,t2
+       pmuludq         hc1,t2
+       paddq           t2,t1
+       # t1 += [ hc2[1] * r0, hc2[0] * u0 ]
+       movdqa          ru0,t2
+       pmuludq         hc2,t2
+       paddq           t2,t1
+       # t1 += [ hc3[1] * s4, hc3[0] * v4 ]
+       movdqa          sv4,t2
+       pmuludq         hc3,t2
+       paddq           t2,t1
+       # t1 += [ hc4[1] * s3, hc4[0] * v3 ]
+       movdqa          sv3,t2
+       pmuludq         hc4,t2
+       paddq           t2,t1
+       # d2 = t1[0] + t1[1]
+       movdqa          t1,t2
+       psrldq          $8,t2
+       paddq           t2,t1
+       movq            t1,d2
+
+       # t1 = [ hc0[1] * r3, hc0[0] * u3 ]
+       movdqa          ru3,t1
+       pmuludq         hc0,t1
+       # t1 += [ hc1[1] * r2, hc1[0] * u2 ]
+       movdqa          ru2,t2
+       pmuludq         hc1,t2
+       paddq           t2,t1
+       # t1 += [ hc2[1] * r1, hc2[0] * u1 ]
+       movdqa          ru1,t2
+       pmuludq         hc2,t2
+       paddq           t2,t1
+       # t1 += [ hc3[1] * r0, hc3[0] * u0 ]
+       movdqa          ru0,t2
+       pmuludq         hc3,t2
+       paddq           t2,t1
+       # t1 += [ hc4[1] * s4, hc4[0] * v4 ]
+       movdqa          sv4,t2
+       pmuludq         hc4,t2
+       paddq           t2,t1
+       # d3 = t1[0] + t1[1]
+       movdqa          t1,t2
+       psrldq          $8,t2
+       paddq           t2,t1
+       movq            t1,d3
+
+       # t1 = [ hc0[1] * r4, hc0[0] * u4 ]
+       movdqa          ru4,t1
+       pmuludq         hc0,t1
+       # t1 += [ hc1[1] * r3, hc1[0] * u3 ]
+       movdqa          ru3,t2
+       pmuludq         hc1,t2
+       paddq           t2,t1
+       # t1 += [ hc2[1] * r2, hc2[0] * u2 ]
+       movdqa          ru2,t2
+       pmuludq         hc2,t2
+       paddq           t2,t1
+       # t1 += [ hc3[1] * r1, hc3[0] * u1 ]
+       movdqa          ru1,t2
+       pmuludq         hc3,t2
+       paddq           t2,t1
+       # t1 += [ hc4[1] * r0, hc4[0] * u0 ]
+       movdqa          ru0,t2
+       pmuludq         hc4,t2
+       paddq           t2,t1
+       # d4 = t1[0] + t1[1]
+       movdqa          t1,t2
+       psrldq          $8,t2
+       paddq           t2,t1
+       movq            t1,d4
+
+       # d1 += d0 >> 26
+       mov             d0,%rax
+       shr             $26,%rax
+       add             %rax,d1
+       # h0 = d0 & 0x3ffffff
+       mov             d0,%rbx
+       and             $0x3ffffff,%ebx
+
+       # d2 += d1 >> 26
+       mov             d1,%rax
+       shr             $26,%rax
+       add             %rax,d2
+       # h1 = d1 & 0x3ffffff
+       mov             d1,%rax
+       and             $0x3ffffff,%eax
+       mov             %eax,h1
+
+       # d3 += d2 >> 26
+       mov             d2,%rax
+       shr             $26,%rax
+       add             %rax,d3
+       # h2 = d2 & 0x3ffffff
+       mov             d2,%rax
+       and             $0x3ffffff,%eax
+       mov             %eax,h2
+
+       # d4 += d3 >> 26
+       mov             d3,%rax
+       shr             $26,%rax
+       add             %rax,d4
+       # h3 = d3 & 0x3ffffff
+       mov             d3,%rax
+       and             $0x3ffffff,%eax
+       mov             %eax,h3
+
+       # h0 += (d4 >> 26) * 5
+       mov             d4,%rax
+       shr             $26,%rax
+       lea             (%eax,%eax,4),%eax
+       add             %eax,%ebx
+       # h4 = d4 & 0x3ffffff
+       mov             d4,%rax
+       and             $0x3ffffff,%eax
+       mov             %eax,h4
+
+       # h1 += h0 >> 26
+       mov             %ebx,%eax
+       shr             $26,%eax
+       add             %eax,h1
+       # h0 = h0 & 0x3ffffff
+       andl            $0x3ffffff,%ebx
+       mov             %ebx,h0
+
+       add             $0x20,m
+       dec             %rcx
+       jnz             .Ldoblock2
+
+       pop             %r13
+       pop             %r12
+       pop             %rbx
+       ret
+ENDPROC(poly1305_2block_sse2)
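Both functions above build the hc0..hc4 message limbs the same way: load 32 bits at byte offsets 0, 3, 6, 9 and 12, shift right by 0, 2, 4, 6 and 8 bits, mask to 26 bits, and OR the 2^128 pad bit into the top limb. A C sketch of that unpacking (le32 is a local helper for self-containment, not kernel API):

    #include <stdint.h>

    static uint32_t le32(const uint8_t *p)
    {
            return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
                   (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    /* split a 16-byte Poly1305 block into five 26-bit limbs; the
     * 1 << 24 sets the 2^128 bit appended to every full block */
    static void block_to_limbs(uint32_t c[5], const uint8_t m[16])
    {
            c[0] = (le32(m + 0) >> 0) & 0x3ffffff;
            c[1] = (le32(m + 3) >> 2) & 0x3ffffff;
            c[2] = (le32(m + 6) >> 4) & 0x3ffffff;
            c[3] = (le32(m + 9) >> 6) & 0x3ffffff;
            c[4] = (le32(m + 12) >> 8) | (1 << 24);
    }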
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
new file mode 100644 (file)
index 0000000..f7170d7
--- /dev/null
@@ -0,0 +1,207 @@
+/*
+ * Poly1305 authenticator algorithm, RFC7539, SIMD glue code
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <crypto/poly1305.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/fpu/api.h>
+#include <asm/simd.h>
+
+struct poly1305_simd_desc_ctx {
+       struct poly1305_desc_ctx base;
+       /* derived key u set? */
+       bool uset;
+#ifdef CONFIG_AS_AVX2
+       /* derived keys r^3, r^4 set? */
+       bool wset;
+#endif
+       /* derived Poly1305 key r^2 */
+       u32 u[5];
+       /* ... r^3 and r^4 are silently appended here when using AVX2 */
+};
+
+asmlinkage void poly1305_block_sse2(u32 *h, const u8 *src,
+                                   const u32 *r, unsigned int blocks);
+asmlinkage void poly1305_2block_sse2(u32 *h, const u8 *src, const u32 *r,
+                                    unsigned int blocks, const u32 *u);
+#ifdef CONFIG_AS_AVX2
+asmlinkage void poly1305_4block_avx2(u32 *h, const u8 *src, const u32 *r,
+                                    unsigned int blocks, const u32 *u);
+static bool poly1305_use_avx2;
+#endif
+
+static int poly1305_simd_init(struct shash_desc *desc)
+{
+       struct poly1305_simd_desc_ctx *sctx = shash_desc_ctx(desc);
+
+       sctx->uset = false;
+#ifdef CONFIG_AS_AVX2
+       sctx->wset = false;
+#endif
+
+       return crypto_poly1305_init(desc);
+}
+
+static void poly1305_simd_mult(u32 *a, const u32 *b)
+{
+       u8 m[POLY1305_BLOCK_SIZE];
+
+       memset(m, 0, sizeof(m));
+       /* The poly1305 block function adds a hi-bit to the accumulator which
+        * we don't need for key multiplication; compensate for it. */
+       a[4] -= 1 << 24;
+       poly1305_block_sse2(a, m, b, 1);
+}
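The trick here: the block function always computes (h + m) * r with the 2^128 bit ORed into the message, so feeding it an all-zero message and pre-subtracting that bit from the accumulator (1 << 24 in the top 26-bit limb) leaves exactly a * b mod 2^130 - 5. A sketch of the same idea; poly1305_block below is a hypothetical scalar stand-in for poly1305_block_sse2, not an existing kernel symbol:

    #include <stdint.h>

    /* hypothetical scalar equivalent of poly1305_block_sse2 */
    void poly1305_block(uint32_t *h, const uint8_t *m, const uint32_t *r,
                        unsigned int blocks);

    static void poly1305_key_mult(uint32_t a[5], const uint32_t b[5])
    {
            static const uint8_t zero[16];

            /* the block step computes (h + (m | 2^128)) * r; removing
             * the 2^128 bit from the accumulator first makes the net
             * effect (a - 2^128 + 0 + 2^128) * b = a * b */
            a[4] -= 1 << 24;
            poly1305_block(a, zero, b, 1);
    }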
+
+static unsigned int poly1305_simd_blocks(struct poly1305_desc_ctx *dctx,
+                                        const u8 *src, unsigned int srclen)
+{
+       struct poly1305_simd_desc_ctx *sctx;
+       unsigned int blocks, datalen;
+
+       BUILD_BUG_ON(offsetof(struct poly1305_simd_desc_ctx, base));
+       sctx = container_of(dctx, struct poly1305_simd_desc_ctx, base);
+
+       if (unlikely(!dctx->sset)) {
+               datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
+               src += srclen - datalen;
+               srclen = datalen;
+       }
+
+#ifdef CONFIG_AS_AVX2
+       if (poly1305_use_avx2 && srclen >= POLY1305_BLOCK_SIZE * 4) {
+               if (unlikely(!sctx->wset)) {
+                       if (!sctx->uset) {
+                               memcpy(sctx->u, dctx->r, sizeof(sctx->u));
+                               poly1305_simd_mult(sctx->u, dctx->r);
+                               sctx->uset = true;
+                       }
+                       memcpy(sctx->u + 5, sctx->u, sizeof(sctx->u));
+                       poly1305_simd_mult(sctx->u + 5, dctx->r);
+                       memcpy(sctx->u + 10, sctx->u + 5, sizeof(sctx->u));
+                       poly1305_simd_mult(sctx->u + 10, dctx->r);
+                       sctx->wset = true;
+               }
+               blocks = srclen / (POLY1305_BLOCK_SIZE * 4);
+               poly1305_4block_avx2(dctx->h, src, dctx->r, blocks, sctx->u);
+               src += POLY1305_BLOCK_SIZE * 4 * blocks;
+               srclen -= POLY1305_BLOCK_SIZE * 4 * blocks;
+       }
+#endif
+       if (likely(srclen >= POLY1305_BLOCK_SIZE * 2)) {
+               if (unlikely(!sctx->uset)) {
+                       memcpy(sctx->u, dctx->r, sizeof(sctx->u));
+                       poly1305_simd_mult(sctx->u, dctx->r);
+                       sctx->uset = true;
+               }
+               blocks = srclen / (POLY1305_BLOCK_SIZE * 2);
+               poly1305_2block_sse2(dctx->h, src, dctx->r, blocks, sctx->u);
+               src += POLY1305_BLOCK_SIZE * 2 * blocks;
+               srclen -= POLY1305_BLOCK_SIZE * 2 * blocks;
+       }
+       if (srclen >= POLY1305_BLOCK_SIZE) {
+               poly1305_block_sse2(dctx->h, src, dctx->r, 1);
+               srclen -= POLY1305_BLOCK_SIZE;
+       }
+       return srclen;
+}
+
+static int poly1305_simd_update(struct shash_desc *desc,
+                               const u8 *src, unsigned int srclen)
+{
+       struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+       unsigned int bytes;
+
+       /* kernel_fpu_begin/end is costly, use fallback for small updates */
+       if (srclen <= 288 || !may_use_simd())
+               return crypto_poly1305_update(desc, src, srclen);
+
+       kernel_fpu_begin();
+
+       if (unlikely(dctx->buflen)) {
+               bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
+               memcpy(dctx->buf + dctx->buflen, src, bytes);
+               src += bytes;
+               srclen -= bytes;
+               dctx->buflen += bytes;
+
+               if (dctx->buflen == POLY1305_BLOCK_SIZE) {
+                       poly1305_simd_blocks(dctx, dctx->buf,
+                                            POLY1305_BLOCK_SIZE);
+                       dctx->buflen = 0;
+               }
+       }
+
+       if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
+               bytes = poly1305_simd_blocks(dctx, src, srclen);
+               src += srclen - bytes;
+               srclen = bytes;
+       }
+
+       kernel_fpu_end();
+
+       if (unlikely(srclen)) {
+               dctx->buflen = srclen;
+               memcpy(dctx->buf, src, srclen);
+       }
+
+       return 0;
+}
+
+static struct shash_alg alg = {
+       .digestsize     = POLY1305_DIGEST_SIZE,
+       .init           = poly1305_simd_init,
+       .update         = poly1305_simd_update,
+       .final          = crypto_poly1305_final,
+       .setkey         = crypto_poly1305_setkey,
+       .descsize       = sizeof(struct poly1305_simd_desc_ctx),
+       .base           = {
+               .cra_name               = "poly1305",
+               .cra_driver_name        = "poly1305-simd",
+               .cra_priority           = 300,
+               .cra_flags              = CRYPTO_ALG_TYPE_SHASH,
+               .cra_alignmask          = sizeof(u32) - 1,
+               .cra_blocksize          = POLY1305_BLOCK_SIZE,
+               .cra_module             = THIS_MODULE,
+       },
+};
+
+static int __init poly1305_simd_mod_init(void)
+{
+       if (!cpu_has_xmm2)
+               return -ENODEV;
+
+#ifdef CONFIG_AS_AVX2
+       poly1305_use_avx2 = cpu_has_avx && cpu_has_avx2 &&
+                           cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL);
+       alg.descsize = sizeof(struct poly1305_simd_desc_ctx);
+       if (poly1305_use_avx2)
+               alg.descsize += 10 * sizeof(u32);
+#endif
+       return crypto_register_shash(&alg);
+}
+
+static void __exit poly1305_simd_mod_exit(void)
+{
+       crypto_unregister_shash(&alg);
+}
+
+module_init(poly1305_simd_mod_init);
+module_exit(poly1305_simd_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
+MODULE_DESCRIPTION("Poly1305 authenticator");
+MODULE_ALIAS_CRYPTO("poly1305");
+MODULE_ALIAS_CRYPTO("poly1305-simd");
index b4cfc5754033b9ddfa17d8ad395baf0e751891bd..b582ea7f78d3f4dd71d8effef93d5db0d3f8cf92 100644 (file)
@@ -48,6 +48,8 @@ config CRYPTO_AEAD
 config CRYPTO_AEAD2
        tristate
        select CRYPTO_ALGAPI2
+       select CRYPTO_NULL2
+       select CRYPTO_RNG2
 
 config CRYPTO_BLKCIPHER
        tristate
@@ -150,12 +152,16 @@ config CRYPTO_GF128MUL
 
 config CRYPTO_NULL
        tristate "Null algorithms"
-       select CRYPTO_ALGAPI
-       select CRYPTO_BLKCIPHER
-       select CRYPTO_HASH
+       select CRYPTO_NULL2
        help
          These are 'Null' algorithms, used by IPsec, which do nothing.
 
+config CRYPTO_NULL2
+       tristate
+       select CRYPTO_ALGAPI2
+       select CRYPTO_BLKCIPHER2
+       select CRYPTO_HASH2
+
 config CRYPTO_PCRYPT
        tristate "Parallel crypto engine"
        depends on SMP
@@ -200,6 +206,7 @@ config CRYPTO_AUTHENC
        select CRYPTO_BLKCIPHER
        select CRYPTO_MANAGER
        select CRYPTO_HASH
+       select CRYPTO_NULL
        help
          Authenc: Combined mode wrapper for IPsec.
          This is required for IPSec.
@@ -470,6 +477,18 @@ config CRYPTO_POLY1305
          It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use
          in IETF protocols. This is the portable C implementation of Poly1305.
 
+config CRYPTO_POLY1305_X86_64
+       tristate "Poly1305 authenticator algorithm (x86_64/SSE2/AVX2)"
+       depends on X86 && 64BIT
+       select CRYPTO_POLY1305
+       help
+         Poly1305 authenticator algorithm, RFC7539.
+
+         Poly1305 is an authenticator algorithm designed by Daniel J. Bernstein.
+         It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use
+         in IETF protocols. This is the x86_64 assembler implementation using SIMD
+         instructions.
+
 config CRYPTO_MD4
        tristate "MD4 digest algorithm"
        select CRYPTO_HASH
@@ -1213,6 +1232,21 @@ config CRYPTO_CHACHA20
          See also:
          <http://cr.yp.to/chacha/chacha-20080128.pdf>
 
+config CRYPTO_CHACHA20_X86_64
+       tristate "ChaCha20 cipher algorithm (x86_64/SSSE3/AVX2)"
+       depends on X86 && 64BIT
+       select CRYPTO_BLKCIPHER
+       select CRYPTO_CHACHA20
+       help
+         ChaCha20 cipher algorithm, RFC7539.
+
+         ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J.
+         Bernstein and further specified in RFC7539 for use in IETF protocols.
+         This is the x86_64 assembler implementation using SIMD instructions.
+
+         See also:
+         <http://cr.yp.to/chacha/chacha-20080128.pdf>
+
 config CRYPTO_SEED
        tristate "SEED cipher algorithm"
        select CRYPTO_ALGAPI
index a16a7e7f2d607cf4a89af86b60c22905c268fd6f..e2c59819b236f61303f87989193e26517d5ad3a9 100644 (file)
@@ -17,6 +17,7 @@ obj-$(CONFIG_CRYPTO_AEAD2) += aead.o
 
 crypto_blkcipher-y := ablkcipher.o
 crypto_blkcipher-y += blkcipher.o
+crypto_blkcipher-y += skcipher.o
 obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o
 obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
 obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o
@@ -46,7 +47,7 @@ obj-$(CONFIG_CRYPTO_CMAC) += cmac.o
 obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
 obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
 obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
-obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
+obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o
 obj-$(CONFIG_CRYPTO_MD4) += md4.o
 obj-$(CONFIG_CRYPTO_MD5) += md5.o
 obj-$(CONFIG_CRYPTO_RMD128) += rmd128.o
index 07bf99773548bf9f088b6ff8380edb4caf7cda4a..9b18a1e40d6af896be358c72113cbddb78c7acaa 100644 (file)
@@ -3,7 +3,7 @@
  *
  * This file provides API support for AEAD algorithms.
  *
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
  */
 
 #include <crypto/internal/geniv.h>
+#include <crypto/internal/rng.h>
+#include <crypto/null.h>
 #include <crypto/scatterwalk.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/rtnetlink.h>
-#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 #include <linux/cryptouser.h>
 
 #include "internal.h"
 
-struct compat_request_ctx {
-       struct scatterlist src[2];
-       struct scatterlist dst[2];
-       struct scatterlist ivbuf[2];
-       struct scatterlist *ivsg;
-       struct aead_givcrypt_request subreq;
-};
-
-static int aead_null_givencrypt(struct aead_givcrypt_request *req);
-static int aead_null_givdecrypt(struct aead_givcrypt_request *req);
-
 static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
                            unsigned int keylen)
 {
@@ -53,7 +43,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
 
        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
-       ret = tfm->setkey(tfm, alignbuffer, keylen);
+       ret = crypto_aead_alg(tfm)->setkey(tfm, alignbuffer, keylen);
        memset(alignbuffer, 0, keylen);
        kfree(buffer);
        return ret;
@@ -64,12 +54,10 @@ int crypto_aead_setkey(struct crypto_aead *tfm,
 {
        unsigned long alignmask = crypto_aead_alignmask(tfm);
 
-       tfm = tfm->child;
-
        if ((unsigned long)key & alignmask)
                return setkey_unaligned(tfm, key, keylen);
 
-       return tfm->setkey(tfm, key, keylen);
+       return crypto_aead_alg(tfm)->setkey(tfm, key, keylen);
 }
 EXPORT_SYMBOL_GPL(crypto_aead_setkey);
 
@@ -80,100 +68,17 @@ int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
        if (authsize > crypto_aead_maxauthsize(tfm))
                return -EINVAL;
 
-       if (tfm->setauthsize) {
-               err = tfm->setauthsize(tfm->child, authsize);
+       if (crypto_aead_alg(tfm)->setauthsize) {
+               err = crypto_aead_alg(tfm)->setauthsize(tfm, authsize);
                if (err)
                        return err;
        }
 
-       tfm->child->authsize = authsize;
        tfm->authsize = authsize;
        return 0;
 }
 EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);
 
-struct aead_old_request {
-       struct scatterlist srcbuf[2];
-       struct scatterlist dstbuf[2];
-       struct aead_request subreq;
-};
-
-unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
-{
-       return tfm->reqsize + sizeof(struct aead_old_request);
-}
-EXPORT_SYMBOL_GPL(crypto_aead_reqsize);
-
-static int old_crypt(struct aead_request *req,
-                    int (*crypt)(struct aead_request *req))
-{
-       struct aead_old_request *nreq = aead_request_ctx(req);
-       struct crypto_aead *aead = crypto_aead_reqtfm(req);
-       struct scatterlist *src, *dst;
-
-       if (req->old)
-               return crypt(req);
-
-       src = scatterwalk_ffwd(nreq->srcbuf, req->src, req->assoclen);
-       dst = req->src == req->dst ?
-             src : scatterwalk_ffwd(nreq->dstbuf, req->dst, req->assoclen);
-
-       aead_request_set_tfm(&nreq->subreq, aead);
-       aead_request_set_callback(&nreq->subreq, aead_request_flags(req),
-                                 req->base.complete, req->base.data);
-       aead_request_set_crypt(&nreq->subreq, src, dst, req->cryptlen,
-                              req->iv);
-       aead_request_set_assoc(&nreq->subreq, req->src, req->assoclen);
-
-       return crypt(&nreq->subreq);
-}
-
-static int old_encrypt(struct aead_request *req)
-{
-       struct crypto_aead *aead = crypto_aead_reqtfm(req);
-       struct old_aead_alg *alg = crypto_old_aead_alg(aead);
-
-       return old_crypt(req, alg->encrypt);
-}
-
-static int old_decrypt(struct aead_request *req)
-{
-       struct crypto_aead *aead = crypto_aead_reqtfm(req);
-       struct old_aead_alg *alg = crypto_old_aead_alg(aead);
-
-       return old_crypt(req, alg->decrypt);
-}
-
-static int no_givcrypt(struct aead_givcrypt_request *req)
-{
-       return -ENOSYS;
-}
-
-static int crypto_old_aead_init_tfm(struct crypto_tfm *tfm)
-{
-       struct old_aead_alg *alg = &tfm->__crt_alg->cra_aead;
-       struct crypto_aead *crt = __crypto_aead_cast(tfm);
-
-       if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8)
-               return -EINVAL;
-
-       crt->setkey = alg->setkey;
-       crt->setauthsize = alg->setauthsize;
-       crt->encrypt = old_encrypt;
-       crt->decrypt = old_decrypt;
-       if (alg->ivsize) {
-               crt->givencrypt = alg->givencrypt ?: no_givcrypt;
-               crt->givdecrypt = alg->givdecrypt ?: no_givcrypt;
-       } else {
-               crt->givencrypt = aead_null_givencrypt;
-               crt->givdecrypt = aead_null_givdecrypt;
-       }
-       crt->child = __crypto_aead_cast(tfm);
-       crt->authsize = alg->maxauthsize;
-
-       return 0;
-}
-
 static void crypto_aead_exit_tfm(struct crypto_tfm *tfm)
 {
        struct crypto_aead *aead = __crypto_aead_cast(tfm);
@@ -187,14 +92,6 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm)
        struct crypto_aead *aead = __crypto_aead_cast(tfm);
        struct aead_alg *alg = crypto_aead_alg(aead);
 
-       if (crypto_old_aead_alg(aead)->encrypt)
-               return crypto_old_aead_init_tfm(tfm);
-
-       aead->setkey = alg->setkey;
-       aead->setauthsize = alg->setauthsize;
-       aead->encrypt = alg->encrypt;
-       aead->decrypt = alg->decrypt;
-       aead->child = __crypto_aead_cast(tfm);
        aead->authsize = alg->maxauthsize;
 
        if (alg->exit)
@@ -206,64 +103,6 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm)
        return 0;
 }
 
-#ifdef CONFIG_NET
-static int crypto_old_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       struct crypto_report_aead raead;
-       struct old_aead_alg *aead = &alg->cra_aead;
-
-       strncpy(raead.type, "aead", sizeof(raead.type));
-       strncpy(raead.geniv, aead->geniv ?: "<built-in>", sizeof(raead.geniv));
-
-       raead.blocksize = alg->cra_blocksize;
-       raead.maxauthsize = aead->maxauthsize;
-       raead.ivsize = aead->ivsize;
-
-       if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD,
-                   sizeof(struct crypto_report_aead), &raead))
-               goto nla_put_failure;
-       return 0;
-
-nla_put_failure:
-       return -EMSGSIZE;
-}
-#else
-static int crypto_old_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       return -ENOSYS;
-}
-#endif
-
-static void crypto_old_aead_show(struct seq_file *m, struct crypto_alg *alg)
-       __attribute__ ((unused));
-static void crypto_old_aead_show(struct seq_file *m, struct crypto_alg *alg)
-{
-       struct old_aead_alg *aead = &alg->cra_aead;
-
-       seq_printf(m, "type         : aead\n");
-       seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
-                                            "yes" : "no");
-       seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
-       seq_printf(m, "ivsize       : %u\n", aead->ivsize);
-       seq_printf(m, "maxauthsize  : %u\n", aead->maxauthsize);
-       seq_printf(m, "geniv        : %s\n", aead->geniv ?: "<built-in>");
-}
-
-const struct crypto_type crypto_aead_type = {
-       .extsize = crypto_alg_extsize,
-       .init_tfm = crypto_aead_init_tfm,
-#ifdef CONFIG_PROC_FS
-       .show = crypto_old_aead_show,
-#endif
-       .report = crypto_old_aead_report,
-       .lookup = crypto_lookup_aead,
-       .maskclear = ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV),
-       .maskset = CRYPTO_ALG_TYPE_MASK,
-       .type = CRYPTO_ALG_TYPE_AEAD,
-       .tfmsize = offsetof(struct crypto_aead, base),
-};
-EXPORT_SYMBOL_GPL(crypto_aead_type);
-
 #ifdef CONFIG_NET
 static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
 {
@@ -307,93 +146,31 @@ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
        seq_printf(m, "geniv        : <none>\n");
 }
 
-static const struct crypto_type crypto_new_aead_type = {
-       .extsize = crypto_alg_extsize,
-       .init_tfm = crypto_aead_init_tfm,
-#ifdef CONFIG_PROC_FS
-       .show = crypto_aead_show,
-#endif
-       .report = crypto_aead_report,
-       .maskclear = ~CRYPTO_ALG_TYPE_MASK,
-       .maskset = CRYPTO_ALG_TYPE_MASK,
-       .type = CRYPTO_ALG_TYPE_AEAD,
-       .tfmsize = offsetof(struct crypto_aead, base),
-};
-
-static int aead_null_givencrypt(struct aead_givcrypt_request *req)
-{
-       return crypto_aead_encrypt(&req->areq);
-}
-
-static int aead_null_givdecrypt(struct aead_givcrypt_request *req)
-{
-       return crypto_aead_decrypt(&req->areq);
-}
-
-#ifdef CONFIG_NET
-static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       struct crypto_report_aead raead;
-       struct old_aead_alg *aead = &alg->cra_aead;
-
-       strncpy(raead.type, "nivaead", sizeof(raead.type));
-       strncpy(raead.geniv, aead->geniv, sizeof(raead.geniv));
-
-       raead.blocksize = alg->cra_blocksize;
-       raead.maxauthsize = aead->maxauthsize;
-       raead.ivsize = aead->ivsize;
-
-       if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD,
-                   sizeof(struct crypto_report_aead), &raead))
-               goto nla_put_failure;
-       return 0;
-
-nla_put_failure:
-       return -EMSGSIZE;
-}
-#else
-static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
+static void crypto_aead_free_instance(struct crypto_instance *inst)
 {
-       return -ENOSYS;
-}
-#endif
-
+       struct aead_instance *aead = aead_instance(inst);
 
-static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
-       __attribute__ ((unused));
-static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
-{
-       struct old_aead_alg *aead = &alg->cra_aead;
+       if (!aead->free) {
+               inst->tmpl->free(inst);
+               return;
+       }
 
-       seq_printf(m, "type         : nivaead\n");
-       seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
-                                            "yes" : "no");
-       seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
-       seq_printf(m, "ivsize       : %u\n", aead->ivsize);
-       seq_printf(m, "maxauthsize  : %u\n", aead->maxauthsize);
-       seq_printf(m, "geniv        : %s\n", aead->geniv);
+       aead->free(aead);
 }
 
-const struct crypto_type crypto_nivaead_type = {
+static const struct crypto_type crypto_aead_type = {
        .extsize = crypto_alg_extsize,
        .init_tfm = crypto_aead_init_tfm,
+       .free = crypto_aead_free_instance,
 #ifdef CONFIG_PROC_FS
-       .show = crypto_nivaead_show,
+       .show = crypto_aead_show,
 #endif
-       .report = crypto_nivaead_report,
-       .maskclear = ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV),
-       .maskset = CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV,
+       .report = crypto_aead_report,
+       .maskclear = ~CRYPTO_ALG_TYPE_MASK,
+       .maskset = CRYPTO_ALG_TYPE_MASK,
        .type = CRYPTO_ALG_TYPE_AEAD,
        .tfmsize = offsetof(struct crypto_aead, base),
 };
-EXPORT_SYMBOL_GPL(crypto_nivaead_type);
-
-static int crypto_grab_nivaead(struct crypto_aead_spawn *spawn,
-                              const char *name, u32 type, u32 mask)
-{
-       spawn->base.frontend = &crypto_nivaead_type;
-       return crypto_grab_spawn(&spawn->base, name, type, mask);
-}
 
 static int aead_geniv_setkey(struct crypto_aead *tfm,
                             const u8 *key, unsigned int keylen)
@@ -411,169 +188,6 @@ static int aead_geniv_setauthsize(struct crypto_aead *tfm,
        return crypto_aead_setauthsize(ctx->child, authsize);
 }
 
-static void compat_encrypt_complete2(struct aead_request *req, int err)
-{
-       struct compat_request_ctx *rctx = aead_request_ctx(req);
-       struct aead_givcrypt_request *subreq = &rctx->subreq;
-       struct crypto_aead *geniv;
-
-       if (err == -EINPROGRESS)
-               return;
-
-       if (err)
-               goto out;
-
-       geniv = crypto_aead_reqtfm(req);
-       scatterwalk_map_and_copy(subreq->giv, rctx->ivsg, 0,
-                                crypto_aead_ivsize(geniv), 1);
-
-out:
-       kzfree(subreq->giv);
-}
-
-static void compat_encrypt_complete(struct crypto_async_request *base, int err)
-{
-       struct aead_request *req = base->data;
-
-       compat_encrypt_complete2(req, err);
-       aead_request_complete(req, err);
-}
-
-static int compat_encrypt(struct aead_request *req)
-{
-       struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-       struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
-       struct compat_request_ctx *rctx = aead_request_ctx(req);
-       struct aead_givcrypt_request *subreq = &rctx->subreq;
-       unsigned int ivsize = crypto_aead_ivsize(geniv);
-       struct scatterlist *src, *dst;
-       crypto_completion_t compl;
-       void *data;
-       u8 *info;
-       __be64 seq;
-       int err;
-
-       if (req->cryptlen < ivsize)
-               return -EINVAL;
-
-       compl = req->base.complete;
-       data = req->base.data;
-
-       rctx->ivsg = scatterwalk_ffwd(rctx->ivbuf, req->dst, req->assoclen);
-       info = PageHighMem(sg_page(rctx->ivsg)) ? NULL : sg_virt(rctx->ivsg);
-
-       if (!info) {
-               info = kmalloc(ivsize, req->base.flags &
-                                      CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
-                                                                 GFP_ATOMIC);
-               if (!info)
-                       return -ENOMEM;
-
-               compl = compat_encrypt_complete;
-               data = req;
-       }
-
-       memcpy(&seq, req->iv + ivsize - sizeof(seq), sizeof(seq));
-
-       src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen + ivsize);
-       dst = req->src == req->dst ?
-             src : scatterwalk_ffwd(rctx->dst, rctx->ivsg, ivsize);
-
-       aead_givcrypt_set_tfm(subreq, ctx->child);
-       aead_givcrypt_set_callback(subreq, req->base.flags,
-                                  compl, data);
-       aead_givcrypt_set_crypt(subreq, src, dst,
-                               req->cryptlen - ivsize, req->iv);
-       aead_givcrypt_set_assoc(subreq, req->src, req->assoclen);
-       aead_givcrypt_set_giv(subreq, info, be64_to_cpu(seq));
-
-       err = crypto_aead_givencrypt(subreq);
-       if (unlikely(PageHighMem(sg_page(rctx->ivsg))))
-               compat_encrypt_complete2(req, err);
-       return err;
-}
-
-static int compat_decrypt(struct aead_request *req)
-{
-       struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-       struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
-       struct compat_request_ctx *rctx = aead_request_ctx(req);
-       struct aead_request *subreq = &rctx->subreq.areq;
-       unsigned int ivsize = crypto_aead_ivsize(geniv);
-       struct scatterlist *src, *dst;
-       crypto_completion_t compl;
-       void *data;
-
-       if (req->cryptlen < ivsize)
-               return -EINVAL;
-
-       aead_request_set_tfm(subreq, ctx->child);
-
-       compl = req->base.complete;
-       data = req->base.data;
-
-       src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen + ivsize);
-       dst = req->src == req->dst ?
-             src : scatterwalk_ffwd(rctx->dst, req->dst,
-                                    req->assoclen + ivsize);
-
-       aead_request_set_callback(subreq, req->base.flags, compl, data);
-       aead_request_set_crypt(subreq, src, dst,
-                              req->cryptlen - ivsize, req->iv);
-       aead_request_set_assoc(subreq, req->src, req->assoclen);
-
-       scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
-
-       return crypto_aead_decrypt(subreq);
-}
-
-static int compat_encrypt_first(struct aead_request *req)
-{
-       struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-       struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
-       int err = 0;
-
-       spin_lock_bh(&ctx->lock);
-       if (geniv->encrypt != compat_encrypt_first)
-               goto unlock;
-
-       geniv->encrypt = compat_encrypt;
-
-unlock:
-       spin_unlock_bh(&ctx->lock);
-
-       if (err)
-               return err;
-
-       return compat_encrypt(req);
-}
-
-static int aead_geniv_init_compat(struct crypto_tfm *tfm)
-{
-       struct crypto_aead *geniv = __crypto_aead_cast(tfm);
-       struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
-       int err;
-
-       spin_lock_init(&ctx->lock);
-
-       crypto_aead_set_reqsize(geniv, sizeof(struct compat_request_ctx));
-
-       err = aead_geniv_init(tfm);
-
-       ctx->child = geniv->child;
-       geniv->child = geniv;
-
-       return err;
-}
-
-static void aead_geniv_exit_compat(struct crypto_tfm *tfm)
-{
-       struct crypto_aead *geniv = __crypto_aead_cast(tfm);
-       struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
-
-       crypto_free_aead(ctx->child);
-}
-
 struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
                                       struct rtattr **tb, u32 type, u32 mask)
 {
@@ -590,8 +204,7 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
        if (IS_ERR(algt))
                return ERR_CAST(algt);
 
-       if ((algt->type ^ (CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV)) &
-           algt->mask)
+       if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
                return ERR_PTR(-EINVAL);
 
        name = crypto_attr_alg_name(tb[1]);
@@ -608,9 +221,7 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
        mask |= crypto_requires_sync(algt->type, algt->mask);
 
        crypto_set_aead_spawn(spawn, aead_crypto_instance(inst));
-       err = (algt->mask & CRYPTO_ALG_GENIV) ?
-             crypto_grab_nivaead(spawn, name, type, mask) :
-             crypto_grab_aead(spawn, name, type, mask);
+       err = crypto_grab_aead(spawn, name, type, mask);
        if (err)
                goto err_free_inst;
 
@@ -623,43 +234,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
        if (ivsize < sizeof(u64))
                goto err_drop_alg;
 
-       /*
-        * This is only true if we're constructing an algorithm with its
-        * default IV generator.  For the default generator we elide the
-        * template name and double-check the IV generator.
-        */
-       if (algt->mask & CRYPTO_ALG_GENIV) {
-               if (!alg->base.cra_aead.encrypt)
-                       goto err_drop_alg;
-               if (strcmp(tmpl->name, alg->base.cra_aead.geniv))
-                       goto err_drop_alg;
-
-               memcpy(inst->alg.base.cra_name, alg->base.cra_name,
-                      CRYPTO_MAX_ALG_NAME);
-               memcpy(inst->alg.base.cra_driver_name,
-                      alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME);
-
-               inst->alg.base.cra_flags = CRYPTO_ALG_TYPE_AEAD |
-                                          CRYPTO_ALG_GENIV;
-               inst->alg.base.cra_flags |= alg->base.cra_flags &
-                                           CRYPTO_ALG_ASYNC;
-               inst->alg.base.cra_priority = alg->base.cra_priority;
-               inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
-               inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
-               inst->alg.base.cra_type = &crypto_aead_type;
-
-               inst->alg.base.cra_aead.ivsize = ivsize;
-               inst->alg.base.cra_aead.maxauthsize = maxauthsize;
-
-               inst->alg.base.cra_aead.setkey = alg->base.cra_aead.setkey;
-               inst->alg.base.cra_aead.setauthsize =
-                       alg->base.cra_aead.setauthsize;
-               inst->alg.base.cra_aead.encrypt = alg->base.cra_aead.encrypt;
-               inst->alg.base.cra_aead.decrypt = alg->base.cra_aead.decrypt;
-
-               goto out;
-       }
-
        err = -ENAMETOOLONG;
        if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
                     "%s(%s)", tmpl->name, alg->base.cra_name) >=
@@ -682,12 +256,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
        inst->alg.ivsize = ivsize;
        inst->alg.maxauthsize = maxauthsize;
 
-       inst->alg.encrypt = compat_encrypt_first;
-       inst->alg.decrypt = compat_decrypt;
-
-       inst->alg.base.cra_init = aead_geniv_init_compat;
-       inst->alg.base.cra_exit = aead_geniv_exit_compat;
-
 out:
        return inst;
 
@@ -707,147 +275,58 @@ void aead_geniv_free(struct aead_instance *inst)
 }
 EXPORT_SYMBOL_GPL(aead_geniv_free);
 
-int aead_geniv_init(struct crypto_tfm *tfm)
+int aead_init_geniv(struct crypto_aead *aead)
 {
-       struct crypto_instance *inst = (void *)tfm->__crt_alg;
+       struct aead_geniv_ctx *ctx = crypto_aead_ctx(aead);
+       struct aead_instance *inst = aead_alg_instance(aead);
        struct crypto_aead *child;
-       struct crypto_aead *aead;
-
-       aead = __crypto_aead_cast(tfm);
-
-       child = crypto_spawn_aead(crypto_instance_ctx(inst));
-       if (IS_ERR(child))
-               return PTR_ERR(child);
-
-       aead->child = child;
-       aead->reqsize += crypto_aead_reqsize(child);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(aead_geniv_init);
-
-void aead_geniv_exit(struct crypto_tfm *tfm)
-{
-       crypto_free_aead(__crypto_aead_cast(tfm)->child);
-}
-EXPORT_SYMBOL_GPL(aead_geniv_exit);
-
-static int crypto_nivaead_default(struct crypto_alg *alg, u32 type, u32 mask)
-{
-       struct rtattr *tb[3];
-       struct {
-               struct rtattr attr;
-               struct crypto_attr_type data;
-       } ptype;
-       struct {
-               struct rtattr attr;
-               struct crypto_attr_alg data;
-       } palg;
-       struct crypto_template *tmpl;
-       struct crypto_instance *inst;
-       struct crypto_alg *larval;
-       const char *geniv;
        int err;
 
-       larval = crypto_larval_lookup(alg->cra_driver_name,
-                                     CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV,
-                                     CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
-       err = PTR_ERR(larval);
-       if (IS_ERR(larval))
-               goto out;
-
-       err = -EAGAIN;
-       if (!crypto_is_larval(larval))
-               goto drop_larval;
-
-       ptype.attr.rta_len = sizeof(ptype);
-       ptype.attr.rta_type = CRYPTOA_TYPE;
-       ptype.data.type = type | CRYPTO_ALG_GENIV;
-       /* GENIV tells the template that we're making a default geniv. */
-       ptype.data.mask = mask | CRYPTO_ALG_GENIV;
-       tb[0] = &ptype.attr;
-
-       palg.attr.rta_len = sizeof(palg);
-       palg.attr.rta_type = CRYPTOA_ALG;
-       /* Must use the exact name to locate ourselves. */
-       memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
-       tb[1] = &palg.attr;
-
-       tb[2] = NULL;
+       spin_lock_init(&ctx->lock);
 
-       geniv = alg->cra_aead.geniv;
+       err = crypto_get_default_rng();
+       if (err)
+               goto out;
 
-       tmpl = crypto_lookup_template(geniv);
-       err = -ENOENT;
-       if (!tmpl)
-               goto kill_larval;
+       err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
+                                  crypto_aead_ivsize(aead));
+       crypto_put_default_rng();
+       if (err)
+               goto out;
 
-       if (tmpl->create) {
-               err = tmpl->create(tmpl, tb);
-               if (err)
-                       goto put_tmpl;
-               goto ok;
-       }
+       ctx->null = crypto_get_default_null_skcipher();
+       err = PTR_ERR(ctx->null);
+       if (IS_ERR(ctx->null))
+               goto out;
 
-       inst = tmpl->alloc(tb);
-       err = PTR_ERR(inst);
-       if (IS_ERR(inst))
-               goto put_tmpl;
+       child = crypto_spawn_aead(aead_instance_ctx(inst));
+       err = PTR_ERR(child);
+       if (IS_ERR(child))
+               goto drop_null;
 
-       err = crypto_register_instance(tmpl, inst);
-       if (err) {
-               tmpl->free(inst);
-               goto put_tmpl;
-       }
+       ctx->child = child;
+       crypto_aead_set_reqsize(aead, crypto_aead_reqsize(child) +
+                                     sizeof(struct aead_request));
 
-ok:
-       /* Redo the lookup to use the instance we just registered. */
-       err = -EAGAIN;
+       err = 0;
 
-put_tmpl:
-       crypto_tmpl_put(tmpl);
-kill_larval:
-       crypto_larval_kill(larval);
-drop_larval:
-       crypto_mod_put(larval);
 out:
-       crypto_mod_put(alg);
        return err;
+
+drop_null:
+       crypto_put_default_null_skcipher();
+       goto out;
 }
+EXPORT_SYMBOL_GPL(aead_init_geniv);
 
-struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask)
+void aead_exit_geniv(struct crypto_aead *tfm)
 {
-       struct crypto_alg *alg;
-
-       alg = crypto_alg_mod_lookup(name, type, mask);
-       if (IS_ERR(alg))
-               return alg;
-
-       if (alg->cra_type == &crypto_aead_type)
-               return alg;
-
-       if (!alg->cra_aead.ivsize)
-               return alg;
-
-       crypto_mod_put(alg);
-       alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
-                                   mask & ~CRYPTO_ALG_TESTED);
-       if (IS_ERR(alg))
-               return alg;
-
-       if (alg->cra_type == &crypto_aead_type) {
-               if (~alg->cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED) {
-                       crypto_mod_put(alg);
-                       alg = ERR_PTR(-ENOENT);
-               }
-               return alg;
-       }
-
-       BUG_ON(!alg->cra_aead.ivsize);
+       struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
 
-       return ERR_PTR(crypto_nivaead_default(alg, type, mask));
+       crypto_free_aead(ctx->child);
+       crypto_put_default_null_skcipher();
 }
-EXPORT_SYMBOL_GPL(crypto_lookup_aead);
+EXPORT_SYMBOL_GPL(aead_exit_geniv);
 
 int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
                     u32 type, u32 mask)
@@ -870,7 +349,7 @@ static int aead_prepare_alg(struct aead_alg *alg)
        if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8)
                return -EINVAL;
 
-       base->cra_type = &crypto_new_aead_type;
+       base->cra_type = &crypto_aead_type;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_AEAD;
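
The net effect of the hunks above is that struct aead_alg becomes the only AEAD registration path and crypto_aead_type the only AEAD crypto_type. A hedged skeleton of what a converted algorithm now registers (every name below is hypothetical, not from the patch):

    #include <crypto/internal/aead.h>
    #include <linux/module.h>

    static int example_setkey(struct crypto_aead *tfm, const u8 *key,
                              unsigned int keylen)
    {
            return 0;       /* stub */
    }

    static int example_encrypt(struct aead_request *req)
    {
            return 0;       /* stub */
    }

    static int example_decrypt(struct aead_request *req)
    {
            return 0;       /* stub */
    }

    static struct aead_alg example_alg = {
            .setkey      = example_setkey,
            .encrypt     = example_encrypt,
            .decrypt     = example_decrypt,
            .ivsize      = 12,
            .maxauthsize = 16,
            .base = {
                    .cra_name        = "example-aead",
                    .cra_driver_name = "example-aead-generic",
                    .cra_priority    = 100,
                    .cra_blocksize   = 1,
                    .cra_module      = THIS_MODULE,
            },
    };

crypto_register_aead() feeds this through aead_prepare_alg(), which rejects oversized IV and authsize values and installs crypto_aead_type, as shown in the final hunk above.
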
 
index 3c079b7f23f6bada906f9d444a50cd15b0831d65..d130b41dbaea244000c35328d7eba7b463158bc8 100644 (file)
@@ -67,12 +67,22 @@ static int crypto_check_alg(struct crypto_alg *alg)
        return crypto_set_driver_name(alg);
 }
 
+static void crypto_free_instance(struct crypto_instance *inst)
+{
+       if (!inst->alg.cra_type->free) {
+               inst->tmpl->free(inst);
+               return;
+       }
+
+       inst->alg.cra_type->free(inst);
+}
+
 static void crypto_destroy_instance(struct crypto_alg *alg)
 {
        struct crypto_instance *inst = (void *)alg;
        struct crypto_template *tmpl = inst->tmpl;
 
-       tmpl->free(inst);
+       crypto_free_instance(inst);
        crypto_tmpl_put(tmpl);
 }
 
@@ -481,7 +491,7 @@ void crypto_unregister_template(struct crypto_template *tmpl)
 
        hlist_for_each_entry_safe(inst, n, list, list) {
                BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1);
-               tmpl->free(inst);
+               crypto_free_instance(inst);
        }
        crypto_remove_final(&users);
 }
@@ -892,7 +902,7 @@ out:
 }
 EXPORT_SYMBOL_GPL(crypto_enqueue_request);
 
-void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset)
+struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
 {
        struct list_head *request;
 
@@ -907,14 +917,7 @@ void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset)
        request = queue->list.next;
        list_del(request);
 
-       return (char *)list_entry(request, struct crypto_async_request, list) -
-              offset;
-}
-EXPORT_SYMBOL_GPL(__crypto_dequeue_request);
-
-struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
-{
-       return __crypto_dequeue_request(queue, 0);
+       return list_entry(request, struct crypto_async_request, list);
 }
 EXPORT_SYMBOL_GPL(crypto_dequeue_request);
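
With __crypto_dequeue_request() gone, crypto_dequeue_request() is the single way to pop a queue. For context, a sketch of the driver-side pattern it serves (assumed shape, hypothetical names); callers provide their own locking:

    #include <crypto/algapi.h>
    #include <linux/spinlock.h>

    /* Hypothetical: pull the next request off a driver's queue. */
    static struct crypto_async_request *
    example_dequeue(struct crypto_queue *queue, spinlock_t *lock)
    {
            struct crypto_async_request *req, *backlog;
            unsigned long flags;

            spin_lock_irqsave(lock, flags);
            backlog = crypto_get_backlog(queue);
            req = crypto_dequeue_request(queue);
            spin_unlock_irqrestore(lock, flags);

            /* Tell a backlogged submitter its request is now in flight. */
            if (backlog)
                    backlog->complete(backlog, -EINPROGRESS);

            return req;     /* NULL when the queue is empty */
    }
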
 
index 76fc0b23fc6cfd4c33188cd6e5b7454603b7a751..6e39d9c05b98a532400f83d173ab9884cbb43a96 100644 (file)
@@ -248,13 +248,11 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
        type = alg->cra_flags;
 
        /* This piece of crap needs to disappear into per-type test hooks. */
-       if ((!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
-              CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
-            ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-             CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
-                                         alg->cra_ablkcipher.ivsize)) ||
-           (!((type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) &&
-            alg->cra_type == &crypto_nivaead_type && alg->cra_aead.ivsize))
+       if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
+             CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
+           ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+            CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
+                                        alg->cra_ablkcipher.ivsize))
                type |= CRYPTO_ALG_TESTED;
 
        param->type = type;
index e0408a480d2f4eb8f818c9590ec9a3d7d166e25d..0aa6fdfb448a8c4081e06aa9dcb041433dc280a5 100644 (file)
@@ -90,6 +90,7 @@ static void aead_put_sgl(struct sock *sk)
                put_page(sg_page(sg + i));
                sg_assign_page(sg + i, NULL);
        }
+       sg_init_table(sg, ALG_MAX_PAGES);
        sgl->cur = 0;
        ctx->used = 0;
        ctx->more = 0;
@@ -514,8 +515,7 @@ static struct proto_ops algif_aead_ops = {
 
 static void *aead_bind(const char *name, u32 type, u32 mask)
 {
-       return crypto_alloc_aead(name, type | CRYPTO_ALG_AEAD_NEW,
-                                mask | CRYPTO_ALG_AEAD_NEW);
+       return crypto_alloc_aead(name, type, mask);
 }
 
 static void aead_release(void *private)
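
For reference, the userspace half of this interface (a minimal sketch, independent of the patch): an AEAD transform is reached by binding an AF_ALG socket, setting the key, and accepting an operation socket.

    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    #ifndef SOL_ALG
    #define SOL_ALG 279
    #endif

    int main(void)
    {
            struct sockaddr_alg sa = {
                    .salg_family = AF_ALG,
                    .salg_type   = "aead",
                    .salg_name   = "gcm(aes)",
            };
            static const unsigned char key[16];     /* all-zero demo key */
            int tfmfd, opfd;

            tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
            if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
                    return 1;
            if (setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key)) < 0)
                    return 1;
            /* optval is unused for ALG_SET_AEAD_AUTHSIZE; optlen is the tag size */
            if (setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16) < 0)
                    return 1;
            opfd = accept(tfmfd, NULL, NULL);
            /* sendmsg() with ALG_SET_OP / ALG_SET_IV cmsgs and read() would follow. */
            if (opfd >= 0)
                    close(opfd);
            close(tfmfd);
            return 0;
    }
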
index 3e852299afb434de793afedc5bc6d570150d8ae5..55a354d572513b642566b50db7252d3aecdceb38 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Authenc: Simple AEAD wrapper for IPsec
  *
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -14,6 +14,7 @@
 #include <crypto/internal/hash.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/authenc.h>
+#include <crypto/null.h>
 #include <crypto/scatterwalk.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 
-typedef u8 *(*authenc_ahash_t)(struct aead_request *req, unsigned int flags);
-
 struct authenc_instance_ctx {
        struct crypto_ahash_spawn auth;
        struct crypto_skcipher_spawn enc;
+       unsigned int reqoff;
 };
 
 struct crypto_authenc_ctx {
-       unsigned int reqoff;
        struct crypto_ahash *auth;
        struct crypto_ablkcipher *enc;
+       struct crypto_blkcipher *null;
 };
 
 struct authenc_request_ctx {
-       unsigned int cryptlen;
-       struct scatterlist *sg;
-       struct scatterlist asg[2];
-       struct scatterlist cipher[2];
-       crypto_completion_t complete;
-       crypto_completion_t update_complete;
+       struct scatterlist src[2];
+       struct scatterlist dst[2];
        char tail[];
 };
 
@@ -119,189 +115,35 @@ badkey:
        goto out;
 }
 
-static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq,
-                                           int err)
-{
-       struct aead_request *req = areq->data;
-       struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-       struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-       struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-
-       if (err)
-               goto out;
-
-       ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
-                               areq_ctx->cryptlen);
-       ahash_request_set_callback(ahreq, aead_request_flags(req) &
-                                         CRYPTO_TFM_REQ_MAY_SLEEP,
-                                  areq_ctx->complete, req);
-
-       err = crypto_ahash_finup(ahreq);
-       if (err)
-               goto out;
-
-       scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
-                                areq_ctx->cryptlen,
-                                crypto_aead_authsize(authenc), 1);
-
-out:
-       authenc_request_complete(req, err);
-}
-
 static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err)
 {
        struct aead_request *req = areq->data;
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-       struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+       struct aead_instance *inst = aead_alg_instance(authenc);
+       struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
        struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
 
        if (err)
                goto out;
 
-       scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
-                                areq_ctx->cryptlen,
+       scatterwalk_map_and_copy(ahreq->result, req->dst,
+                                req->assoclen + req->cryptlen,
                                 crypto_aead_authsize(authenc), 1);
 
 out:
        aead_request_complete(req, err);
 }
 
-static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
-                                            int err)
-{
-       u8 *ihash;
-       unsigned int authsize;
-       struct ablkcipher_request *abreq;
-       struct aead_request *req = areq->data;
-       struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-       struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-       struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-       unsigned int cryptlen = req->cryptlen;
-
-       if (err)
-               goto out;
-
-       ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
-                               areq_ctx->cryptlen);
-       ahash_request_set_callback(ahreq, aead_request_flags(req) &
-                                         CRYPTO_TFM_REQ_MAY_SLEEP,
-                                  areq_ctx->complete, req);
-
-       err = crypto_ahash_finup(ahreq);
-       if (err)
-               goto out;
-
-       authsize = crypto_aead_authsize(authenc);
-       cryptlen -= authsize;
-       ihash = ahreq->result + authsize;
-       scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
-                                authsize, 0);
-
-       err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
-       if (err)
-               goto out;
-
-       abreq = aead_request_ctx(req);
-       ablkcipher_request_set_tfm(abreq, ctx->enc);
-       ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-                                       req->base.complete, req->base.data);
-       ablkcipher_request_set_crypt(abreq, req->src, req->dst,
-                                    cryptlen, req->iv);
-
-       err = crypto_ablkcipher_decrypt(abreq);
-
-out:
-       authenc_request_complete(req, err);
-}
-
-static void authenc_verify_ahash_done(struct crypto_async_request *areq,
-                                     int err)
-{
-       u8 *ihash;
-       unsigned int authsize;
-       struct ablkcipher_request *abreq;
-       struct aead_request *req = areq->data;
-       struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-       struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-       struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-       unsigned int cryptlen = req->cryptlen;
-
-       if (err)
-               goto out;
-
-       authsize = crypto_aead_authsize(authenc);
-       cryptlen -= authsize;
-       ihash = ahreq->result + authsize;
-       scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
-                                authsize, 0);
-
-       err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
-       if (err)
-               goto out;
-
-       abreq = aead_request_ctx(req);
-       ablkcipher_request_set_tfm(abreq, ctx->enc);
-       ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-                                       req->base.complete, req->base.data);
-       ablkcipher_request_set_crypt(abreq, req->src, req->dst,
-                                    cryptlen, req->iv);
-
-       err = crypto_ablkcipher_decrypt(abreq);
-
-out:
-       authenc_request_complete(req, err);
-}
-
-static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags)
-{
-       struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-       struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-       struct crypto_ahash *auth = ctx->auth;
-       struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-       u8 *hash = areq_ctx->tail;
-       int err;
-
-       hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
-                           crypto_ahash_alignmask(auth) + 1);
-
-       ahash_request_set_tfm(ahreq, auth);
-
-       err = crypto_ahash_init(ahreq);
-       if (err)
-               return ERR_PTR(err);
-
-       ahash_request_set_crypt(ahreq, req->assoc, hash, req->assoclen);
-       ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
-                                  areq_ctx->update_complete, req);
-
-       err = crypto_ahash_update(ahreq);
-       if (err)
-               return ERR_PTR(err);
-
-       ahash_request_set_crypt(ahreq, areq_ctx->sg, hash,
-                               areq_ctx->cryptlen);
-       ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
-                                  areq_ctx->complete, req);
-
-       err = crypto_ahash_finup(ahreq);
-       if (err)
-               return ERR_PTR(err);
-
-       return hash;
-}
-
-static u8 *crypto_authenc_ahash(struct aead_request *req, unsigned int flags)
+static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags)
 {
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+       struct aead_instance *inst = aead_alg_instance(authenc);
        struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+       struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
        struct crypto_ahash *auth = ctx->auth;
        struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
        u8 *hash = areq_ctx->tail;
        int err;
 
@@ -309,66 +151,18 @@ static u8 *crypto_authenc_ahash(struct aead_request *req, unsigned int flags)
                           crypto_ahash_alignmask(auth) + 1);
 
        ahash_request_set_tfm(ahreq, auth);
-       ahash_request_set_crypt(ahreq, areq_ctx->sg, hash,
-                               areq_ctx->cryptlen);
-       ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
-                                  areq_ctx->complete, req);
+       ahash_request_set_crypt(ahreq, req->dst, hash,
+                               req->assoclen + req->cryptlen);
+       ahash_request_set_callback(ahreq, flags,
+                                  authenc_geniv_ahash_done, req);
 
        err = crypto_ahash_digest(ahreq);
        if (err)
-               return ERR_PTR(err);
-
-       return hash;
-}
-
-static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
-                                unsigned int flags)
-{
-       struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-       struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-       struct scatterlist *dst = req->dst;
-       struct scatterlist *assoc = req->assoc;
-       struct scatterlist *cipher = areq_ctx->cipher;
-       struct scatterlist *asg = areq_ctx->asg;
-       unsigned int ivsize = crypto_aead_ivsize(authenc);
-       unsigned int cryptlen = req->cryptlen;
-       authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb;
-       struct page *dstp;
-       u8 *vdst;
-       u8 *hash;
-
-       dstp = sg_page(dst);
-       vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;
-
-       if (ivsize) {
-               sg_init_table(cipher, 2);
-               sg_set_buf(cipher, iv, ivsize);
-               scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2);
-               dst = cipher;
-               cryptlen += ivsize;
-       }
-
-       if (req->assoclen && sg_is_last(assoc)) {
-               authenc_ahash_fn = crypto_authenc_ahash;
-               sg_init_table(asg, 2);
-               sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
-               scatterwalk_crypto_chain(asg, dst, 0, 2);
-               dst = asg;
-               cryptlen += req->assoclen;
-       }
-
-       areq_ctx->cryptlen = cryptlen;
-       areq_ctx->sg = dst;
-
-       areq_ctx->complete = authenc_geniv_ahash_done;
-       areq_ctx->update_complete = authenc_geniv_ahash_update_done;
-
-       hash = authenc_ahash_fn(req, flags);
-       if (IS_ERR(hash))
-               return PTR_ERR(hash);
+               return err;
 
-       scatterwalk_map_and_copy(hash, dst, cryptlen,
+       scatterwalk_map_and_copy(hash, req->dst, req->assoclen + req->cryptlen,
                                 crypto_aead_authsize(authenc), 1);
+
        return 0;
 }
 
@@ -377,180 +171,155 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
 {
        struct aead_request *areq = req->data;
 
-       if (!err) {
-               struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
-               struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-               struct authenc_request_ctx *areq_ctx = aead_request_ctx(areq);
-               struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
-                                                           + ctx->reqoff);
-               u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(ctx->enc);
+       if (err)
+               goto out;
 
-               err = crypto_authenc_genicv(areq, iv, 0);
-       }
+       err = crypto_authenc_genicv(areq, 0);
 
+out:
        authenc_request_complete(areq, err);
 }
 
+static int crypto_authenc_copy_assoc(struct aead_request *req)
+{
+       struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+       struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+       struct blkcipher_desc desc = {
+               .tfm = ctx->null,
+       };
+
+       return crypto_blkcipher_encrypt(&desc, req->dst, req->src,
+                                       req->assoclen);
+}
+
 static int crypto_authenc_encrypt(struct aead_request *req)
 {
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+       struct aead_instance *inst = aead_alg_instance(authenc);
        struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+       struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
        struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
        struct crypto_ablkcipher *enc = ctx->enc;
-       struct scatterlist *dst = req->dst;
        unsigned int cryptlen = req->cryptlen;
-       struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
-                                                   + ctx->reqoff);
-       u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc);
+       struct ablkcipher_request *abreq = (void *)(areq_ctx->tail +
+                                                   ictx->reqoff);
+       struct scatterlist *src, *dst;
        int err;
 
+       sg_init_table(areq_ctx->src, 2);
+       src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
+       dst = src;
+
+       if (req->src != req->dst) {
+               err = crypto_authenc_copy_assoc(req);
+               if (err)
+                       return err;
+
+               sg_init_table(areq_ctx->dst, 2);
+               dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
+       }
+
        ablkcipher_request_set_tfm(abreq, enc);
        ablkcipher_request_set_callback(abreq, aead_request_flags(req),
                                        crypto_authenc_encrypt_done, req);
-       ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv);
-
-       memcpy(iv, req->iv, crypto_aead_ivsize(authenc));
+       ablkcipher_request_set_crypt(abreq, src, dst, cryptlen, req->iv);
 
        err = crypto_ablkcipher_encrypt(abreq);
        if (err)
                return err;
 
-       return crypto_authenc_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
+       return crypto_authenc_genicv(req, aead_request_flags(req));
 }
 
-static void crypto_authenc_givencrypt_done(struct crypto_async_request *req,
-                                          int err)
+static int crypto_authenc_decrypt_tail(struct aead_request *req,
+                                      unsigned int flags)
 {
-       struct aead_request *areq = req->data;
-
-       if (!err) {
-               struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
-
-               err = crypto_authenc_genicv(areq, greq->giv, 0);
-       }
+       struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+       struct aead_instance *inst = aead_alg_instance(authenc);
+       struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+       struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
+       struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
+       struct ablkcipher_request *abreq = (void *)(areq_ctx->tail +
+                                                   ictx->reqoff);
+       unsigned int authsize = crypto_aead_authsize(authenc);
+       u8 *ihash = ahreq->result + authsize;
+       struct scatterlist *src, *dst;
 
-       authenc_request_complete(areq, err);
-}
+       scatterwalk_map_and_copy(ihash, req->src, ahreq->nbytes, authsize, 0);
 
-static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req)
-{
-       struct crypto_aead *authenc = aead_givcrypt_reqtfm(req);
-       struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-       struct aead_request *areq = &req->areq;
-       struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
-       u8 *iv = req->giv;
-       int err;
+       if (crypto_memneq(ihash, ahreq->result, authsize))
+               return -EBADMSG;
 
-       skcipher_givcrypt_set_tfm(greq, ctx->enc);
-       skcipher_givcrypt_set_callback(greq, aead_request_flags(areq),
-                                      crypto_authenc_givencrypt_done, areq);
-       skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen,
-                                   areq->iv);
-       skcipher_givcrypt_set_giv(greq, iv, req->seq);
+       sg_init_table(areq_ctx->src, 2);
+       src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
+       dst = src;
 
-       err = crypto_skcipher_givencrypt(greq);
-       if (err)
-               return err;
+       if (req->src != req->dst) {
+               sg_init_table(areq_ctx->dst, 2);
+               dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
+       }
 
-       return crypto_authenc_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
-}
+       ablkcipher_request_set_tfm(abreq, ctx->enc);
+       ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+                                       req->base.complete, req->base.data);
+       ablkcipher_request_set_crypt(abreq, src, dst,
+                                    req->cryptlen - authsize, req->iv);
 
-static int crypto_authenc_verify(struct aead_request *req,
-                                authenc_ahash_t authenc_ahash_fn)
-{
-       struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-       struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-       u8 *ohash;
-       u8 *ihash;
-       unsigned int authsize;
-
-       areq_ctx->complete = authenc_verify_ahash_done;
-       areq_ctx->update_complete = authenc_verify_ahash_update_done;
-
-       ohash = authenc_ahash_fn(req, CRYPTO_TFM_REQ_MAY_SLEEP);
-       if (IS_ERR(ohash))
-               return PTR_ERR(ohash);
-
-       authsize = crypto_aead_authsize(authenc);
-       ihash = ohash + authsize;
-       scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
-                                authsize, 0);
-       return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
+       return crypto_ablkcipher_decrypt(abreq);
 }
 
-static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
-                                 unsigned int cryptlen)
+static void authenc_verify_ahash_done(struct crypto_async_request *areq,
+                                     int err)
 {
-       struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-       struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-       struct scatterlist *src = req->src;
-       struct scatterlist *assoc = req->assoc;
-       struct scatterlist *cipher = areq_ctx->cipher;
-       struct scatterlist *asg = areq_ctx->asg;
-       unsigned int ivsize = crypto_aead_ivsize(authenc);
-       authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb;
-       struct page *srcp;
-       u8 *vsrc;
-
-       srcp = sg_page(src);
-       vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;
-
-       if (ivsize) {
-               sg_init_table(cipher, 2);
-               sg_set_buf(cipher, iv, ivsize);
-               scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2);
-               src = cipher;
-               cryptlen += ivsize;
-       }
+       struct aead_request *req = areq->data;
 
-       if (req->assoclen && sg_is_last(assoc)) {
-               authenc_ahash_fn = crypto_authenc_ahash;
-               sg_init_table(asg, 2);
-               sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
-               scatterwalk_crypto_chain(asg, src, 0, 2);
-               src = asg;
-               cryptlen += req->assoclen;
-       }
+       if (err)
+               goto out;
 
-       areq_ctx->cryptlen = cryptlen;
-       areq_ctx->sg = src;
+       err = crypto_authenc_decrypt_tail(req, 0);
 
-       return crypto_authenc_verify(req, authenc_ahash_fn);
+out:
+       authenc_request_complete(req, err);
 }
 
 static int crypto_authenc_decrypt(struct aead_request *req)
 {
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-       struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-       struct ablkcipher_request *abreq = aead_request_ctx(req);
-       unsigned int cryptlen = req->cryptlen;
        unsigned int authsize = crypto_aead_authsize(authenc);
-       u8 *iv = req->iv;
+       struct aead_instance *inst = aead_alg_instance(authenc);
+       struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+       struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
+       struct crypto_ahash *auth = ctx->auth;
+       struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
+       u8 *hash = areq_ctx->tail;
        int err;
 
-       if (cryptlen < authsize)
-               return -EINVAL;
-       cryptlen -= authsize;
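+       /* areq_ctx->tail is only naturally aligned; round the hash
+        * buffer up to the ahash alignment mask.
+        */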
+       hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
+                          crypto_ahash_alignmask(auth) + 1);
 
-       err = crypto_authenc_iverify(req, iv, cryptlen);
+       ahash_request_set_tfm(ahreq, auth);
+       ahash_request_set_crypt(ahreq, req->src, hash,
+                               req->assoclen + req->cryptlen - authsize);
+       ahash_request_set_callback(ahreq, aead_request_flags(req),
+                                  authenc_verify_ahash_done, req);
+
+       err = crypto_ahash_digest(ahreq);
        if (err)
                return err;
 
-       ablkcipher_request_set_tfm(abreq, ctx->enc);
-       ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-                                       req->base.complete, req->base.data);
-       ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv);
-
-       return crypto_ablkcipher_decrypt(abreq);
+       return crypto_authenc_decrypt_tail(req, aead_request_flags(req));
 }
 
-static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
+static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
 {
-       struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
-       struct authenc_instance_ctx *ictx = crypto_instance_ctx(inst);
-       struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct aead_instance *inst = aead_alg_instance(tfm);
+       struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
+       struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_ahash *auth;
        struct crypto_ablkcipher *enc;
+       struct crypto_blkcipher *null;
        int err;
 
        auth = crypto_spawn_ahash(&ictx->auth);
@@ -562,42 +331,57 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
        if (IS_ERR(enc))
                goto err_free_ahash;
 
+       null = crypto_get_default_null_skcipher();
+       err = PTR_ERR(null);
+       if (IS_ERR(null))
+               goto err_free_skcipher;
+
        ctx->auth = auth;
        ctx->enc = enc;
+       ctx->null = null;
 
-       ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
-                           crypto_ahash_alignmask(auth),
-                           crypto_ahash_alignmask(auth) + 1) +
-                     crypto_ablkcipher_ivsize(enc);
-
-       crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
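+       /* Per-request memory: the authenc request ctx, ictx->reqoff
+        * bytes of aligned digest scratch, then the larger of the hash
+        * and cipher sub-requests (they run sequentially, never both).
+        */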
+       crypto_aead_set_reqsize(
+               tfm,
                sizeof(struct authenc_request_ctx) +
-               ctx->reqoff +
+               ictx->reqoff +
                max_t(unsigned int,
-                       crypto_ahash_reqsize(auth) +
-                       sizeof(struct ahash_request),
-                       sizeof(struct skcipher_givcrypt_request) +
-                       crypto_ablkcipher_reqsize(enc)));
+                     crypto_ahash_reqsize(auth) +
+                     sizeof(struct ahash_request),
+                     sizeof(struct ablkcipher_request) +
+                     crypto_ablkcipher_reqsize(enc)));
 
        return 0;
 
+err_free_skcipher:
+       crypto_free_ablkcipher(enc);
 err_free_ahash:
        crypto_free_ahash(auth);
        return err;
 }
 
-static void crypto_authenc_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_authenc_exit_tfm(struct crypto_aead *tfm)
 {
-       struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
 
        crypto_free_ahash(ctx->auth);
        crypto_free_ablkcipher(ctx->enc);
+       crypto_put_default_null_skcipher();
 }
 
-static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
+static void crypto_authenc_free(struct aead_instance *inst)
+{
+       struct authenc_instance_ctx *ctx = aead_instance_ctx(inst);
+
+       crypto_drop_skcipher(&ctx->enc);
+       crypto_drop_ahash(&ctx->auth);
+       kfree(inst);
+}
+
+static int crypto_authenc_create(struct crypto_template *tmpl,
+                                struct rtattr **tb)
 {
        struct crypto_attr_type *algt;
-       struct crypto_instance *inst;
+       struct aead_instance *inst;
        struct hash_alg_common *auth;
        struct crypto_alg *auth_base;
        struct crypto_alg *enc;
@@ -607,15 +391,15 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
 
        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
-               return ERR_CAST(algt);
+               return PTR_ERR(algt);
 
        if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
                               CRYPTO_ALG_TYPE_AHASH_MASK);
        if (IS_ERR(auth))
-               return ERR_CAST(auth);
+               return PTR_ERR(auth);
 
        auth_base = &auth->base;
 
@@ -629,13 +413,14 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
        if (!inst)
                goto out_put_auth;
 
-       ctx = crypto_instance_ctx(inst);
+       ctx = aead_instance_ctx(inst);
 
-       err = crypto_init_ahash_spawn(&ctx->auth, auth, inst);
+       err = crypto_init_ahash_spawn(&ctx->auth, auth,
+                                     aead_crypto_instance(inst));
        if (err)
                goto err_free_inst;
 
-       crypto_set_skcipher_spawn(&ctx->enc, inst);
+       crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
        err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
                                   crypto_requires_sync(algt->type,
                                                        algt->mask));
@@ -644,41 +429,47 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
 
        enc = crypto_skcipher_spawn_alg(&ctx->enc);
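+       /* Scratch for two digest-sized buffers (computed and transmitted
+        * MAC), rounded up to the hash alignment mask; sub-requests
+        * start past this offset.
+        */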
 
+       ctx->reqoff = ALIGN(2 * auth->digestsize + auth_base->cra_alignmask,
+                           auth_base->cra_alignmask + 1);
+
        err = -ENAMETOOLONG;
-       if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
+       if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
                     "authenc(%s,%s)", auth_base->cra_name, enc->cra_name) >=
            CRYPTO_MAX_ALG_NAME)
                goto err_drop_enc;
 
-       if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+       if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "authenc(%s,%s)", auth_base->cra_driver_name,
                     enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                goto err_drop_enc;
 
-       inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
-       inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC;
-       inst->alg.cra_priority = enc->cra_priority *
-                                10 + auth_base->cra_priority;
-       inst->alg.cra_blocksize = enc->cra_blocksize;
-       inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask;
-       inst->alg.cra_type = &crypto_aead_type;
+       inst->alg.base.cra_flags = enc->cra_flags & CRYPTO_ALG_ASYNC;
+       inst->alg.base.cra_priority = enc->cra_priority * 10 +
+                                     auth_base->cra_priority;
+       inst->alg.base.cra_blocksize = enc->cra_blocksize;
+       inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
+                                      enc->cra_alignmask;
+       inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_ctx);
+
+       inst->alg.ivsize = enc->cra_ablkcipher.ivsize;
+       inst->alg.maxauthsize = auth->digestsize;
 
-       inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize;
-       inst->alg.cra_aead.maxauthsize = auth->digestsize;
+       inst->alg.init = crypto_authenc_init_tfm;
+       inst->alg.exit = crypto_authenc_exit_tfm;
 
-       inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx);
+       inst->alg.setkey = crypto_authenc_setkey;
+       inst->alg.encrypt = crypto_authenc_encrypt;
+       inst->alg.decrypt = crypto_authenc_decrypt;
 
-       inst->alg.cra_init = crypto_authenc_init_tfm;
-       inst->alg.cra_exit = crypto_authenc_exit_tfm;
+       inst->free = crypto_authenc_free;
 
-       inst->alg.cra_aead.setkey = crypto_authenc_setkey;
-       inst->alg.cra_aead.encrypt = crypto_authenc_encrypt;
-       inst->alg.cra_aead.decrypt = crypto_authenc_decrypt;
-       inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt;
+       err = aead_register_instance(tmpl, inst);
+       if (err)
+               goto err_drop_enc;
 
 out:
        crypto_mod_put(auth_base);
-       return inst;
+       return err;
 
 err_drop_enc:
        crypto_drop_skcipher(&ctx->enc);
@@ -687,23 +478,12 @@ err_drop_auth:
 err_free_inst:
        kfree(inst);
 out_put_auth:
-       inst = ERR_PTR(err);
        goto out;
 }
 
-static void crypto_authenc_free(struct crypto_instance *inst)
-{
-       struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-       crypto_drop_skcipher(&ctx->enc);
-       crypto_drop_ahash(&ctx->auth);
-       kfree(inst);
-}
-
 static struct crypto_template crypto_authenc_tmpl = {
        .name = "authenc",
-       .alloc = crypto_authenc_alloc,
-       .free = crypto_authenc_free,
+       .create = crypto_authenc_create,
        .module = THIS_MODULE,
 };
 
crypto/authencesn.c
index b8efe36ce1142d0c6b0b8e45ec23965ec7135c40..0c0468869e25b51035cf363fd3f3e2f7903330fb 100644 (file)
@@ -4,6 +4,7 @@
  *
  * Copyright (C) 2010 secunet Security Networks AG
  * Copyright (C) 2010 Steffen Klassert <steffen.klassert@secunet.com>
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -16,6 +17,7 @@
 #include <crypto/internal/hash.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/authenc.h>
+#include <crypto/null.h>
 #include <crypto/scatterwalk.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -34,19 +36,12 @@ struct crypto_authenc_esn_ctx {
        unsigned int reqoff;
        struct crypto_ahash *auth;
        struct crypto_ablkcipher *enc;
+       struct crypto_blkcipher *null;
 };
 
 struct authenc_esn_request_ctx {
-       unsigned int cryptlen;
-       unsigned int headlen;
-       unsigned int trailen;
-       struct scatterlist *sg;
-       struct scatterlist hsg[2];
-       struct scatterlist tsg[1];
-       struct scatterlist cipher[2];
-       crypto_completion_t complete;
-       crypto_completion_t update_complete;
-       crypto_completion_t update_complete2;
+       struct scatterlist src[2];
+       struct scatterlist dst[2];
        char tail[];
 };
 
@@ -56,6 +51,15 @@ static void authenc_esn_request_complete(struct aead_request *req, int err)
                aead_request_complete(req, err);
 }
 
+static int crypto_authenc_esn_setauthsize(struct crypto_aead *authenc_esn,
+                                         unsigned int authsize)
+{
+       if (authsize > 0 && authsize < 4)
+               return -EINVAL;
+
+       return 0;
+}
+
 static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key,
                                     unsigned int keylen)
 {
@@ -93,536 +97,242 @@ badkey:
        goto out;
 }
 
-static void authenc_esn_geniv_ahash_update_done(struct crypto_async_request *areq,
-                                               int err)
+static int crypto_authenc_esn_genicv_tail(struct aead_request *req,
+                                         unsigned int flags)
 {
-       struct aead_request *req = areq->data;
        struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
        struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
        struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-
-       if (err)
-               goto out;
-
-       ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
-                               areq_ctx->cryptlen);
-       ahash_request_set_callback(ahreq, aead_request_flags(req) &
-                                         CRYPTO_TFM_REQ_MAY_SLEEP,
-                                  areq_ctx->update_complete2, req);
-
-       err = crypto_ahash_update(ahreq);
-       if (err)
-               goto out;
-
-       ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
-                               areq_ctx->trailen);
-       ahash_request_set_callback(ahreq, aead_request_flags(req) &
-                                         CRYPTO_TFM_REQ_MAY_SLEEP,
-                                  areq_ctx->complete, req);
-
-       err = crypto_ahash_finup(ahreq);
-       if (err)
-               goto out;
-
-       scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
-                                areq_ctx->cryptlen,
-                                crypto_aead_authsize(authenc_esn), 1);
-
-out:
-       authenc_esn_request_complete(req, err);
-}
-
-static void authenc_esn_geniv_ahash_update_done2(struct crypto_async_request *areq,
-                                                int err)
-{
-       struct aead_request *req = areq->data;
-       struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
-       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-       struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-
-       if (err)
-               goto out;
-
-       ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
-                               areq_ctx->trailen);
-       ahash_request_set_callback(ahreq, aead_request_flags(req) &
-                                         CRYPTO_TFM_REQ_MAY_SLEEP,
-                                  areq_ctx->complete, req);
-
-       err = crypto_ahash_finup(ahreq);
-       if (err)
-               goto out;
+       struct crypto_ahash *auth = ctx->auth;
+       u8 *hash = PTR_ALIGN((u8 *)areq_ctx->tail,
+                            crypto_ahash_alignmask(auth) + 1);
+       unsigned int authsize = crypto_aead_authsize(authenc_esn);
+       unsigned int assoclen = req->assoclen;
+       unsigned int cryptlen = req->cryptlen;
+       struct scatterlist *dst = req->dst;
+       u32 tmp[2];
 
-       scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
-                                areq_ctx->cryptlen,
-                                crypto_aead_authsize(authenc_esn), 1);
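+       /* The AD is {SPI, seq_hi, seq_lo} (see the old hsg/tsg layout);
+        * seq_hi was spliced to the end of the buffer so one digest pass
+        * could cover SPI || seq_lo || data || seq_hi.  Undo that here.
+        */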
+       /* Move high-order bits of sequence number back. */
+       scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
+       scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
+       scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);
 
-out:
-       authenc_esn_request_complete(req, err);
+       scatterwalk_map_and_copy(hash, dst, assoclen + cryptlen, authsize, 1);
+       return 0;
 }
 
-
 static void authenc_esn_geniv_ahash_done(struct crypto_async_request *areq,
                                         int err)
 {
        struct aead_request *req = areq->data;
-       struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
-       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-       struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
 
-       if (err)
-               goto out;
-
-       scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
-                                areq_ctx->cryptlen,
-                                crypto_aead_authsize(authenc_esn), 1);
-
-out:
+       err = err ?: crypto_authenc_esn_genicv_tail(req, 0);
        aead_request_complete(req, err);
 }
 
-
-static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *areq,
-                                                int err)
+static int crypto_authenc_esn_genicv(struct aead_request *req,
+                                    unsigned int flags)
 {
-       u8 *ihash;
-       unsigned int authsize;
-       struct ablkcipher_request *abreq;
-       struct aead_request *req = areq->data;
        struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
-       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
        struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-       unsigned int cryptlen = req->cryptlen;
-
-       if (err)
-               goto out;
-
-       ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
-                               areq_ctx->cryptlen);
-
-       ahash_request_set_callback(ahreq,
-                                  aead_request_flags(req) &
-                                  CRYPTO_TFM_REQ_MAY_SLEEP,
-                                  areq_ctx->update_complete2, req);
-
-       err = crypto_ahash_update(ahreq);
-       if (err)
-               goto out;
-
-       ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
-                               areq_ctx->trailen);
-       ahash_request_set_callback(ahreq, aead_request_flags(req) &
-                                         CRYPTO_TFM_REQ_MAY_SLEEP,
-                                  areq_ctx->complete, req);
-
-       err = crypto_ahash_finup(ahreq);
-       if (err)
-               goto out;
-
-       authsize = crypto_aead_authsize(authenc_esn);
-       cryptlen -= authsize;
-       ihash = ahreq->result + authsize;
-       scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
-                                authsize, 0);
-
-       err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
-       if (err)
-               goto out;
-
-       abreq = aead_request_ctx(req);
-       ablkcipher_request_set_tfm(abreq, ctx->enc);
-       ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-                                       req->base.complete, req->base.data);
-       ablkcipher_request_set_crypt(abreq, req->src, req->dst,
-                                    cryptlen, req->iv);
-
-       err = crypto_ablkcipher_decrypt(abreq);
-
-out:
-       authenc_esn_request_complete(req, err);
-}
-
-static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *areq,
-                                                 int err)
-{
-       u8 *ihash;
-       unsigned int authsize;
-       struct ablkcipher_request *abreq;
-       struct aead_request *req = areq->data;
-       struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
        struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-       struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+       struct crypto_ahash *auth = ctx->auth;
+       u8 *hash = PTR_ALIGN((u8 *)areq_ctx->tail,
+                            crypto_ahash_alignmask(auth) + 1);
        struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+       unsigned int authsize = crypto_aead_authsize(authenc_esn);
+       unsigned int assoclen = req->assoclen;
        unsigned int cryptlen = req->cryptlen;
+       struct scatterlist *dst = req->dst;
+       u32 tmp[2];
 
-       if (err)
-               goto out;
+       if (!authsize)
+               return 0;
 
-       ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
-                               areq_ctx->trailen);
-       ahash_request_set_callback(ahreq, aead_request_flags(req) &
-                                         CRYPTO_TFM_REQ_MAY_SLEEP,
-                                  areq_ctx->complete, req);
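+       /* Splice seq_hi to the tail so a single digest pass preserves
+        * the MAC input order of the old multi-update code:
+        * SPI || seq_lo || data || seq_hi.
+        */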
+       /* Move high-order bits of sequence number to the end. */
+       scatterwalk_map_and_copy(tmp, dst, 0, 8, 0);
+       scatterwalk_map_and_copy(tmp, dst, 4, 4, 1);
+       scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1);
 
-       err = crypto_ahash_finup(ahreq);
-       if (err)
-               goto out;
+       sg_init_table(areq_ctx->dst, 2);
+       dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
 
-       authsize = crypto_aead_authsize(authenc_esn);
-       cryptlen -= authsize;
-       ihash = ahreq->result + authsize;
-       scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
-                                authsize, 0);
-
-       err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
-       if (err)
-               goto out;
-
-       abreq = aead_request_ctx(req);
-       ablkcipher_request_set_tfm(abreq, ctx->enc);
-       ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-                                       req->base.complete, req->base.data);
-       ablkcipher_request_set_crypt(abreq, req->src, req->dst,
-                                    cryptlen, req->iv);
-
-       err = crypto_ablkcipher_decrypt(abreq);
+       ahash_request_set_tfm(ahreq, auth);
+       ahash_request_set_crypt(ahreq, dst, hash, assoclen + cryptlen);
+       ahash_request_set_callback(ahreq, flags,
+                                  authenc_esn_geniv_ahash_done, req);
 
-out:
-       authenc_esn_request_complete(req, err);
+       return crypto_ahash_digest(ahreq) ?:
+              crypto_authenc_esn_genicv_tail(req, aead_request_flags(req));
 }
 
 
-static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
-                                         int err)
+static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req,
+                                           int err)
 {
-       u8 *ihash;
-       unsigned int authsize;
-       struct ablkcipher_request *abreq;
-       struct aead_request *req = areq->data;
-       struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
-       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-       struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-       unsigned int cryptlen = req->cryptlen;
-
-       if (err)
-               goto out;
-
-       authsize = crypto_aead_authsize(authenc_esn);
-       cryptlen -= authsize;
-       ihash = ahreq->result + authsize;
-       scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
-                                authsize, 0);
-
-       err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
-       if (err)
-               goto out;
-
-       abreq = aead_request_ctx(req);
-       ablkcipher_request_set_tfm(abreq, ctx->enc);
-       ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-                                       req->base.complete, req->base.data);
-       ablkcipher_request_set_crypt(abreq, req->src, req->dst,
-                                    cryptlen, req->iv);
+       struct aead_request *areq = req->data;
 
-       err = crypto_ablkcipher_decrypt(abreq);
+       if (!err)
+               err = crypto_authenc_esn_genicv(areq, 0);
 
-out:
-       authenc_esn_request_complete(req, err);
+       authenc_esn_request_complete(areq, err);
 }
 
-static u8 *crypto_authenc_esn_ahash(struct aead_request *req,
-                                   unsigned int flags)
+static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len)
 {
        struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
        struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-       struct crypto_ahash *auth = ctx->auth;
-       struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-       u8 *hash = areq_ctx->tail;
-       int err;
+       struct blkcipher_desc desc = {
+               .tfm = ctx->null,
+       };
 
-       hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
-                           crypto_ahash_alignmask(auth) + 1);
-
-       ahash_request_set_tfm(ahreq, auth);
-
-       err = crypto_ahash_init(ahreq);
-       if (err)
-               return ERR_PTR(err);
-
-       ahash_request_set_crypt(ahreq, areq_ctx->hsg, hash, areq_ctx->headlen);
-       ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
-                                  areq_ctx->update_complete, req);
-
-       err = crypto_ahash_update(ahreq);
-       if (err)
-               return ERR_PTR(err);
-
-       ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, areq_ctx->cryptlen);
-       ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
-                                  areq_ctx->update_complete2, req);
-
-       err = crypto_ahash_update(ahreq);
-       if (err)
-               return ERR_PTR(err);
-
-       ahash_request_set_crypt(ahreq, areq_ctx->tsg, hash,
-                               areq_ctx->trailen);
-       ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
-                                  areq_ctx->complete, req);
-
-       err = crypto_ahash_finup(ahreq);
-       if (err)
-               return ERR_PTR(err);
-
-       return hash;
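+       /* Encrypting with the default null cipher is just a
+        * scatterlist-aware copy from src to dst.
+        */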
+       return crypto_blkcipher_encrypt(&desc, req->dst, req->src, len);
 }
 
-static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
-                                    unsigned int flags)
+static int crypto_authenc_esn_encrypt(struct aead_request *req)
 {
        struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
        struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-       struct scatterlist *dst = req->dst;
-       struct scatterlist *assoc = req->assoc;
-       struct scatterlist *cipher = areq_ctx->cipher;
-       struct scatterlist *hsg = areq_ctx->hsg;
-       struct scatterlist *tsg = areq_ctx->tsg;
-       unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
+       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+       struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
+                                                   + ctx->reqoff);
+       struct crypto_ablkcipher *enc = ctx->enc;
+       unsigned int assoclen = req->assoclen;
        unsigned int cryptlen = req->cryptlen;
-       struct page *dstp;
-       u8 *vdst;
-       u8 *hash;
-
-       dstp = sg_page(dst);
-       vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;
-
-       if (ivsize) {
-               sg_init_table(cipher, 2);
-               sg_set_buf(cipher, iv, ivsize);
-               scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2);
-               dst = cipher;
-               cryptlen += ivsize;
-       }
-
-       if (assoc->length < 12)
-               return -EINVAL;
-
-       sg_init_table(hsg, 2);
-       sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
-       sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
-
-       sg_init_table(tsg, 1);
-       sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
-
-       areq_ctx->cryptlen = cryptlen;
-       areq_ctx->headlen = 8;
-       areq_ctx->trailen = 4;
-       areq_ctx->sg = dst;
-
-       areq_ctx->complete = authenc_esn_geniv_ahash_done;
-       areq_ctx->update_complete = authenc_esn_geniv_ahash_update_done;
-       areq_ctx->update_complete2 = authenc_esn_geniv_ahash_update_done2;
-
-       hash = crypto_authenc_esn_ahash(req, flags);
-       if (IS_ERR(hash))
-               return PTR_ERR(hash);
+       struct scatterlist *src, *dst;
+       int err;
 
-       scatterwalk_map_and_copy(hash, dst, cryptlen,
-                                crypto_aead_authsize(authenc_esn), 1);
-       return 0;
-}
+       sg_init_table(areq_ctx->src, 2);
+       src = scatterwalk_ffwd(areq_ctx->src, req->src, assoclen);
+       dst = src;
 
+       if (req->src != req->dst) {
+               err = crypto_authenc_esn_copy(req, assoclen);
+               if (err)
+                       return err;
 
-static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req,
-                                           int err)
-{
-       struct aead_request *areq = req->data;
-
-       if (!err) {
-               struct crypto_aead *authenc_esn = crypto_aead_reqtfm(areq);
-               struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-               struct ablkcipher_request *abreq = aead_request_ctx(areq);
-               u8 *iv = (u8 *)(abreq + 1) +
-                        crypto_ablkcipher_reqsize(ctx->enc);
-
-               err = crypto_authenc_esn_genicv(areq, iv, 0);
+               sg_init_table(areq_ctx->dst, 2);
+               dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen);
        }
 
-       authenc_esn_request_complete(areq, err);
-}
-
-static int crypto_authenc_esn_encrypt(struct aead_request *req)
-{
-       struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
-       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-       struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-       struct crypto_ablkcipher *enc = ctx->enc;
-       struct scatterlist *dst = req->dst;
-       unsigned int cryptlen = req->cryptlen;
-       struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
-                                                   + ctx->reqoff);
-       u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc);
-       int err;
-
        ablkcipher_request_set_tfm(abreq, enc);
        ablkcipher_request_set_callback(abreq, aead_request_flags(req),
                                        crypto_authenc_esn_encrypt_done, req);
-       ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv);
-
-       memcpy(iv, req->iv, crypto_aead_ivsize(authenc_esn));
+       ablkcipher_request_set_crypt(abreq, src, dst, cryptlen, req->iv);
 
        err = crypto_ablkcipher_encrypt(abreq);
        if (err)
                return err;
 
-       return crypto_authenc_esn_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
+       return crypto_authenc_esn_genicv(req, aead_request_flags(req));
 }
 
-static void crypto_authenc_esn_givencrypt_done(struct crypto_async_request *req,
-                                              int err)
+static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
+                                          unsigned int flags)
 {
-       struct aead_request *areq = req->data;
-
-       if (!err) {
-               struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
-
-               err = crypto_authenc_esn_genicv(areq, greq->giv, 0);
-       }
+       struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+       unsigned int authsize = crypto_aead_authsize(authenc_esn);
+       struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+       struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
+                                                   + ctx->reqoff);
+       struct crypto_ahash *auth = ctx->auth;
+       u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail,
+                             crypto_ahash_alignmask(auth) + 1);
+       unsigned int cryptlen = req->cryptlen - authsize;
+       unsigned int assoclen = req->assoclen;
+       struct scatterlist *dst = req->dst;
+       u8 *ihash = ohash + crypto_ahash_digestsize(auth);
+       u32 tmp[2];
 
-       authenc_esn_request_complete(areq, err);
-}
+       /* Move high-order bits of sequence number back. */
+       scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
+       scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
+       scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);
 
-static int crypto_authenc_esn_givencrypt(struct aead_givcrypt_request *req)
-{
-       struct crypto_aead *authenc_esn = aead_givcrypt_reqtfm(req);
-       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-       struct aead_request *areq = &req->areq;
-       struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
-       u8 *iv = req->giv;
-       int err;
+       if (crypto_memneq(ihash, ohash, authsize))
+               return -EBADMSG;
 
-       skcipher_givcrypt_set_tfm(greq, ctx->enc);
-       skcipher_givcrypt_set_callback(greq, aead_request_flags(areq),
-                                      crypto_authenc_esn_givencrypt_done, areq);
-       skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen,
-                                   areq->iv);
-       skcipher_givcrypt_set_giv(greq, iv, req->seq);
+       sg_init_table(areq_ctx->dst, 2);
+       dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);
 
-       err = crypto_skcipher_givencrypt(greq);
-       if (err)
-               return err;
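+       /* The ciphertext was already mirrored into dst for out-of-place
+        * requests, so the cipher can run in place on dst.
+        */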
+       ablkcipher_request_set_tfm(abreq, ctx->enc);
+       ablkcipher_request_set_callback(abreq, flags,
+                                       req->base.complete, req->base.data);
+       ablkcipher_request_set_crypt(abreq, dst, dst, cryptlen, req->iv);
 
-       return crypto_authenc_esn_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
+       return crypto_ablkcipher_decrypt(abreq);
 }
 
-static int crypto_authenc_esn_verify(struct aead_request *req)
+static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
+                                         int err)
 {
-       struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
-       struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-       u8 *ohash;
-       u8 *ihash;
-       unsigned int authsize;
-
-       areq_ctx->complete = authenc_esn_verify_ahash_done;
-       areq_ctx->update_complete = authenc_esn_verify_ahash_update_done;
-
-       ohash = crypto_authenc_esn_ahash(req, CRYPTO_TFM_REQ_MAY_SLEEP);
-       if (IS_ERR(ohash))
-               return PTR_ERR(ohash);
+       struct aead_request *req = areq->data;
 
-       authsize = crypto_aead_authsize(authenc_esn);
-       ihash = ohash + authsize;
-       scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
-                                authsize, 0);
-       return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
+       err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
+       aead_request_complete(req, err);
 }
 
-static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
-                                     unsigned int cryptlen)
+static int crypto_authenc_esn_decrypt(struct aead_request *req)
 {
        struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
        struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-       struct scatterlist *src = req->src;
-       struct scatterlist *assoc = req->assoc;
-       struct scatterlist *cipher = areq_ctx->cipher;
-       struct scatterlist *hsg = areq_ctx->hsg;
-       struct scatterlist *tsg = areq_ctx->tsg;
-       unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
-       struct page *srcp;
-       u8 *vsrc;
-
-       srcp = sg_page(src);
-       vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;
-
-       if (ivsize) {
-               sg_init_table(cipher, 2);
-               sg_set_buf(cipher, iv, ivsize);
-               scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2);
-               src = cipher;
-               cryptlen += ivsize;
-       }
+       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+       struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+       unsigned int authsize = crypto_aead_authsize(authenc_esn);
+       struct crypto_ahash *auth = ctx->auth;
+       u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail,
+                             crypto_ahash_alignmask(auth) + 1);
+       unsigned int assoclen = req->assoclen;
+       unsigned int cryptlen = req->cryptlen;
+       u8 *ihash = ohash + crypto_ahash_digestsize(auth);
+       struct scatterlist *dst = req->dst;
+       u32 tmp[2];
+       int err;
 
-       if (assoc->length < 12)
-               return -EINVAL;
+       cryptlen -= authsize;
 
-       sg_init_table(hsg, 2);
-       sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
-       sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
+       if (req->src != dst) {
+               err = crypto_authenc_esn_copy(req, assoclen + cryptlen);
+               if (err)
+                       return err;
+       }
 
-       sg_init_table(tsg, 1);
-       sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
+       scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen,
+                                authsize, 0);
 
-       areq_ctx->cryptlen = cryptlen;
-       areq_ctx->headlen = 8;
-       areq_ctx->trailen = 4;
-       areq_ctx->sg = src;
+       if (!authsize)
+               goto tail;
 
-       areq_ctx->complete = authenc_esn_verify_ahash_done;
-       areq_ctx->update_complete = authenc_esn_verify_ahash_update_done;
-       areq_ctx->update_complete2 = authenc_esn_verify_ahash_update_done2;
+       /* Move high-order bits of sequence number to the end. */
+       scatterwalk_map_and_copy(tmp, dst, 0, 8, 0);
+       scatterwalk_map_and_copy(tmp, dst, 4, 4, 1);
+       scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1);
 
-       return crypto_authenc_esn_verify(req);
-}
+       sg_init_table(areq_ctx->dst, 2);
+       dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
 
-static int crypto_authenc_esn_decrypt(struct aead_request *req)
-{
-       struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
-       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-       struct ablkcipher_request *abreq = aead_request_ctx(req);
-       unsigned int cryptlen = req->cryptlen;
-       unsigned int authsize = crypto_aead_authsize(authenc_esn);
-       u8 *iv = req->iv;
-       int err;
-
-       if (cryptlen < authsize)
-               return -EINVAL;
-       cryptlen -= authsize;
+       ahash_request_set_tfm(ahreq, auth);
+       ahash_request_set_crypt(ahreq, dst, ohash, assoclen + cryptlen);
+       ahash_request_set_callback(ahreq, aead_request_flags(req),
+                                  authenc_esn_verify_ahash_done, req);
 
-       err = crypto_authenc_esn_iverify(req, iv, cryptlen);
+       err = crypto_ahash_digest(ahreq);
        if (err)
                return err;
 
-       ablkcipher_request_set_tfm(abreq, ctx->enc);
-       ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-                                       req->base.complete, req->base.data);
-       ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv);
-
-       return crypto_ablkcipher_decrypt(abreq);
+tail:
+       return crypto_authenc_esn_decrypt_tail(req, aead_request_flags(req));
 }
 
-static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm)
+static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
 {
-       struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
-       struct authenc_esn_instance_ctx *ictx = crypto_instance_ctx(inst);
-       struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct aead_instance *inst = aead_alg_instance(tfm);
+       struct authenc_esn_instance_ctx *ictx = aead_instance_ctx(inst);
+       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_ahash *auth;
        struct crypto_ablkcipher *enc;
+       struct crypto_blkcipher *null;
        int err;
 
        auth = crypto_spawn_ahash(&ictx->auth);
@@ -634,15 +344,20 @@ static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm)
        if (IS_ERR(enc))
                goto err_free_ahash;
 
+       null = crypto_get_default_null_skcipher();
+       err = PTR_ERR(null);
+       if (IS_ERR(null))
+               goto err_free_skcipher;
+
        ctx->auth = auth;
        ctx->enc = enc;
+       ctx->null = null;
 
-       ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
-                           crypto_ahash_alignmask(auth),
-                           crypto_ahash_alignmask(auth) + 1) +
-                     crypto_ablkcipher_ivsize(enc);
+       ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth),
+                           crypto_ahash_alignmask(auth) + 1);
 
-       crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+       crypto_aead_set_reqsize(
+               tfm,
                sizeof(struct authenc_esn_request_ctx) +
                ctx->reqoff +
                max_t(unsigned int,
@@ -653,23 +368,36 @@ static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm)
 
        return 0;
 
+err_free_skcipher:
+       crypto_free_ablkcipher(enc);
 err_free_ahash:
        crypto_free_ahash(auth);
        return err;
 }
 
-static void crypto_authenc_esn_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm)
 {
-       struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
 
        crypto_free_ahash(ctx->auth);
        crypto_free_ablkcipher(ctx->enc);
+       crypto_put_default_null_skcipher();
+}
+
+static void crypto_authenc_esn_free(struct aead_instance *inst)
+{
+       struct authenc_esn_instance_ctx *ctx = aead_instance_ctx(inst);
+
+       crypto_drop_skcipher(&ctx->enc);
+       crypto_drop_ahash(&ctx->auth);
+       kfree(inst);
 }
 
-static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb)
+static int crypto_authenc_esn_create(struct crypto_template *tmpl,
+                                    struct rtattr **tb)
 {
        struct crypto_attr_type *algt;
-       struct crypto_instance *inst;
+       struct aead_instance *inst;
        struct hash_alg_common *auth;
        struct crypto_alg *auth_base;
        struct crypto_alg *enc;
@@ -679,15 +407,15 @@ static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb)
 
        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
-               return ERR_CAST(algt);
+               return PTR_ERR(algt);
 
        if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
                               CRYPTO_ALG_TYPE_AHASH_MASK);
        if (IS_ERR(auth))
-               return ERR_CAST(auth);
+               return PTR_ERR(auth);
 
        auth_base = &auth->base;
 
@@ -701,13 +429,14 @@ static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb)
        if (!inst)
                goto out_put_auth;
 
-       ctx = crypto_instance_ctx(inst);
+       ctx = aead_instance_ctx(inst);
 
-       err = crypto_init_ahash_spawn(&ctx->auth, auth, inst);
+       err = crypto_init_ahash_spawn(&ctx->auth, auth,
+                                     aead_crypto_instance(inst));
        if (err)
                goto err_free_inst;
 
-       crypto_set_skcipher_spawn(&ctx->enc, inst);
+       crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
        err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
                                   crypto_requires_sync(algt->type,
                                                        algt->mask));
@@ -717,40 +446,44 @@ static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb)
        enc = crypto_skcipher_spawn_alg(&ctx->enc);
 
        err = -ENAMETOOLONG;
-       if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
-                    "authencesn(%s,%s)", auth_base->cra_name, enc->cra_name) >=
-           CRYPTO_MAX_ALG_NAME)
+       if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+                    "authencesn(%s,%s)", auth_base->cra_name,
+                    enc->cra_name) >= CRYPTO_MAX_ALG_NAME)
                goto err_drop_enc;
 
-       if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+       if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "authencesn(%s,%s)", auth_base->cra_driver_name,
                     enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                goto err_drop_enc;
 
-       inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
-       inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC;
-       inst->alg.cra_priority = enc->cra_priority *
-                                10 + auth_base->cra_priority;
-       inst->alg.cra_blocksize = enc->cra_blocksize;
-       inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask;
-       inst->alg.cra_type = &crypto_aead_type;
+       inst->alg.base.cra_flags = enc->cra_flags & CRYPTO_ALG_ASYNC;
+       inst->alg.base.cra_priority = enc->cra_priority * 10 +
+                                     auth_base->cra_priority;
+       inst->alg.base.cra_blocksize = enc->cra_blocksize;
+       inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
+                                      enc->cra_alignmask;
+       inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx);
+
+       inst->alg.ivsize = enc->cra_ablkcipher.ivsize;
+       inst->alg.maxauthsize = auth->digestsize;
 
-       inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize;
-       inst->alg.cra_aead.maxauthsize = auth->digestsize;
+       inst->alg.init = crypto_authenc_esn_init_tfm;
+       inst->alg.exit = crypto_authenc_esn_exit_tfm;
 
-       inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx);
+       inst->alg.setkey = crypto_authenc_esn_setkey;
+       inst->alg.setauthsize = crypto_authenc_esn_setauthsize;
+       inst->alg.encrypt = crypto_authenc_esn_encrypt;
+       inst->alg.decrypt = crypto_authenc_esn_decrypt;
 
-       inst->alg.cra_init = crypto_authenc_esn_init_tfm;
-       inst->alg.cra_exit = crypto_authenc_esn_exit_tfm;
+       inst->free = crypto_authenc_esn_free;
 
-       inst->alg.cra_aead.setkey = crypto_authenc_esn_setkey;
-       inst->alg.cra_aead.encrypt = crypto_authenc_esn_encrypt;
-       inst->alg.cra_aead.decrypt = crypto_authenc_esn_decrypt;
-       inst->alg.cra_aead.givencrypt = crypto_authenc_esn_givencrypt;
+       err = aead_register_instance(tmpl, inst);
+       if (err)
+               goto err_drop_enc;
 
 out:
        crypto_mod_put(auth_base);
-       return inst;
+       return err;
 
 err_drop_enc:
        crypto_drop_skcipher(&ctx->enc);
@@ -759,23 +492,12 @@ err_drop_auth:
 err_free_inst:
        kfree(inst);
 out_put_auth:
-       inst = ERR_PTR(err);
        goto out;
 }
 
-static void crypto_authenc_esn_free(struct crypto_instance *inst)
-{
-       struct authenc_esn_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-       crypto_drop_skcipher(&ctx->enc);
-       crypto_drop_ahash(&ctx->auth);
-       kfree(inst);
-}
-
 static struct crypto_template crypto_authenc_esn_tmpl = {
        .name = "authencesn",
-       .alloc = crypto_authenc_esn_alloc,
-       .free = crypto_authenc_esn_free,
+       .create = crypto_authenc_esn_create,
        .module = THIS_MODULE,
 };
 
crypto/ccm.c
index a4d1a5eda18b4e9758ec899fdf48c6f1359a8c14..cc31ea4335bf3640b25499cb3b5aea21e65ae778 100644 (file)
@@ -36,14 +36,20 @@ struct crypto_rfc4309_ctx {
        u8 nonce[3];
 };
 
+struct crypto_rfc4309_req_ctx {
+       struct scatterlist src[3];
+       struct scatterlist dst[3];
+       struct aead_request subreq;
+};
+
 struct crypto_ccm_req_priv_ctx {
        u8 odata[16];
        u8 idata[16];
        u8 auth_tag[16];
        u32 ilen;
        u32 flags;
-       struct scatterlist src[2];
-       struct scatterlist dst[2];
+       struct scatterlist src[3];
+       struct scatterlist dst[3];
        struct ablkcipher_request abreq;
 };
 
@@ -265,7 +271,7 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
        /* format associated data and compute into mac */
        if (assoclen) {
                pctx->ilen = format_adata(idata, assoclen);
-               get_data_to_compute(cipher, pctx, req->assoc, req->assoclen);
+               get_data_to_compute(cipher, pctx, req->src, req->assoclen);
        } else {
                pctx->ilen = 0;
        }
@@ -286,7 +292,8 @@ static void crypto_ccm_encrypt_done(struct crypto_async_request *areq, int err)
        u8 *odata = pctx->odata;
 
        if (!err)
-               scatterwalk_map_and_copy(odata, req->dst, req->cryptlen,
+               scatterwalk_map_and_copy(odata, req->dst,
+                                        req->assoclen + req->cryptlen,
                                         crypto_aead_authsize(aead), 1);
        aead_request_complete(req, err);
 }
@@ -300,6 +307,41 @@ static inline int crypto_ccm_check_iv(const u8 *iv)
        return 0;
 }
 
+static int crypto_ccm_init_crypt(struct aead_request *req, u8 *tag)
+{
+       struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
+       struct scatterlist *sg;
+       u8 *iv = req->iv;
+       int err;
+
+       err = crypto_ccm_check_iv(iv);
+       if (err)
+               return err;
+
+       pctx->flags = aead_request_flags(req);
+
+       /* Note: RFC 3610 and NIST 800-38C require a counter of
+        * zero to encrypt the auth tag.
+        */
+       memset(iv + 15 - iv[0], 0, iv[0] + 1);
+
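+       /* Chain a 16-byte slot for the MAC in front of the payload
+        * (associated data skipped) so the CTR pass handles the tag
+        * with the counter-zero block and the text with the rest.
+        */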
+       sg_init_table(pctx->src, 3);
+       sg_set_buf(pctx->src, tag, 16);
+       sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen);
+       if (sg != pctx->src + 1)
+               sg_chain(pctx->src, 2, sg);
+
+       if (req->src != req->dst) {
+               sg_init_table(pctx->dst, 3);
+               sg_set_buf(pctx->dst, tag, 16);
+               sg = scatterwalk_ffwd(pctx->dst + 1, req->dst, req->assoclen);
+               if (sg != pctx->dst + 1)
+                       sg_chain(pctx->dst, 2, sg);
+       }
+
+       return 0;
+}
+
 static int crypto_ccm_encrypt(struct aead_request *req)
 {
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -312,32 +354,17 @@ static int crypto_ccm_encrypt(struct aead_request *req)
        u8 *iv = req->iv;
        int err;
 
-       err = crypto_ccm_check_iv(iv);
+       err = crypto_ccm_init_crypt(req, odata);
        if (err)
                return err;
 
-       pctx->flags = aead_request_flags(req);
-
-       err = crypto_ccm_auth(req, req->src, cryptlen);
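+       /* sg_next() steps over the prepended MAC slot so the CBC-MAC
+        * runs over the payload only.
+        */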
+       err = crypto_ccm_auth(req, sg_next(pctx->src), cryptlen);
        if (err)
                return err;
 
-        /* Note: rfc 3610 and NIST 800-38C require counter of
-        * zero to encrypt auth tag.
-        */
-       memset(iv + 15 - iv[0], 0, iv[0] + 1);
-
-       sg_init_table(pctx->src, 2);
-       sg_set_buf(pctx->src, odata, 16);
-       scatterwalk_sg_chain(pctx->src, 2, req->src);
-
        dst = pctx->src;
-       if (req->src != req->dst) {
-               sg_init_table(pctx->dst, 2);
-               sg_set_buf(pctx->dst, odata, 16);
-               scatterwalk_sg_chain(pctx->dst, 2, req->dst);
+       if (req->src != req->dst)
                dst = pctx->dst;
-       }
 
        ablkcipher_request_set_tfm(abreq, ctx->ctr);
        ablkcipher_request_set_callback(abreq, pctx->flags,
@@ -348,7 +375,7 @@ static int crypto_ccm_encrypt(struct aead_request *req)
                return err;
 
        /* copy authtag to end of dst */
-       scatterwalk_map_and_copy(odata, req->dst, cryptlen,
+       scatterwalk_map_and_copy(odata, sg_next(dst), cryptlen,
                                 crypto_aead_authsize(aead), 1);
        return err;
 }
@@ -361,9 +388,14 @@ static void crypto_ccm_decrypt_done(struct crypto_async_request *areq,
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        unsigned int authsize = crypto_aead_authsize(aead);
        unsigned int cryptlen = req->cryptlen - authsize;
+       struct scatterlist *dst;
+
+       pctx->flags = 0;
+
+       dst = sg_next(req->src == req->dst ? pctx->src : pctx->dst);
 
        if (!err) {
-               err = crypto_ccm_auth(req, req->dst, cryptlen);
+               err = crypto_ccm_auth(req, dst, cryptlen);
                if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize))
                        err = -EBADMSG;
        }
@@ -384,31 +416,18 @@ static int crypto_ccm_decrypt(struct aead_request *req)
        u8 *iv = req->iv;
        int err;
 
-       if (cryptlen < authsize)
-               return -EINVAL;
        cryptlen -= authsize;
 
-       err = crypto_ccm_check_iv(iv);
+       err = crypto_ccm_init_crypt(req, authtag);
        if (err)
                return err;
 
-       pctx->flags = aead_request_flags(req);
-
-       scatterwalk_map_and_copy(authtag, req->src, cryptlen, authsize, 0);
-
-       memset(iv + 15 - iv[0], 0, iv[0] + 1);
-
-       sg_init_table(pctx->src, 2);
-       sg_set_buf(pctx->src, authtag, 16);
-       scatterwalk_sg_chain(pctx->src, 2, req->src);
+       scatterwalk_map_and_copy(authtag, sg_next(pctx->src), cryptlen,
+                                authsize, 0);
 
        dst = pctx->src;
-       if (req->src != req->dst) {
-               sg_init_table(pctx->dst, 2);
-               sg_set_buf(pctx->dst, authtag, 16);
-               scatterwalk_sg_chain(pctx->dst, 2, req->dst);
+       if (req->src != req->dst)
                dst = pctx->dst;
-       }
 
        ablkcipher_request_set_tfm(abreq, ctx->ctr);
        ablkcipher_request_set_callback(abreq, pctx->flags,
@@ -418,7 +437,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)
        if (err)
                return err;
 
-       err = crypto_ccm_auth(req, req->dst, cryptlen);
+       err = crypto_ccm_auth(req, sg_next(dst), cryptlen);
        if (err)
                return err;
 
@@ -429,11 +448,11 @@ static int crypto_ccm_decrypt(struct aead_request *req)
        return err;
 }
 
-static int crypto_ccm_init_tfm(struct crypto_tfm *tfm)
+static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
 {
-       struct crypto_instance *inst = (void *)tfm->__crt_alg;
-       struct ccm_instance_ctx *ictx = crypto_instance_ctx(inst);
-       struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct aead_instance *inst = aead_alg_instance(tfm);
+       struct ccm_instance_ctx *ictx = aead_instance_ctx(inst);
+       struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_cipher *cipher;
        struct crypto_ablkcipher *ctr;
        unsigned long align;
@@ -451,9 +470,10 @@ static int crypto_ccm_init_tfm(struct crypto_tfm *tfm)
        ctx->cipher = cipher;
        ctx->ctr = ctr;
 
-       align = crypto_tfm_alg_alignmask(tfm);
+       align = crypto_aead_alignmask(tfm);
        align &= ~(crypto_tfm_ctx_alignment() - 1);
-       crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+       crypto_aead_set_reqsize(
+               tfm,
                align + sizeof(struct crypto_ccm_req_priv_ctx) +
                crypto_ablkcipher_reqsize(ctr));
 
@@ -464,21 +484,31 @@ err_free_cipher:
        return err;
 }
 
-static void crypto_ccm_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_ccm_exit_tfm(struct crypto_aead *tfm)
 {
-       struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm);
 
        crypto_free_cipher(ctx->cipher);
        crypto_free_ablkcipher(ctx->ctr);
 }
 
-static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb,
-                                                      const char *full_name,
-                                                      const char *ctr_name,
-                                                      const char *cipher_name)
+static void crypto_ccm_free(struct aead_instance *inst)
+{
+       struct ccm_instance_ctx *ctx = aead_instance_ctx(inst);
+
+       crypto_drop_spawn(&ctx->cipher);
+       crypto_drop_skcipher(&ctx->ctr);
+       kfree(inst);
+}
+
+static int crypto_ccm_create_common(struct crypto_template *tmpl,
+                                   struct rtattr **tb,
+                                   const char *full_name,
+                                   const char *ctr_name,
+                                   const char *cipher_name)
 {
        struct crypto_attr_type *algt;
-       struct crypto_instance *inst;
+       struct aead_instance *inst;
        struct crypto_alg *ctr;
        struct crypto_alg *cipher;
        struct ccm_instance_ctx *ictx;
@@ -486,15 +516,15 @@ static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb,
 
        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
-               return ERR_CAST(algt);
+               return PTR_ERR(algt);
 
        if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        cipher = crypto_alg_mod_lookup(cipher_name,  CRYPTO_ALG_TYPE_CIPHER,
                                       CRYPTO_ALG_TYPE_MASK);
        if (IS_ERR(cipher))
-               return ERR_CAST(cipher);
+               return PTR_ERR(cipher);
 
        err = -EINVAL;
        if (cipher->cra_blocksize != 16)
@@ -505,14 +535,15 @@ static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb,
        if (!inst)
                goto out_put_cipher;
 
-       ictx = crypto_instance_ctx(inst);
+       ictx = aead_instance_ctx(inst);
 
-       err = crypto_init_spawn(&ictx->cipher, cipher, inst,
+       err = crypto_init_spawn(&ictx->cipher, cipher,
+                               aead_crypto_instance(inst),
                                CRYPTO_ALG_TYPE_MASK);
        if (err)
                goto err_free_inst;
 
-       crypto_set_skcipher_spawn(&ictx->ctr, inst);
+       crypto_set_skcipher_spawn(&ictx->ctr, aead_crypto_instance(inst));
        err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0,
                                   crypto_requires_sync(algt->type,
                                                        algt->mask));
@@ -531,33 +562,39 @@ static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb,
                goto err_drop_ctr;
 
        err = -ENAMETOOLONG;
-       if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+       if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "ccm_base(%s,%s)", ctr->cra_driver_name,
                     cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                goto err_drop_ctr;
 
-       memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
-
-       inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
-       inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC;
-       inst->alg.cra_priority = cipher->cra_priority + ctr->cra_priority;
-       inst->alg.cra_blocksize = 1;
-       inst->alg.cra_alignmask = cipher->cra_alignmask | ctr->cra_alignmask |
-                                 (__alignof__(u32) - 1);
-       inst->alg.cra_type = &crypto_aead_type;
-       inst->alg.cra_aead.ivsize = 16;
-       inst->alg.cra_aead.maxauthsize = 16;
-       inst->alg.cra_ctxsize = sizeof(struct crypto_ccm_ctx);
-       inst->alg.cra_init = crypto_ccm_init_tfm;
-       inst->alg.cra_exit = crypto_ccm_exit_tfm;
-       inst->alg.cra_aead.setkey = crypto_ccm_setkey;
-       inst->alg.cra_aead.setauthsize = crypto_ccm_setauthsize;
-       inst->alg.cra_aead.encrypt = crypto_ccm_encrypt;
-       inst->alg.cra_aead.decrypt = crypto_ccm_decrypt;
+       memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
+
+       inst->alg.base.cra_flags = ctr->cra_flags & CRYPTO_ALG_ASYNC;
+       inst->alg.base.cra_priority = (cipher->cra_priority +
+                                      ctr->cra_priority) / 2;
+       inst->alg.base.cra_blocksize = 1;
+       inst->alg.base.cra_alignmask = cipher->cra_alignmask |
+                                      ctr->cra_alignmask |
+                                      (__alignof__(u32) - 1);
+       inst->alg.ivsize = 16;
+       inst->alg.maxauthsize = 16;
+       inst->alg.base.cra_ctxsize = sizeof(struct crypto_ccm_ctx);
+       inst->alg.init = crypto_ccm_init_tfm;
+       inst->alg.exit = crypto_ccm_exit_tfm;
+       inst->alg.setkey = crypto_ccm_setkey;
+       inst->alg.setauthsize = crypto_ccm_setauthsize;
+       inst->alg.encrypt = crypto_ccm_encrypt;
+       inst->alg.decrypt = crypto_ccm_decrypt;
+
+       inst->free = crypto_ccm_free;
+
+       err = aead_register_instance(tmpl, inst);
+       if (err)
+               goto err_drop_ctr;
 
-out:
+out_put_cipher:
        crypto_mod_put(cipher);
-       return inst;
+       return err;
 
 err_drop_ctr:
        crypto_drop_skcipher(&ictx->ctr);
@@ -565,12 +602,10 @@ err_drop_cipher:
        crypto_drop_spawn(&ictx->cipher);
 err_free_inst:
        kfree(inst);
-out_put_cipher:
-       inst = ERR_PTR(err);
-       goto out;
+       goto out_put_cipher;
 }
 
-static struct crypto_instance *crypto_ccm_alloc(struct rtattr **tb)
+static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
        const char *cipher_name;
        char ctr_name[CRYPTO_MAX_ALG_NAME];
@@ -578,36 +613,28 @@ static struct crypto_instance *crypto_ccm_alloc(struct rtattr **tb)
 
        cipher_name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(cipher_name))
-               return ERR_CAST(cipher_name);
+               return PTR_ERR(cipher_name);
 
        if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
                     cipher_name) >= CRYPTO_MAX_ALG_NAME)
-               return ERR_PTR(-ENAMETOOLONG);
+               return -ENAMETOOLONG;
 
        if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >=
            CRYPTO_MAX_ALG_NAME)
-               return ERR_PTR(-ENAMETOOLONG);
+               return -ENAMETOOLONG;
 
-       return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name);
-}
-
-static void crypto_ccm_free(struct crypto_instance *inst)
-{
-       struct ccm_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-       crypto_drop_spawn(&ctx->cipher);
-       crypto_drop_skcipher(&ctx->ctr);
-       kfree(inst);
+       return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
+                                       cipher_name);
 }
 
 static struct crypto_template crypto_ccm_tmpl = {
        .name = "ccm",
-       .alloc = crypto_ccm_alloc,
-       .free = crypto_ccm_free,
+       .create = crypto_ccm_create,
        .module = THIS_MODULE,
 };
 
-static struct crypto_instance *crypto_ccm_base_alloc(struct rtattr **tb)
+static int crypto_ccm_base_create(struct crypto_template *tmpl,
+                                 struct rtattr **tb)
 {
        const char *ctr_name;
        const char *cipher_name;
@@ -615,23 +642,23 @@ static struct crypto_instance *crypto_ccm_base_alloc(struct rtattr **tb)
 
        ctr_name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(ctr_name))
-               return ERR_CAST(ctr_name);
+               return PTR_ERR(ctr_name);
 
        cipher_name = crypto_attr_alg_name(tb[2]);
        if (IS_ERR(cipher_name))
-               return ERR_CAST(cipher_name);
+               return PTR_ERR(cipher_name);
 
        if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
                     ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
-               return ERR_PTR(-ENAMETOOLONG);
+               return -ENAMETOOLONG;
 
-       return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name);
+       return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
+                                       cipher_name);
 }
 
 static struct crypto_template crypto_ccm_base_tmpl = {
        .name = "ccm_base",
-       .alloc = crypto_ccm_base_alloc,
-       .free = crypto_ccm_free,
+       .create = crypto_ccm_base_create,
        .module = THIS_MODULE,
 };
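
Both templates now use the .create hook introduced with the AEAD transition: instead of returning an instance for the template machinery to register via .alloc and tear down via a template-wide .free, .create registers the aead_instance itself and hangs per-instance destruction off inst->free (crypto_ccm_free above). The skeleton, with the spawn-grabbing elided -- the example_* names are placeholders, not from the patch:

    #include <crypto/internal/aead.h>
    #include <linux/module.h>
    #include <linux/slab.h>

    static void example_free(struct aead_instance *inst)
    {
            /* drop any spawns grabbed in example_create() ... */
            kfree(inst);
    }

    static int example_create(struct crypto_template *tmpl,
                              struct rtattr **tb)
    {
            struct aead_instance *inst;
            int err;

            inst = kzalloc(sizeof(*inst), GFP_KERNEL);
            if (!inst)
                    return -ENOMEM;

            /* grab spawns, fill in inst->alg ... */

            inst->free = example_free;

            err = aead_register_instance(tmpl, inst);
            if (err)
                    example_free(inst);
            return err;
    }

    static struct crypto_template example_tmpl = {
            .name = "example",
            .create = example_create,
            .module = THIS_MODULE,
    };
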
 
@@ -677,10 +704,12 @@ static int crypto_rfc4309_setauthsize(struct crypto_aead *parent,
 
 static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req)
 {
-       struct aead_request *subreq = aead_request_ctx(req);
+       struct crypto_rfc4309_req_ctx *rctx = aead_request_ctx(req);
+       struct aead_request *subreq = &rctx->subreq;
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(aead);
        struct crypto_aead *child = ctx->child;
+       struct scatterlist *sg;
        u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
                           crypto_aead_alignmask(child) + 1);
 
@@ -690,17 +719,38 @@ static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req)
        memcpy(iv + 1, ctx->nonce, 3);
        memcpy(iv + 4, req->iv, 8);
 
+       scatterwalk_map_and_copy(iv + 16, req->src, 0, req->assoclen - 8, 0);
+
+       sg_init_table(rctx->src, 3);
+       sg_set_buf(rctx->src, iv + 16, req->assoclen - 8);
+       sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen);
+       if (sg != rctx->src + 1)
+               sg_chain(rctx->src, 2, sg);
+
+       if (req->src != req->dst) {
+               sg_init_table(rctx->dst, 3);
+               sg_set_buf(rctx->dst, iv + 16, req->assoclen - 8);
+               sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen);
+               if (sg != rctx->dst + 1)
+                       sg_chain(rctx->dst, 2, sg);
+       }
+
        aead_request_set_tfm(subreq, child);
        aead_request_set_callback(subreq, req->base.flags, req->base.complete,
                                  req->base.data);
-       aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv);
-       aead_request_set_assoc(subreq, req->assoc, req->assoclen);
+       aead_request_set_crypt(subreq, rctx->src,
+                              req->src == req->dst ? rctx->src : rctx->dst,
+                              req->cryptlen, iv);
+       aead_request_set_ad(subreq, req->assoclen - 8);
 
        return subreq;
 }
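
crypto_rfc4309_crypt() assembles the 11-byte CCM nonce from the 3-byte salt peeled off the key and the 8-byte explicit IV carried in the packet, and -- new with this layout -- linearizes the remaining associated data into scratch space at iv + 16, which is why the reqsize above grew from align + 16 to align + 32. It is also why the entry points below reject anything but assoclen of 16 or 20: 8 bytes of explicit IV plus the 8- or 12-byte ESP header allowed by RFC 4309. The IV layout as a standalone sketch (rfc4309_fill_iv is an illustrative name):

    #include <linux/string.h>

    /* CCM counter block for rfc4309: L = 4, so the flag byte is L - 1. */
    static void rfc4309_fill_iv(u8 iv[16], const u8 salt[3],
                                const u8 seq_iv[8])
    {
            iv[0] = 3;                      /* L' = L - 1 */
            memcpy(iv + 1, salt, 3);        /* salt from the key tail */
            memcpy(iv + 4, seq_iv, 8);      /* explicit per-packet IV */
            memset(iv + 12, 0, 4);          /* counter space; the CCM core
                                             * re-zeroes this range anyway */
    }
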
 
 static int crypto_rfc4309_encrypt(struct aead_request *req)
 {
+       if (req->assoclen != 16 && req->assoclen != 20)
+               return -EINVAL;
+
        req = crypto_rfc4309_crypt(req);
 
        return crypto_aead_encrypt(req);
@@ -708,16 +758,19 @@ static int crypto_rfc4309_encrypt(struct aead_request *req)
 
 static int crypto_rfc4309_decrypt(struct aead_request *req)
 {
+       if (req->assoclen != 16 && req->assoclen != 20)
+               return -EINVAL;
+
        req = crypto_rfc4309_crypt(req);
 
        return crypto_aead_decrypt(req);
 }
 
-static int crypto_rfc4309_init_tfm(struct crypto_tfm *tfm)
+static int crypto_rfc4309_init_tfm(struct crypto_aead *tfm)
 {
-       struct crypto_instance *inst = (void *)tfm->__crt_alg;
-       struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst);
-       struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct aead_instance *inst = aead_alg_instance(tfm);
+       struct crypto_aead_spawn *spawn = aead_instance_ctx(inst);
+       struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_aead *aead;
        unsigned long align;
 
@@ -729,115 +782,118 @@ static int crypto_rfc4309_init_tfm(struct crypto_tfm *tfm)
 
        align = crypto_aead_alignmask(aead);
        align &= ~(crypto_tfm_ctx_alignment() - 1);
-       crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
-               sizeof(struct aead_request) +
+       crypto_aead_set_reqsize(
+               tfm,
+               sizeof(struct crypto_rfc4309_req_ctx) +
                ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) +
-               align + 16);
+               align + 32);
 
        return 0;
 }
 
-static void crypto_rfc4309_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_rfc4309_exit_tfm(struct crypto_aead *tfm)
 {
-       struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(tfm);
 
        crypto_free_aead(ctx->child);
 }
 
-static struct crypto_instance *crypto_rfc4309_alloc(struct rtattr **tb)
+static void crypto_rfc4309_free(struct aead_instance *inst)
+{
+       crypto_drop_aead(aead_instance_ctx(inst));
+       kfree(inst);
+}
+
+static int crypto_rfc4309_create(struct crypto_template *tmpl,
+                                struct rtattr **tb)
 {
        struct crypto_attr_type *algt;
-       struct crypto_instance *inst;
+       struct aead_instance *inst;
        struct crypto_aead_spawn *spawn;
-       struct crypto_alg *alg;
+       struct aead_alg *alg;
        const char *ccm_name;
        int err;
 
        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
-               return ERR_CAST(algt);
+               return PTR_ERR(algt);
 
        if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        ccm_name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(ccm_name))
-               return ERR_CAST(ccm_name);
+               return PTR_ERR(ccm_name);
 
        inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
        if (!inst)
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
 
-       spawn = crypto_instance_ctx(inst);
-       crypto_set_aead_spawn(spawn, inst);
+       spawn = aead_instance_ctx(inst);
+       crypto_set_aead_spawn(spawn, aead_crypto_instance(inst));
        err = crypto_grab_aead(spawn, ccm_name, 0,
                               crypto_requires_sync(algt->type, algt->mask));
        if (err)
                goto out_free_inst;
 
-       alg = crypto_aead_spawn_alg(spawn);
+       alg = crypto_spawn_aead_alg(spawn);
 
        err = -EINVAL;
 
        /* We only support 16-byte blocks. */
-       if (alg->cra_aead.ivsize != 16)
+       if (crypto_aead_alg_ivsize(alg) != 16)
                goto out_drop_alg;
 
        /* Not a stream cipher? */
-       if (alg->cra_blocksize != 1)
+       if (alg->base.cra_blocksize != 1)
                goto out_drop_alg;
 
        err = -ENAMETOOLONG;
-       if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
-                    "rfc4309(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
-           snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
-                    "rfc4309(%s)", alg->cra_driver_name) >=
+       if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+                    "rfc4309(%s)", alg->base.cra_name) >=
+           CRYPTO_MAX_ALG_NAME ||
+           snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+                    "rfc4309(%s)", alg->base.cra_driver_name) >=
            CRYPTO_MAX_ALG_NAME)
                goto out_drop_alg;
 
-       inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
-       inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
-       inst->alg.cra_priority = alg->cra_priority;
-       inst->alg.cra_blocksize = 1;
-       inst->alg.cra_alignmask = alg->cra_alignmask;
-       inst->alg.cra_type = &crypto_nivaead_type;
+       inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
+       inst->alg.base.cra_priority = alg->base.cra_priority;
+       inst->alg.base.cra_blocksize = 1;
+       inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
 
-       inst->alg.cra_aead.ivsize = 8;
-       inst->alg.cra_aead.maxauthsize = 16;
+       inst->alg.ivsize = 8;
+       inst->alg.maxauthsize = 16;
 
-       inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx);
+       inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx);
 
-       inst->alg.cra_init = crypto_rfc4309_init_tfm;
-       inst->alg.cra_exit = crypto_rfc4309_exit_tfm;
+       inst->alg.init = crypto_rfc4309_init_tfm;
+       inst->alg.exit = crypto_rfc4309_exit_tfm;
 
-       inst->alg.cra_aead.setkey = crypto_rfc4309_setkey;
-       inst->alg.cra_aead.setauthsize = crypto_rfc4309_setauthsize;
-       inst->alg.cra_aead.encrypt = crypto_rfc4309_encrypt;
-       inst->alg.cra_aead.decrypt = crypto_rfc4309_decrypt;
+       inst->alg.setkey = crypto_rfc4309_setkey;
+       inst->alg.setauthsize = crypto_rfc4309_setauthsize;
+       inst->alg.encrypt = crypto_rfc4309_encrypt;
+       inst->alg.decrypt = crypto_rfc4309_decrypt;
 
-       inst->alg.cra_aead.geniv = "seqiv";
+       inst->free = crypto_rfc4309_free;
+
+       err = aead_register_instance(tmpl, inst);
+       if (err)
+               goto out_drop_alg;
 
 out:
-       return inst;
+       return err;
 
 out_drop_alg:
        crypto_drop_aead(spawn);
 out_free_inst:
        kfree(inst);
-       inst = ERR_PTR(err);
        goto out;
 }
 
-static void crypto_rfc4309_free(struct crypto_instance *inst)
-{
-       crypto_drop_spawn(crypto_instance_ctx(inst));
-       kfree(inst);
-}
-
 static struct crypto_template crypto_rfc4309_tmpl = {
        .name = "rfc4309",
-       .alloc = crypto_rfc4309_alloc,
-       .free = crypto_rfc4309_free,
+       .create = crypto_rfc4309_create,
        .module = THIS_MODULE,
 };
 
crypto/chacha20_generic.c
index fa42e708aa9616d82ec18191a505fcc7e24abab4..da9c89968223673e08160fd6656cd0907437e070 100644 (file)
 #include <linux/crypto.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-
-#define CHACHA20_NONCE_SIZE 16
-#define CHACHA20_KEY_SIZE   32
-#define CHACHA20_BLOCK_SIZE 64
-
-struct chacha20_ctx {
-       u32 key[8];
-};
+#include <crypto/chacha20.h>
 
 static inline u32 rotl32(u32 v, u8 n)
 {
@@ -108,7 +101,7 @@ static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src,
        }
 }
 
-static void chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv)
+void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv)
 {
        static const char constant[16] = "expand 32-byte k";
 
@@ -129,8 +122,9 @@ static void chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv)
        state[14] = le32_to_cpuvp(iv +  8);
        state[15] = le32_to_cpuvp(iv + 12);
 }
+EXPORT_SYMBOL_GPL(crypto_chacha20_init);
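
crypto_chacha20_init() is exported so the new x86-64 glue (see the file list) can share it. It fills the sixteen-word ChaCha20 matrix: four constant words spelling "expand 32-byte k", eight key words, and four words loaded little-endian from the 16-byte IV, the first of which doubles as the initial block counter in this driver. Spelled out as a sketch, with get_unaligned_le32() standing in for the driver's le32_to_cpuvp():

    #include <asm/unaligned.h>
    #include <linux/string.h>

    static void chacha20_state_sketch(u32 state[16],
                                      const u32 key[8], const u8 iv[16])
    {
            state[0] = 0x61707865;  /* "expa" */
            state[1] = 0x3320646e;  /* "nd 3" */
            state[2] = 0x79622d32;  /* "2-by" */
            state[3] = 0x6b206574;  /* "te k" */
            memcpy(state + 4, key, 32);             /* words 4..11 */
            state[12] = get_unaligned_le32(iv + 0); /* block counter */
            state[13] = get_unaligned_le32(iv + 4);
            state[14] = get_unaligned_le32(iv + 8);
            state[15] = get_unaligned_le32(iv + 12);
    }
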
 
-static int chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
+int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
                           unsigned int keysize)
 {
        struct chacha20_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -144,8 +138,9 @@ static int chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(crypto_chacha20_setkey);
 
-static int chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+int crypto_chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                          struct scatterlist *src, unsigned int nbytes)
 {
        struct blkcipher_walk walk;
@@ -155,7 +150,7 @@ static int chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE);
 
-       chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);
+       crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);
 
        while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
                chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
@@ -172,6 +167,7 @@ static int chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 
        return err;
 }
+EXPORT_SYMBOL_GPL(crypto_chacha20_crypt);
 
 static struct crypto_alg alg = {
        .cra_name               = "chacha20",
@@ -187,11 +183,11 @@ static struct crypto_alg alg = {
                .blkcipher = {
                        .min_keysize    = CHACHA20_KEY_SIZE,
                        .max_keysize    = CHACHA20_KEY_SIZE,
-                       .ivsize         = CHACHA20_NONCE_SIZE,
+                       .ivsize         = CHACHA20_IV_SIZE,
                        .geniv          = "seqiv",
-                       .setkey         = chacha20_setkey,
-                       .encrypt        = chacha20_crypt,
-                       .decrypt        = chacha20_crypt,
+                       .setkey         = crypto_chacha20_setkey,
+                       .encrypt        = crypto_chacha20_crypt,
+                       .decrypt        = crypto_chacha20_crypt,
                },
        },
 };
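
The constants and context that used to be private to this file now come from the shared <crypto/chacha20.h>, so the SSSE3/AVX2 glue added elsewhere in this series can reuse the generic setkey and state initializer. Judging from the symbols exported above, the header's shape is roughly the following (inferred from this diff, not a verbatim copy):

    /* include/crypto/chacha20.h -- inferred shape */
    #ifndef _CRYPTO_CHACHA20_H
    #define _CRYPTO_CHACHA20_H

    #include <linux/crypto.h>
    #include <linux/types.h>

    #define CHACHA20_IV_SIZE        16
    #define CHACHA20_KEY_SIZE       32
    #define CHACHA20_BLOCK_SIZE     64

    struct chacha20_ctx {
            u32 key[8];
    };

    void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
    int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
                               unsigned int keysize);
    int crypto_chacha20_crypt(struct blkcipher_desc *desc,
                              struct scatterlist *dst,
                              struct scatterlist *src,
                              unsigned int nbytes);

    #endif
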
crypto/chacha20poly1305.c
index 7b46ed799a64cdc6710408a16a847147ccd58290..99c3cce0129035b5c41e2e8b4ebdab5ecd9fde2e 100644 (file)
@@ -13,6 +13,8 @@
 #include <crypto/internal/hash.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/scatterwalk.h>
+#include <crypto/chacha20.h>
+#include <crypto/poly1305.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 
 #include "internal.h"
 
-#define POLY1305_BLOCK_SIZE    16
-#define POLY1305_DIGEST_SIZE   16
-#define POLY1305_KEY_SIZE      32
-#define CHACHA20_KEY_SIZE      32
-#define CHACHA20_IV_SIZE       16
 #define CHACHAPOLY_IV_SIZE     12
 
 struct chachapoly_instance_ctx {
@@ -60,12 +57,16 @@ struct chacha_req {
 };
 
 struct chachapoly_req_ctx {
+       struct scatterlist src[2];
+       struct scatterlist dst[2];
        /* the key we generate for Poly1305 using Chacha20 */
        u8 key[POLY1305_KEY_SIZE];
        /* calculated Poly1305 tag */
        u8 tag[POLY1305_DIGEST_SIZE];
        /* length of data to en/decrypt, without ICV */
        unsigned int cryptlen;
+       /* Actual AD, excluding IV */
+       unsigned int assoclen;
        union {
                struct poly_req poly;
                struct chacha_req chacha;
@@ -98,7 +99,9 @@ static int poly_verify_tag(struct aead_request *req)
        struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
        u8 tag[sizeof(rctx->tag)];
 
-       scatterwalk_map_and_copy(tag, req->src, rctx->cryptlen, sizeof(tag), 0);
+       scatterwalk_map_and_copy(tag, req->src,
+                                req->assoclen + rctx->cryptlen,
+                                sizeof(tag), 0);
        if (crypto_memneq(tag, rctx->tag, sizeof(tag)))
                return -EBADMSG;
        return 0;
@@ -108,7 +111,8 @@ static int poly_copy_tag(struct aead_request *req)
 {
        struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
 
-       scatterwalk_map_and_copy(rctx->tag, req->dst, rctx->cryptlen,
+       scatterwalk_map_and_copy(rctx->tag, req->dst,
+                                req->assoclen + rctx->cryptlen,
                                 sizeof(rctx->tag), 1);
        return 0;
 }
@@ -123,14 +127,24 @@ static int chacha_decrypt(struct aead_request *req)
        struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
        struct chacha_req *creq = &rctx->u.chacha;
+       struct scatterlist *src, *dst;
        int err;
 
        chacha_iv(creq->iv, req, 1);
 
+       sg_init_table(rctx->src, 2);
+       src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
+       dst = src;
+
+       if (req->src != req->dst) {
+               sg_init_table(rctx->dst, 2);
+               dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
+       }
+
        ablkcipher_request_set_callback(&creq->req, aead_request_flags(req),
                                        chacha_decrypt_done, req);
        ablkcipher_request_set_tfm(&creq->req, ctx->chacha);
-       ablkcipher_request_set_crypt(&creq->req, req->src, req->dst,
+       ablkcipher_request_set_crypt(&creq->req, src, dst,
                                     rctx->cryptlen, creq->iv);
        err = crypto_ablkcipher_decrypt(&creq->req);
        if (err)
@@ -156,14 +170,15 @@ static void poly_tail_done(struct crypto_async_request *areq, int err)
 
 static int poly_tail(struct aead_request *req)
 {
-       struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
        struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
        struct poly_req *preq = &rctx->u.poly;
        __le64 len;
        int err;
 
        sg_init_table(preq->src, 1);
-       len = cpu_to_le64(req->assoclen);
+       len = cpu_to_le64(rctx->assoclen);
        memcpy(&preq->tail.assoclen, &len, sizeof(len));
        len = cpu_to_le64(rctx->cryptlen);
        memcpy(&preq->tail.cryptlen, &len, sizeof(len));
@@ -228,6 +243,9 @@ static int poly_cipher(struct aead_request *req)
        if (rctx->cryptlen == req->cryptlen) /* encrypting */
                crypt = req->dst;
 
+       sg_init_table(rctx->src, 2);
+       crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen);
+
        ahash_request_set_callback(&preq->req, aead_request_flags(req),
                                   poly_cipher_done, req);
        ahash_request_set_tfm(&preq->req, ctx->poly);
@@ -253,7 +271,7 @@ static int poly_adpad(struct aead_request *req)
        unsigned int padlen, bs = POLY1305_BLOCK_SIZE;
        int err;
 
-       padlen = (bs - (req->assoclen % bs)) % bs;
+       padlen = (bs - (rctx->assoclen % bs)) % bs;
        memset(preq->pad, 0, sizeof(preq->pad));
        sg_init_table(preq->src, 1);
        sg_set_buf(preq->src, preq->pad, padlen);
@@ -285,7 +303,7 @@ static int poly_ad(struct aead_request *req)
        ahash_request_set_callback(&preq->req, aead_request_flags(req),
                                   poly_ad_done, req);
        ahash_request_set_tfm(&preq->req, ctx->poly);
-       ahash_request_set_crypt(&preq->req, req->assoc, NULL, req->assoclen);
+       ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen);
 
        err = crypto_ahash_update(&preq->req);
        if (err)
@@ -351,11 +369,20 @@ static void poly_genkey_done(struct crypto_async_request *areq, int err)
 
 static int poly_genkey(struct aead_request *req)
 {
-       struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
        struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
        struct chacha_req *creq = &rctx->u.chacha;
        int err;
 
+       rctx->assoclen = req->assoclen;
+
+       if (crypto_aead_ivsize(tfm) == 8) {
+               if (rctx->assoclen < 8)
+                       return -EINVAL;
+               rctx->assoclen -= 8;
+       }
+
        sg_init_table(creq->src, 1);
        memset(rctx->key, 0, sizeof(rctx->key));
        sg_set_buf(creq->src, rctx->key, sizeof(rctx->key));
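
poly_genkey() derives the one-time Poly1305 key the RFC 7539 way: zero a 32-byte buffer and encrypt it with ChaCha20 at block counter 0 (the encrypt/decrypt steps above seed the counter with 1), so the key is simply the first 32 bytes of keystream block 0. The new ivsize == 8 branch additionally strips the 8-byte explicit IV from the AD count for the rfc7539esp variant. Reduced to a synchronous sketch, assuming a chacha20_block() helper that emits one 64-byte keystream block:

    #include <linux/string.h>

    #define CHACHA20_BLOCK_SIZE     64
    #define POLY1305_KEY_SIZE       32

    void chacha20_block(u32 *state, void *stream);  /* assumed helper */

    /* state holds key/nonce with the counter word set to 0. */
    static void poly1305_genkey_sketch(u32 state[16],
                                       u8 key[POLY1305_KEY_SIZE])
    {
            u8 block[CHACHA20_BLOCK_SIZE];

            chacha20_block(state, block);   /* keystream block 0 */
            memcpy(key, block, POLY1305_KEY_SIZE);
            memzero_explicit(block, sizeof(block));
    }
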
@@ -385,14 +412,24 @@ static int chacha_encrypt(struct aead_request *req)
        struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
        struct chacha_req *creq = &rctx->u.chacha;
+       struct scatterlist *src, *dst;
        int err;
 
        chacha_iv(creq->iv, req, 1);
 
+       sg_init_table(rctx->src, 2);
+       src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
+       dst = src;
+
+       if (req->src != req->dst) {
+               sg_init_table(rctx->dst, 2);
+               dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
+       }
+
        ablkcipher_request_set_callback(&creq->req, aead_request_flags(req),
                                        chacha_encrypt_done, req);
        ablkcipher_request_set_tfm(&creq->req, ctx->chacha);
-       ablkcipher_request_set_crypt(&creq->req, req->src, req->dst,
+       ablkcipher_request_set_crypt(&creq->req, src, dst,
                                     req->cryptlen, creq->iv);
        err = crypto_ablkcipher_encrypt(&creq->req);
        if (err)
@@ -426,8 +463,6 @@ static int chachapoly_decrypt(struct aead_request *req)
 {
        struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
 
-       if (req->cryptlen < POLY1305_DIGEST_SIZE)
-               return -EINVAL;
        rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE;
 
        /* decrypt call chain:
@@ -476,11 +511,11 @@ static int chachapoly_setauthsize(struct crypto_aead *tfm,
        return 0;
 }
 
-static int chachapoly_init(struct crypto_tfm *tfm)
+static int chachapoly_init(struct crypto_aead *tfm)
 {
-       struct crypto_instance *inst = (void *)tfm->__crt_alg;
-       struct chachapoly_instance_ctx *ictx = crypto_instance_ctx(inst);
-       struct chachapoly_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct aead_instance *inst = aead_alg_instance(tfm);
+       struct chachapoly_instance_ctx *ictx = aead_instance_ctx(inst);
+       struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_ablkcipher *chacha;
        struct crypto_ahash *poly;
        unsigned long align;
@@ -499,77 +534,87 @@ static int chachapoly_init(struct crypto_tfm *tfm)
        ctx->poly = poly;
        ctx->saltlen = ictx->saltlen;
 
-       align = crypto_tfm_alg_alignmask(tfm);
+       align = crypto_aead_alignmask(tfm);
        align &= ~(crypto_tfm_ctx_alignment() - 1);
-       crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
-                               align + offsetof(struct chachapoly_req_ctx, u) +
-                               max(offsetof(struct chacha_req, req) +
-                                   sizeof(struct ablkcipher_request) +
-                                   crypto_ablkcipher_reqsize(chacha),
-                                   offsetof(struct poly_req, req) +
-                                   sizeof(struct ahash_request) +
-                                   crypto_ahash_reqsize(poly)));
+       crypto_aead_set_reqsize(
+               tfm,
+               align + offsetof(struct chachapoly_req_ctx, u) +
+               max(offsetof(struct chacha_req, req) +
+                   sizeof(struct ablkcipher_request) +
+                   crypto_ablkcipher_reqsize(chacha),
+                   offsetof(struct poly_req, req) +
+                   sizeof(struct ahash_request) +
+                   crypto_ahash_reqsize(poly)));
 
        return 0;
 }
 
-static void chachapoly_exit(struct crypto_tfm *tfm)
+static void chachapoly_exit(struct crypto_aead *tfm)
 {
-       struct chachapoly_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
 
        crypto_free_ahash(ctx->poly);
        crypto_free_ablkcipher(ctx->chacha);
 }
 
-static struct crypto_instance *chachapoly_alloc(struct rtattr **tb,
-                                               const char *name,
-                                               unsigned int ivsize)
+static void chachapoly_free(struct aead_instance *inst)
+{
+       struct chachapoly_instance_ctx *ctx = aead_instance_ctx(inst);
+
+       crypto_drop_skcipher(&ctx->chacha);
+       crypto_drop_ahash(&ctx->poly);
+       kfree(inst);
+}
+
+static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
+                            const char *name, unsigned int ivsize)
 {
        struct crypto_attr_type *algt;
-       struct crypto_instance *inst;
+       struct aead_instance *inst;
        struct crypto_alg *chacha;
        struct crypto_alg *poly;
-       struct ahash_alg *poly_ahash;
+       struct hash_alg_common *poly_hash;
        struct chachapoly_instance_ctx *ctx;
        const char *chacha_name, *poly_name;
        int err;
 
        if (ivsize > CHACHAPOLY_IV_SIZE)
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
-               return ERR_CAST(algt);
+               return PTR_ERR(algt);
 
        if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        chacha_name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(chacha_name))
-               return ERR_CAST(chacha_name);
+               return PTR_ERR(chacha_name);
        poly_name = crypto_attr_alg_name(tb[2]);
        if (IS_ERR(poly_name))
-               return ERR_CAST(poly_name);
+               return PTR_ERR(poly_name);
 
        poly = crypto_find_alg(poly_name, &crypto_ahash_type,
                               CRYPTO_ALG_TYPE_HASH,
                               CRYPTO_ALG_TYPE_AHASH_MASK);
        if (IS_ERR(poly))
-               return ERR_CAST(poly);
+               return PTR_ERR(poly);
 
        err = -ENOMEM;
        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                goto out_put_poly;
 
-       ctx = crypto_instance_ctx(inst);
+       ctx = aead_instance_ctx(inst);
        ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize;
-       poly_ahash = container_of(poly, struct ahash_alg, halg.base);
-       err = crypto_init_ahash_spawn(&ctx->poly, &poly_ahash->halg, inst);
+       poly_hash = __crypto_hash_alg_common(poly);
+       err = crypto_init_ahash_spawn(&ctx->poly, poly_hash,
+                                     aead_crypto_instance(inst));
        if (err)
                goto err_free_inst;
 
-       crypto_set_skcipher_spawn(&ctx->chacha, inst);
+       crypto_set_skcipher_spawn(&ctx->chacha, aead_crypto_instance(inst));
        err = crypto_grab_skcipher(&ctx->chacha, chacha_name, 0,
                                   crypto_requires_sync(algt->type,
                                                        algt->mask));
@@ -587,37 +632,42 @@ static struct crypto_instance *chachapoly_alloc(struct rtattr **tb,
                goto out_drop_chacha;
 
        err = -ENAMETOOLONG;
-       if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
+       if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
                     "%s(%s,%s)", name, chacha_name,
                     poly_name) >= CRYPTO_MAX_ALG_NAME)
                goto out_drop_chacha;
-       if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+       if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "%s(%s,%s)", name, chacha->cra_driver_name,
                     poly->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                goto out_drop_chacha;
 
-       inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
-       inst->alg.cra_flags |= (chacha->cra_flags |
-                               poly->cra_flags) & CRYPTO_ALG_ASYNC;
-       inst->alg.cra_priority = (chacha->cra_priority +
-                                 poly->cra_priority) / 2;
-       inst->alg.cra_blocksize = 1;
-       inst->alg.cra_alignmask = chacha->cra_alignmask | poly->cra_alignmask;
-       inst->alg.cra_type = &crypto_nivaead_type;
-       inst->alg.cra_aead.ivsize = ivsize;
-       inst->alg.cra_aead.maxauthsize = POLY1305_DIGEST_SIZE;
-       inst->alg.cra_ctxsize = sizeof(struct chachapoly_ctx) + ctx->saltlen;
-       inst->alg.cra_init = chachapoly_init;
-       inst->alg.cra_exit = chachapoly_exit;
-       inst->alg.cra_aead.encrypt = chachapoly_encrypt;
-       inst->alg.cra_aead.decrypt = chachapoly_decrypt;
-       inst->alg.cra_aead.setkey = chachapoly_setkey;
-       inst->alg.cra_aead.setauthsize = chachapoly_setauthsize;
-       inst->alg.cra_aead.geniv = "seqiv";
-
-out:
+       inst->alg.base.cra_flags = (chacha->cra_flags | poly->cra_flags) &
+                                  CRYPTO_ALG_ASYNC;
+       inst->alg.base.cra_priority = (chacha->cra_priority +
+                                      poly->cra_priority) / 2;
+       inst->alg.base.cra_blocksize = 1;
+       inst->alg.base.cra_alignmask = chacha->cra_alignmask |
+                                      poly->cra_alignmask;
+       inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) +
+                                    ctx->saltlen;
+       inst->alg.ivsize = ivsize;
+       inst->alg.maxauthsize = POLY1305_DIGEST_SIZE;
+       inst->alg.init = chachapoly_init;
+       inst->alg.exit = chachapoly_exit;
+       inst->alg.encrypt = chachapoly_encrypt;
+       inst->alg.decrypt = chachapoly_decrypt;
+       inst->alg.setkey = chachapoly_setkey;
+       inst->alg.setauthsize = chachapoly_setauthsize;
+
+       inst->free = chachapoly_free;
+
+       err = aead_register_instance(tmpl, inst);
+       if (err)
+               goto out_drop_chacha;
+
+out_put_poly:
        crypto_mod_put(poly);
-       return inst;
+       return err;
 
 out_drop_chacha:
        crypto_drop_skcipher(&ctx->chacha);
@@ -625,41 +675,28 @@ err_drop_poly:
        crypto_drop_ahash(&ctx->poly);
 err_free_inst:
        kfree(inst);
-out_put_poly:
-       inst = ERR_PTR(err);
-       goto out;
-}
-
-static struct crypto_instance *rfc7539_alloc(struct rtattr **tb)
-{
-       return chachapoly_alloc(tb, "rfc7539", 12);
+       goto out_put_poly;
 }
 
-static struct crypto_instance *rfc7539esp_alloc(struct rtattr **tb)
+static int rfc7539_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
-       return chachapoly_alloc(tb, "rfc7539esp", 8);
+       return chachapoly_create(tmpl, tb, "rfc7539", 12);
 }
 
-static void chachapoly_free(struct crypto_instance *inst)
+static int rfc7539esp_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
-       struct chachapoly_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-       crypto_drop_skcipher(&ctx->chacha);
-       crypto_drop_ahash(&ctx->poly);
-       kfree(inst);
+       return chachapoly_create(tmpl, tb, "rfc7539esp", 8);
 }
 
 static struct crypto_template rfc7539_tmpl = {
        .name = "rfc7539",
-       .alloc = rfc7539_alloc,
-       .free = chachapoly_free,
+       .create = rfc7539_create,
        .module = THIS_MODULE,
 };
 
 static struct crypto_template rfc7539esp_tmpl = {
        .name = "rfc7539esp",
-       .alloc = rfc7539esp_alloc,
-       .free = chachapoly_free,
+       .create = rfc7539esp_create,
        .module = THIS_MODULE,
 };
 
@@ -690,6 +727,5 @@ module_exit(chacha20poly1305_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
 MODULE_DESCRIPTION("ChaCha20-Poly1305 AEAD");
-MODULE_ALIAS_CRYPTO("chacha20poly1305");
 MODULE_ALIAS_CRYPTO("rfc7539");
 MODULE_ALIAS_CRYPTO("rfc7539esp");
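
Taken together, the converted template implements RFC 7539 AEAD over the one-buffer layout: the chacha steps skip req->assoclen bytes with scatterwalk_ffwd() before en/decrypting, and the poly_* steps feed the MAC in the RFC's order. What Poly1305 ends up hashing, with a tail struct mirroring preq->tail above (the sketch name is illustrative):

    #include <linux/types.h>

    /*
     * Poly1305 input (RFC 7539, section 2.8.1):
     *
     *   AD                      poly_ad(), rctx->assoclen bytes
     *   zero pad to 16 bytes    poly_adpad()
     *   ciphertext              poly_cipher(), rctx->cryptlen bytes
     *   zero pad to 16 bytes    the matching ciphertext pad step (not shown)
     *   8-byte LE lengths       poly_tail()
     */
    struct poly1305_tail_sketch {
            __le64 assoclen;
            __le64 cryptlen;
    };
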
crypto/cryptd.c
index 22ba81f76764aff2edd614ad8fad2914df33a8a9..c81861b1350b60a1c2def7695dd16d7b0db39ade 100644 (file)
@@ -176,10 +176,9 @@ static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return;
-       if ((algt->type & CRYPTO_ALG_INTERNAL))
-               *type |= CRYPTO_ALG_INTERNAL;
-       if ((algt->mask & CRYPTO_ALG_INTERNAL))
-               *mask |= CRYPTO_ALG_INTERNAL;
+
+       *type |= algt->type & CRYPTO_ALG_INTERNAL;
+       *mask |= algt->mask & CRYPTO_ALG_INTERNAL;
 }
 
 static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
@@ -688,16 +687,18 @@ static void cryptd_aead_crypt(struct aead_request *req,
                        int (*crypt)(struct aead_request *req))
 {
        struct cryptd_aead_request_ctx *rctx;
+       crypto_completion_t compl;
+
        rctx = aead_request_ctx(req);
+       compl = rctx->complete;
 
        if (unlikely(err == -EINPROGRESS))
                goto out;
        aead_request_set_tfm(req, child);
        err = crypt( req );
-       req->base.complete = rctx->complete;
 out:
        local_bh_disable();
-       rctx->complete(&req->base, err);
+       compl(&req->base, err);
        local_bh_enable();
 }
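
Ordering matters here: cryptd hands req itself to the child (aead_request_set_tfm(req, child)), so the child transform runs over the same request context that holds rctx->complete -- the reqsize bump to max() of both sizes below exists precisely for this sharing. Snapshotting the completion pointer before calling into the child keeps the final callback valid even if the child clobbers the context on the synchronous path. In outline (a restatement of the hunk above, not new API):

    crypto_completion_t compl = rctx->complete;  /* snapshot first */

    aead_request_set_tfm(req, child);
    err = crypt(req);            /* child may reuse req's context area */

    local_bh_disable();
    compl(&req->base, err);      /* call the saved copy */
    local_bh_enable();
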
 
@@ -708,7 +709,7 @@ static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
        struct aead_request *req;
 
        req = container_of(areq, struct aead_request, base);
-       cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt);
+       cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
 }
 
 static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
@@ -718,7 +719,7 @@ static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
        struct aead_request *req;
 
        req = container_of(areq, struct aead_request, base);
-       cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt);
+       cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
 }
 
 static int cryptd_aead_enqueue(struct aead_request *req,
@@ -756,7 +757,9 @@ static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
                return PTR_ERR(cipher);
 
        ctx->child = cipher;
-       crypto_aead_set_reqsize(tfm, sizeof(struct cryptd_aead_request_ctx));
+       crypto_aead_set_reqsize(
+               tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
+                        crypto_aead_reqsize(cipher)));
        return 0;
 }
 
@@ -775,7 +778,7 @@ static int cryptd_create_aead(struct crypto_template *tmpl,
        struct aead_alg *alg;
        const char *name;
        u32 type = 0;
-       u32 mask = 0;
+       u32 mask = CRYPTO_ALG_ASYNC;
        int err;
 
        cryptd_check_internal(tb, &type, &mask);
crypto/crypto_user.c
index 08ea2867fc8a370d9eb0f1cd0365783f5a5e4f2c..d94d99ffe8b9b6cecf2c8348241ea17738e9b457 100644 (file)
@@ -25,7 +25,6 @@
 #include <net/netlink.h>
 #include <linux/security.h>
 #include <net/net_namespace.h>
-#include <crypto/internal/aead.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/internal/rng.h>
 #include <crypto/akcipher.h>
@@ -385,34 +384,6 @@ static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type,
        return ERR_PTR(err);
 }
 
-static struct crypto_alg *crypto_user_aead_alg(const char *name, u32 type,
-                                              u32 mask)
-{
-       int err;
-       struct crypto_alg *alg;
-
-       type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
-       type |= CRYPTO_ALG_TYPE_AEAD;
-       mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
-       mask |= CRYPTO_ALG_TYPE_MASK;
-
-       for (;;) {
-               alg = crypto_lookup_aead(name,  type, mask);
-               if (!IS_ERR(alg))
-                       return alg;
-
-               err = PTR_ERR(alg);
-               if (err != -EAGAIN)
-                       break;
-               if (signal_pending(current)) {
-                       err = -EINTR;
-                       break;
-               }
-       }
-
-       return ERR_PTR(err);
-}
-
 static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
                          struct nlattr **attrs)
 {
@@ -446,9 +417,6 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
                name = p->cru_name;
 
        switch (p->cru_type & p->cru_mask & CRYPTO_ALG_TYPE_MASK) {
-       case CRYPTO_ALG_TYPE_AEAD:
-               alg = crypto_user_aead_alg(name, p->cru_type, p->cru_mask);
-               break;
        case CRYPTO_ALG_TYPE_GIVCIPHER:
        case CRYPTO_ALG_TYPE_BLKCIPHER:
        case CRYPTO_ALG_TYPE_ABLKCIPHER:
crypto/echainiv.c
index b6e43dc6135653eddb3e31f9659d54bc774bc6d1..b96a84560b67790845321250e1f1a799557bb4b8 100644 (file)
@@ -19,8 +19,6 @@
  */
 
 #include <crypto/internal/geniv.h>
-#include <crypto/null.h>
-#include <crypto/rng.h>
 #include <crypto/scatterwalk.h>
 #include <linux/err.h>
 #include <linux/init.h>
 
 #define MAX_IV_SIZE 16
 
-struct echainiv_ctx {
-       /* aead_geniv_ctx must be first the element */
-       struct aead_geniv_ctx geniv;
-       struct crypto_blkcipher *null;
-       u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
-};
-
 static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
 
 /* We don't care if we get preempted and read/write IVs from the next CPU. */
@@ -103,7 +94,7 @@ static void echainiv_encrypt_complete(struct crypto_async_request *base,
 static int echainiv_encrypt(struct aead_request *req)
 {
        struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-       struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
+       struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
        struct aead_request *subreq = aead_request_ctx(req);
        crypto_completion_t compl;
        void *data;
@@ -114,7 +105,7 @@ static int echainiv_encrypt(struct aead_request *req)
        if (req->cryptlen < ivsize)
                return -EINVAL;
 
-       aead_request_set_tfm(subreq, ctx->geniv.child);
+       aead_request_set_tfm(subreq, ctx->child);
 
        compl = echainiv_encrypt_complete;
        data = req;
@@ -145,8 +136,8 @@ static int echainiv_encrypt(struct aead_request *req)
 
        aead_request_set_callback(subreq, req->base.flags, compl, data);
        aead_request_set_crypt(subreq, req->dst, req->dst,
-                              req->cryptlen - ivsize, info);
-       aead_request_set_ad(subreq, req->assoclen + ivsize);
+                              req->cryptlen, info);
+       aead_request_set_ad(subreq, req->assoclen);
 
        crypto_xor(info, ctx->salt, ivsize);
        scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
@@ -160,16 +151,16 @@ static int echainiv_encrypt(struct aead_request *req)
 static int echainiv_decrypt(struct aead_request *req)
 {
        struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-       struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
+       struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
        struct aead_request *subreq = aead_request_ctx(req);
        crypto_completion_t compl;
        void *data;
        unsigned int ivsize = crypto_aead_ivsize(geniv);
 
-       if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
+       if (req->cryptlen < ivsize)
                return -EINVAL;
 
-       aead_request_set_tfm(subreq, ctx->geniv.child);
+       aead_request_set_tfm(subreq, ctx->child);
 
        compl = req->base.complete;
        data = req->base.data;
@@ -180,61 +171,10 @@ static int echainiv_decrypt(struct aead_request *req)
        aead_request_set_ad(subreq, req->assoclen + ivsize);
 
        scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
-       if (req->src != req->dst)
-               scatterwalk_map_and_copy(req->iv, req->dst,
-                                        req->assoclen, ivsize, 1);
 
        return crypto_aead_decrypt(subreq);
 }
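
echainiv keeps the IV in line: encrypt XORs the per-cpu IV with the salt and writes it at offset req->assoclen of dst, where the child sees it as the tail of the associated data (hence set_ad(req->assoclen + ivsize) on the decrypt side above). Decrypt therefore only has to read the transmitted IV back into req->iv; the old copy into dst was dropped since the child now handles src != dst itself, and the private context shrank to the plain aead_geniv_ctx once the RNG-seeded salt setup moved into the shared aead_init_geniv() below. The wire layout, for orientation (a comment-level sketch, not code from the patch):

    /*
     * echainiv message layout (dst after encrypt, src before decrypt):
     *
     *   [0, assoclen)                   associated data
     *   [assoclen, assoclen + ivsize)   generated IV, MACed as AD tail
     *   [assoclen + ivsize, ...)        ciphertext, then the auth tag
     */
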
 
-static int echainiv_init(struct crypto_tfm *tfm)
-{
-       struct crypto_aead *geniv = __crypto_aead_cast(tfm);
-       struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
-       int err;
-
-       spin_lock_init(&ctx->geniv.lock);
-
-       crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));
-
-       err = crypto_get_default_rng();
-       if (err)
-               goto out;
-
-       err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
-                                  crypto_aead_ivsize(geniv));
-       crypto_put_default_rng();
-       if (err)
-               goto out;
-
-       ctx->null = crypto_get_default_null_skcipher();
-       err = PTR_ERR(ctx->null);
-       if (IS_ERR(ctx->null))
-               goto out;
-
-       err = aead_geniv_init(tfm);
-       if (err)
-               goto drop_null;
-
-       ctx->geniv.child = geniv->child;
-       geniv->child = geniv;
-
-out:
-       return err;
-
-drop_null:
-       crypto_put_default_null_skcipher();
-       goto out;
-}
-
-static void echainiv_exit(struct crypto_tfm *tfm)
-{
-       struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       crypto_free_aead(ctx->geniv.child);
-       crypto_put_default_null_skcipher();
-}
-
 static int echainiv_aead_create(struct crypto_template *tmpl,
                                struct rtattr **tb)
 {
@@ -251,9 +191,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
        spawn = aead_instance_ctx(inst);
        alg = crypto_spawn_aead_alg(spawn);
 
-       if (alg->base.cra_aead.encrypt)
-               goto done;
-
        err = -EINVAL;
        if (inst->alg.ivsize & (sizeof(u32) - 1) ||
            inst->alg.ivsize > MAX_IV_SIZE)
@@ -262,14 +199,15 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
        inst->alg.encrypt = echainiv_encrypt;
        inst->alg.decrypt = echainiv_decrypt;
 
-       inst->alg.base.cra_init = echainiv_init;
-       inst->alg.base.cra_exit = echainiv_exit;
+       inst->alg.init = aead_init_geniv;
+       inst->alg.exit = aead_exit_geniv;
 
        inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
-       inst->alg.base.cra_ctxsize = sizeof(struct echainiv_ctx);
+       inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
        inst->alg.base.cra_ctxsize += inst->alg.ivsize;
 
-done:
+       inst->free = aead_geniv_free;
+
        err = aead_register_instance(tmpl, inst);
        if (err)
                goto free_inst;
crypto/gcm.c
index 7d32d4720564315f57a2766f5e1813d536d9ee69..ddb4f29b2fe6b0fc917ac9f3b4c2134a5b3c52e4 100644 (file)
@@ -38,6 +38,12 @@ struct crypto_rfc4106_ctx {
        u8 nonce[4];
 };
 
+struct crypto_rfc4106_req_ctx {
+       struct scatterlist src[3];
+       struct scatterlist dst[3];
+       struct aead_request subreq;
+};
+
 struct crypto_rfc4543_instance_ctx {
        struct crypto_aead_spawn aead;
 };
@@ -601,6 +607,15 @@ static void crypto_gcm_exit_tfm(struct crypto_aead *tfm)
        crypto_free_ablkcipher(ctx->ctr);
 }
 
+static void crypto_gcm_free(struct aead_instance *inst)
+{
+       struct gcm_instance_ctx *ctx = aead_instance_ctx(inst);
+
+       crypto_drop_skcipher(&ctx->ctr);
+       crypto_drop_ahash(&ctx->ghash);
+       kfree(inst);
+}
+
 static int crypto_gcm_create_common(struct crypto_template *tmpl,
                                    struct rtattr **tb,
                                    const char *full_name,
@@ -689,6 +704,8 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
        inst->alg.encrypt = crypto_gcm_encrypt;
        inst->alg.decrypt = crypto_gcm_decrypt;
 
+       inst->free = crypto_gcm_free;
+
        err = aead_register_instance(tmpl, inst);
        if (err)
                goto out_put_ctr;
@@ -728,19 +745,9 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
                                        ctr_name, "ghash");
 }
 
-static void crypto_gcm_free(struct crypto_instance *inst)
-{
-       struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-       crypto_drop_skcipher(&ctx->ctr);
-       crypto_drop_ahash(&ctx->ghash);
-       kfree(aead_instance(inst));
-}
-
 static struct crypto_template crypto_gcm_tmpl = {
        .name = "gcm",
        .create = crypto_gcm_create,
-       .free = crypto_gcm_free,
        .module = THIS_MODULE,
 };
 
@@ -770,7 +777,6 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
 static struct crypto_template crypto_gcm_base_tmpl = {
        .name = "gcm_base",
        .create = crypto_gcm_base_create,
-       .free = crypto_gcm_free,
        .module = THIS_MODULE,
 };
 
@@ -816,27 +822,50 @@ static int crypto_rfc4106_setauthsize(struct crypto_aead *parent,
 
 static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req)
 {
-       struct aead_request *subreq = aead_request_ctx(req);
+       struct crypto_rfc4106_req_ctx *rctx = aead_request_ctx(req);
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(aead);
+       struct aead_request *subreq = &rctx->subreq;
        struct crypto_aead *child = ctx->child;
+       struct scatterlist *sg;
        u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
                           crypto_aead_alignmask(child) + 1);
 
+       scatterwalk_map_and_copy(iv + 12, req->src, 0, req->assoclen - 8, 0);
+
        memcpy(iv, ctx->nonce, 4);
        memcpy(iv + 4, req->iv, 8);
 
+       sg_init_table(rctx->src, 3);
+       sg_set_buf(rctx->src, iv + 12, req->assoclen - 8);
+       sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen);
+       if (sg != rctx->src + 1)
+               sg_chain(rctx->src, 2, sg);
+
+       if (req->src != req->dst) {
+               sg_init_table(rctx->dst, 3);
+               sg_set_buf(rctx->dst, iv + 12, req->assoclen - 8);
+               sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen);
+               if (sg != rctx->dst + 1)
+                       sg_chain(rctx->dst, 2, sg);
+       }
+
        aead_request_set_tfm(subreq, child);
        aead_request_set_callback(subreq, req->base.flags, req->base.complete,
                                  req->base.data);
-       aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv);
-       aead_request_set_ad(subreq, req->assoclen);
+       aead_request_set_crypt(subreq, rctx->src,
+                              req->src == req->dst ? rctx->src : rctx->dst,
+                              req->cryptlen, iv);
+       aead_request_set_ad(subreq, req->assoclen - 8);
 
        return subreq;
 }
 
 static int crypto_rfc4106_encrypt(struct aead_request *req)
 {
+       if (req->assoclen != 16 && req->assoclen != 20)
+               return -EINVAL;
+
        req = crypto_rfc4106_crypt(req);
 
        return crypto_aead_encrypt(req);
@@ -844,6 +873,9 @@ static int crypto_rfc4106_encrypt(struct aead_request *req)
 
 static int crypto_rfc4106_decrypt(struct aead_request *req)
 {
+       if (req->assoclen != 16 && req->assoclen != 20)
+               return -EINVAL;
+
        req = crypto_rfc4106_crypt(req);
 
        return crypto_aead_decrypt(req);
@@ -867,9 +899,9 @@ static int crypto_rfc4106_init_tfm(struct crypto_aead *tfm)
        align &= ~(crypto_tfm_ctx_alignment() - 1);
        crypto_aead_set_reqsize(
                tfm,
-               sizeof(struct aead_request) +
+               sizeof(struct crypto_rfc4106_req_ctx) +
                ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) +
-               align + 12);
+               align + 24);
 
        return 0;
 }
@@ -881,6 +913,12 @@ static void crypto_rfc4106_exit_tfm(struct crypto_aead *tfm)
        crypto_free_aead(ctx->child);
 }
 
+static void crypto_rfc4106_free(struct aead_instance *inst)
+{
+       crypto_drop_aead(aead_instance_ctx(inst));
+       kfree(inst);
+}
+
 static int crypto_rfc4106_create(struct crypto_template *tmpl,
                                 struct rtattr **tb)
 {
@@ -934,7 +972,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
            CRYPTO_MAX_ALG_NAME)
                goto out_drop_alg;
 
-       inst->alg.base.cra_flags |= alg->base.cra_flags & CRYPTO_ALG_ASYNC;
+       inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
        inst->alg.base.cra_priority = alg->base.cra_priority;
        inst->alg.base.cra_blocksize = 1;
        inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
@@ -952,6 +990,8 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
        inst->alg.encrypt = crypto_rfc4106_encrypt;
        inst->alg.decrypt = crypto_rfc4106_decrypt;
 
+       inst->free = crypto_rfc4106_free;
+
        err = aead_register_instance(tmpl, inst);
        if (err)
                goto out_drop_alg;
@@ -966,16 +1006,9 @@ out_free_inst:
        goto out;
 }
 
-static void crypto_rfc4106_free(struct crypto_instance *inst)
-{
-       crypto_drop_aead(crypto_instance_ctx(inst));
-       kfree(aead_instance(inst));
-}
-
 static struct crypto_template crypto_rfc4106_tmpl = {
        .name = "rfc4106",
        .create = crypto_rfc4106_create,
-       .free = crypto_rfc4106_free,
        .module = THIS_MODULE,
 };
 
@@ -1114,6 +1147,15 @@ static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm)
        crypto_put_default_null_skcipher();
 }
 
+static void crypto_rfc4543_free(struct aead_instance *inst)
+{
+       struct crypto_rfc4543_instance_ctx *ctx = aead_instance_ctx(inst);
+
+       crypto_drop_aead(&ctx->aead);
+
+       kfree(inst);
+}
+
 static int crypto_rfc4543_create(struct crypto_template *tmpl,
                                struct rtattr **tb)
 {
@@ -1187,6 +1229,8 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
        inst->alg.encrypt = crypto_rfc4543_encrypt;
        inst->alg.decrypt = crypto_rfc4543_decrypt;
 
+       inst->free = crypto_rfc4543_free;
+
        err = aead_register_instance(tmpl, inst);
        if (err)
                goto out_drop_alg;
@@ -1201,19 +1245,9 @@ out_free_inst:
        goto out;
 }
 
-static void crypto_rfc4543_free(struct crypto_instance *inst)
-{
-       struct crypto_rfc4543_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-       crypto_drop_aead(&ctx->aead);
-
-       kfree(aead_instance(inst));
-}
-
 static struct crypto_template crypto_rfc4543_tmpl = {
        .name = "rfc4543",
        .create = crypto_rfc4543_create,
-       .free = crypto_rfc4543_free,
        .module = THIS_MODULE,
 };
 
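Note on the rfc4106 conversion above: the wrapper now folds the 8-byte ESP IV into the trailing part of the associated data, which is why encrypt/decrypt reject any assoclen other than 16 (SPI + sequence number + IV) or 20 (with ESN). A minimal caller sketch under the new AEAD interface follows; it is not part of the patch, the helper name and buffer layout are illustrative, and error paths are abbreviated.

    /*
     * Sketch only: driving "rfc4106(gcm(aes))" under the new layout.
     * The source scatterlist holds AD || plaintext; assoclen covers
     * the 8-byte IV, so it must be 16 or 20, else -EINVAL.
     */
    #include <crypto/aead.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    static int rfc4106_encrypt_sketch(struct scatterlist *sg,
                                      unsigned int ptlen, u8 *iv,
                                      const u8 *key, unsigned int keylen)
    {
            struct crypto_aead *tfm;
            struct aead_request *req;
            int err;

            tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            /* keylen includes the trailing 4-byte nonce. */
            err = crypto_aead_setkey(tfm, key, keylen);
            if (err)
                    goto out_free_tfm;

            req = aead_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    err = -ENOMEM;
                    goto out_free_tfm;
            }

            aead_request_set_ad(req, 16);   /* 8 AD bytes + 8 IV bytes */
            aead_request_set_crypt(req, sg, sg, ptlen, iv);
            err = crypto_aead_encrypt(req);

            aead_request_free(req);
    out_free_tfm:
            crypto_free_aead(tfm);
            return err;
    }
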
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index b32d834144cdce724613dda889cec09484af7384..ceea83d13168f648e2ca22f635504992c24ba7f2 100644 (file)
@@ -79,7 +79,7 @@ int jent_fips_enabled(void)
 
 void jent_panic(char *s)
 {
-       panic(s);
+       panic("%s", s);
 }
 
 void jent_memcpy(void *dest, const void *src, unsigned int n)
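
The one-line jitterentropy fix above closes a format-string hazard: the argument reaches panic() as the format, so any '%' in the message would be parsed as a conversion. A contrived illustration (the message text is hypothetical):

    char *s = "entropy health test failed at 100%s";  /* hypothetical message */

    panic(s);       /* old: "%s" parsed as a conversion, reads bogus varargs */
    panic("%s", s); /* new: s is emitted verbatim */
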
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 45e7d515567294506142904fd771005c8e1ca70c..ee9cfb99fe256af06ae7ad5d946c3b76d90de1e9 100644 (file)
@@ -274,11 +274,16 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
                              u32 type, u32 mask)
 {
        struct pcrypt_instance_ctx *ctx;
+       struct crypto_attr_type *algt;
        struct aead_instance *inst;
        struct aead_alg *alg;
        const char *name;
        int err;
 
+       algt = crypto_get_attr_type(tb);
+       if (IS_ERR(algt))
+               return PTR_ERR(algt);
+
        name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(name))
                return PTR_ERR(name);
@@ -299,6 +304,8 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
        if (err)
                goto out_drop_aead;
 
+       inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC;
+
        inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
        inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
 
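Two behavioural points in the pcrypt hunks above: the template now validates the attribute type up front, and the instance flags are hard-set to CRYPTO_ALG_ASYNC rather than inherited, since padata always completes requests asynchronously. A caller that cannot tolerate async completion masks the bit out and will therefore never be handed a pcrypt instance; a sketch:

    /* Sketch: requesting a synchronous-only AEAD. With cra_flags fixed
     * to CRYPTO_ALG_ASYNC, no "pcrypt(...)" instance can match this. */
    struct crypto_aead *tfm;

    tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
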
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
index 387b5c887a8035cfff51cc0c57eb8a3668235eee..2df9835dfbc0c6e039c522460926cee88991809a 100644 (file)
 
 #include <crypto/algapi.h>
 #include <crypto/internal/hash.h>
+#include <crypto/poly1305.h>
 #include <linux/crypto.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 
-#define POLY1305_BLOCK_SIZE    16
-#define POLY1305_KEY_SIZE      32
-#define POLY1305_DIGEST_SIZE   16
-
-struct poly1305_desc_ctx {
-       /* key */
-       u32 r[5];
-       /* finalize key */
-       u32 s[4];
-       /* accumulator */
-       u32 h[5];
-       /* partial buffer */
-       u8 buf[POLY1305_BLOCK_SIZE];
-       /* bytes used in partial buffer */
-       unsigned int buflen;
-       /* r key has been set */
-       bool rset;
-       /* s key has been set */
-       bool sset;
-};
-
 static inline u64 mlt(u64 a, u64 b)
 {
        return a * b;
@@ -58,7 +38,7 @@ static inline u32 le32_to_cpuvp(const void *p)
        return le32_to_cpup(p);
 }
 
-static int poly1305_init(struct shash_desc *desc)
+int crypto_poly1305_init(struct shash_desc *desc)
 {
        struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
 
@@ -69,8 +49,9 @@ static int poly1305_init(struct shash_desc *desc)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(crypto_poly1305_init);
 
-static int poly1305_setkey(struct crypto_shash *tfm,
+int crypto_poly1305_setkey(struct crypto_shash *tfm,
                           const u8 *key, unsigned int keylen)
 {
        /* Poly1305 requires a unique key for each tag, which implies that
@@ -79,6 +60,7 @@ static int poly1305_setkey(struct crypto_shash *tfm,
         * the update() call. */
        return -ENOTSUPP;
 }
+EXPORT_SYMBOL_GPL(crypto_poly1305_setkey);
 
 static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key)
 {
@@ -98,16 +80,10 @@ static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key)
        dctx->s[3] = le32_to_cpuvp(key + 12);
 }
 
-static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx,
-                                   const u8 *src, unsigned int srclen,
-                                   u32 hibit)
+unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
+                                       const u8 *src, unsigned int srclen)
 {
-       u32 r0, r1, r2, r3, r4;
-       u32 s1, s2, s3, s4;
-       u32 h0, h1, h2, h3, h4;
-       u64 d0, d1, d2, d3, d4;
-
-       if (unlikely(!dctx->sset)) {
+       if (!dctx->sset) {
                if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) {
                        poly1305_setrkey(dctx, src);
                        src += POLY1305_BLOCK_SIZE;
@@ -121,6 +97,25 @@ static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx,
                        dctx->sset = true;
                }
        }
+       return srclen;
+}
+EXPORT_SYMBOL_GPL(crypto_poly1305_setdesckey);
+
+static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx,
+                                   const u8 *src, unsigned int srclen,
+                                   u32 hibit)
+{
+       u32 r0, r1, r2, r3, r4;
+       u32 s1, s2, s3, s4;
+       u32 h0, h1, h2, h3, h4;
+       u64 d0, d1, d2, d3, d4;
+       unsigned int datalen;
+
+       if (unlikely(!dctx->sset)) {
+               datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
+               src += srclen - datalen;
+               srclen = datalen;
+       }
 
        r0 = dctx->r[0];
        r1 = dctx->r[1];
@@ -181,7 +176,7 @@ static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx,
        return srclen;
 }
 
-static int poly1305_update(struct shash_desc *desc,
+int crypto_poly1305_update(struct shash_desc *desc,
                           const u8 *src, unsigned int srclen)
 {
        struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
@@ -214,8 +209,9 @@ static int poly1305_update(struct shash_desc *desc,
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(crypto_poly1305_update);
 
-static int poly1305_final(struct shash_desc *desc, u8 *dst)
+int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
 {
        struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
        __le32 *mac = (__le32 *)dst;
@@ -282,13 +278,14 @@ static int poly1305_final(struct shash_desc *desc, u8 *dst)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(crypto_poly1305_final);
 
 static struct shash_alg poly1305_alg = {
        .digestsize     = POLY1305_DIGEST_SIZE,
-       .init           = poly1305_init,
-       .update         = poly1305_update,
-       .final          = poly1305_final,
-       .setkey         = poly1305_setkey,
+       .init           = crypto_poly1305_init,
+       .update         = crypto_poly1305_update,
+       .final          = crypto_poly1305_final,
+       .setkey         = crypto_poly1305_setkey,
        .descsize       = sizeof(struct poly1305_desc_ctx),
        .base           = {
                .cra_name               = "poly1305",
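
The poly1305 rework above moves the descriptor context and size constants into <crypto/poly1305.h> and exports the init/update/final/setdesckey helpers, so the arch-optimized drivers added elsewhere in this pull (the x86-64 SSE2/AVX2 glue) can fall back to the generic path. A hypothetical fallback in an arch glue module might look like this (the wrapper name is illustrative):

    #include <crypto/internal/hash.h>
    #include <crypto/poly1305.h>

    static int my_poly1305_update(struct shash_desc *desc, const u8 *src,
                                  unsigned int srclen)
    {
            /* No SIMD context usable: defer to the exported generic code. */
            return crypto_poly1305_update(desc, src, srclen);
    }
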
diff --git a/crypto/rsa.c b/crypto/rsa.c
index 752af0656f2e60e9e4c56ea8bcb065bca63ebde4..466003e1a8cf20b501425ef8049cdc71b3f262bb 100644 (file)
@@ -267,12 +267,36 @@ err_free_m:
        return ret;
 }
 
+static int rsa_check_key_length(unsigned int len)
+{
+       switch (len) {
+       case 512:
+       case 1024:
+       case 1536:
+       case 2048:
+       case 3072:
+       case 4096:
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
 static int rsa_setkey(struct crypto_akcipher *tfm, const void *key,
                      unsigned int keylen)
 {
        struct rsa_key *pkey = akcipher_tfm_ctx(tfm);
+       int ret;
 
-       return rsa_parse_key(pkey, key, keylen);
+       ret = rsa_parse_key(pkey, key, keylen);
+       if (ret)
+               return ret;
+
+       if (rsa_check_key_length(mpi_get_size(pkey->n) << 3)) {
+               rsa_free_key(pkey);
+               ret = -EINVAL;
+       }
+       return ret;
 }
 
 static void rsa_exit_tfm(struct crypto_akcipher *tfm)
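
In the rsa_setkey() change above, mpi_get_size() reports the modulus length in bytes, so the << 3 converts it to bits before the whitelist check. A worked example (not in the patch):

    unsigned int nbytes = 256;             /* mpi_get_size(pkey->n) */
    unsigned int nbits  = nbytes << 3;     /* 256 * 8 == 2048 */
    /* rsa_check_key_length(2048) == 0; an off-size modulus such as
     * 257 bytes (2056 bits) misses the switch, so the parsed key is
     * freed and setkey returns -EINVAL. */
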
diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c
index 3e8e0a9e5a8e5e7a6ea89e5364db73d2a754b0a5..8d96ce969b4480601b125a14db850f5edda38ddc 100644 (file)
@@ -28,7 +28,7 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
                return -ENOMEM;
 
        /* In FIPS mode only allow key size 2K & 3K */
-       if (fips_enabled && (mpi_get_size(key->n) != 256 ||
+       if (fips_enabled && (mpi_get_size(key->n) != 256 &&
                             mpi_get_size(key->n) != 384)) {
                pr_err("RSA: key size not allowed in FIPS mode\n");
                mpi_free(key->n);
@@ -62,7 +62,7 @@ int rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
                return -ENOMEM;
 
        /* In FIPS mode only allow key size 2K & 3K */
-       if (fips_enabled && (mpi_get_size(key->d) != 256 ||
+       if (fips_enabled && (mpi_get_size(key->d) != 256 &&
                             mpi_get_size(key->d) != 384)) {
                pr_err("RSA: key size not allowed in FIPS mode\n");
                mpi_free(key->d);
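
The || to && swap above fixes a tautology: no size equals both 256 and 384 bytes, so `size != 256 || size != 384` held for every key, rejecting even the 2K and 3K sizes FIPS mode is supposed to allow. A quick truth check:

    /*
     * Old predicate: (size != 256 || size != 384)
     *   size 256 -> false || true  -> true  (rejected, wrong)
     *   size 384 -> true  || false -> true  (rejected, wrong)
     * New predicate: (size != 256 && size != 384)
     *   size 256 -> false && true  -> false (accepted)
     *   size 200 -> true  && true  -> true  (rejected, as intended)
     */
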
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 122c56e3491b9819b20cc34871a8cb4f6d6fdaac..15a749a5cab72ea53d597cfa1983a2a175154cc2 100644 (file)
@@ -15,7 +15,6 @@
 
 #include <crypto/internal/geniv.h>
 #include <crypto/internal/skcipher.h>
-#include <crypto/null.h>
 #include <crypto/rng.h>
 #include <crypto/scatterwalk.h>
 #include <linux/err.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
 
-struct seqniv_request_ctx {
-       struct scatterlist dst[2];
-       struct aead_request subreq;
-};
-
 struct seqiv_ctx {
        spinlock_t lock;
        u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
 };
 
-struct seqiv_aead_ctx {
-       /* aead_geniv_ctx must be first the element */
-       struct aead_geniv_ctx geniv;
-       struct crypto_blkcipher *null;
-       u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
-};
-
 static void seqiv_free(struct crypto_instance *inst);
 
 static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err)
@@ -71,32 +58,6 @@ static void seqiv_complete(struct crypto_async_request *base, int err)
        skcipher_givcrypt_complete(req, err);
 }
 
-static void seqiv_aead_complete2(struct aead_givcrypt_request *req, int err)
-{
-       struct aead_request *subreq = aead_givcrypt_reqctx(req);
-       struct crypto_aead *geniv;
-
-       if (err == -EINPROGRESS)
-               return;
-
-       if (err)
-               goto out;
-
-       geniv = aead_givcrypt_reqtfm(req);
-       memcpy(req->areq.iv, subreq->iv, crypto_aead_ivsize(geniv));
-
-out:
-       kfree(subreq->iv);
-}
-
-static void seqiv_aead_complete(struct crypto_async_request *base, int err)
-{
-       struct aead_givcrypt_request *req = base->data;
-
-       seqiv_aead_complete2(req, err);
-       aead_givcrypt_complete(req, err);
-}
-
 static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
 {
        struct aead_request *subreq = aead_request_ctx(req);
@@ -124,50 +85,6 @@ static void seqiv_aead_encrypt_complete(struct crypto_async_request *base,
        aead_request_complete(req, err);
 }
 
-static void seqniv_aead_encrypt_complete2(struct aead_request *req, int err)
-{
-       unsigned int ivsize = 8;
-       u8 data[20];
-
-       if (err == -EINPROGRESS)
-               return;
-
-       /* Swap IV and ESP header back to correct order. */
-       scatterwalk_map_and_copy(data, req->dst, 0, req->assoclen + ivsize, 0);
-       scatterwalk_map_and_copy(data + ivsize, req->dst, 0, req->assoclen, 1);
-       scatterwalk_map_and_copy(data, req->dst, req->assoclen, ivsize, 1);
-}
-
-static void seqniv_aead_encrypt_complete(struct crypto_async_request *base,
-                                       int err)
-{
-       struct aead_request *req = base->data;
-
-       seqniv_aead_encrypt_complete2(req, err);
-       aead_request_complete(req, err);
-}
-
-static void seqniv_aead_decrypt_complete2(struct aead_request *req, int err)
-{
-       u8 data[4];
-
-       if (err == -EINPROGRESS)
-               return;
-
-       /* Move ESP header back to correct location. */
-       scatterwalk_map_and_copy(data, req->dst, 16, req->assoclen - 8, 0);
-       scatterwalk_map_and_copy(data, req->dst, 8, req->assoclen - 8, 1);
-}
-
-static void seqniv_aead_decrypt_complete(struct crypto_async_request *base,
-                                        int err)
-{
-       struct aead_request *req = base->data;
-
-       seqniv_aead_decrypt_complete2(req, err);
-       aead_request_complete(req, err);
-}
-
 static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq,
                        unsigned int ivsize)
 {
@@ -227,112 +144,10 @@ static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
        return err;
 }
 
-static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
-{
-       struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
-       struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
-       struct aead_request *areq = &req->areq;
-       struct aead_request *subreq = aead_givcrypt_reqctx(req);
-       crypto_completion_t compl;
-       void *data;
-       u8 *info;
-       unsigned int ivsize;
-       int err;
-
-       aead_request_set_tfm(subreq, aead_geniv_base(geniv));
-
-       compl = areq->base.complete;
-       data = areq->base.data;
-       info = areq->iv;
-
-       ivsize = crypto_aead_ivsize(geniv);
-
-       if (unlikely(!IS_ALIGNED((unsigned long)info,
-                                crypto_aead_alignmask(geniv) + 1))) {
-               info = kmalloc(ivsize, areq->base.flags &
-                                      CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
-                                                                 GFP_ATOMIC);
-               if (!info)
-                       return -ENOMEM;
-
-               compl = seqiv_aead_complete;
-               data = req;
-       }
-
-       aead_request_set_callback(subreq, areq->base.flags, compl, data);
-       aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen,
-                              info);
-       aead_request_set_assoc(subreq, areq->assoc, areq->assoclen);
-
-       seqiv_geniv(ctx, info, req->seq, ivsize);
-       memcpy(req->giv, info, ivsize);
-
-       err = crypto_aead_encrypt(subreq);
-       if (unlikely(info != areq->iv))
-               seqiv_aead_complete2(req, err);
-       return err;
-}
-
-static int seqniv_aead_encrypt(struct aead_request *req)
-{
-       struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-       struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
-       struct seqniv_request_ctx *rctx = aead_request_ctx(req);
-       struct aead_request *subreq = &rctx->subreq;
-       struct scatterlist *dst;
-       crypto_completion_t compl;
-       void *data;
-       unsigned int ivsize = 8;
-       u8 buf[20] __attribute__ ((aligned(__alignof__(u32))));
-       int err;
-
-       if (req->cryptlen < ivsize)
-               return -EINVAL;
-
-       /* ESP AD is at most 12 bytes (ESN). */
-       if (req->assoclen > 12)
-               return -EINVAL;
-
-       aead_request_set_tfm(subreq, ctx->geniv.child);
-
-       compl = seqniv_aead_encrypt_complete;
-       data = req;
-
-       if (req->src != req->dst) {
-               struct blkcipher_desc desc = {
-                       .tfm = ctx->null,
-               };
-
-               err = crypto_blkcipher_encrypt(&desc, req->dst, req->src,
-                                              req->assoclen + req->cryptlen);
-               if (err)
-                       return err;
-       }
-
-       dst = scatterwalk_ffwd(rctx->dst, req->dst, ivsize);
-
-       aead_request_set_callback(subreq, req->base.flags, compl, data);
-       aead_request_set_crypt(subreq, dst, dst,
-                              req->cryptlen - ivsize, req->iv);
-       aead_request_set_ad(subreq, req->assoclen);
-
-       memcpy(buf, req->iv, ivsize);
-       crypto_xor(buf, ctx->salt, ivsize);
-       memcpy(req->iv, buf, ivsize);
-
-       /* Swap order of IV and ESP AD for ICV generation. */
-       scatterwalk_map_and_copy(buf + ivsize, req->dst, 0, req->assoclen, 0);
-       scatterwalk_map_and_copy(buf, req->dst, 0, req->assoclen + ivsize, 1);
-
-       err = crypto_aead_encrypt(subreq);
-       seqniv_aead_encrypt_complete2(req, err);
-       return err;
-}
-
 static int seqiv_aead_encrypt(struct aead_request *req)
 {
        struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-       struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
+       struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
        struct aead_request *subreq = aead_request_ctx(req);
        crypto_completion_t compl;
        void *data;
@@ -343,7 +158,7 @@ static int seqiv_aead_encrypt(struct aead_request *req)
        if (req->cryptlen < ivsize)
                return -EINVAL;
 
-       aead_request_set_tfm(subreq, ctx->geniv.child);
+       aead_request_set_tfm(subreq, ctx->child);
 
        compl = req->base.complete;
        data = req->base.data;
@@ -387,67 +202,10 @@ static int seqiv_aead_encrypt(struct aead_request *req)
        return err;
 }
 
-static int seqniv_aead_decrypt(struct aead_request *req)
-{
-       struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-       struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
-       struct seqniv_request_ctx *rctx = aead_request_ctx(req);
-       struct aead_request *subreq = &rctx->subreq;
-       struct scatterlist *dst;
-       crypto_completion_t compl;
-       void *data;
-       unsigned int ivsize = 8;
-       u8 buf[20];
-       int err;
-
-       if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
-               return -EINVAL;
-
-       aead_request_set_tfm(subreq, ctx->geniv.child);
-
-       compl = req->base.complete;
-       data = req->base.data;
-
-       if (req->assoclen > 12)
-               return -EINVAL;
-       else if (req->assoclen > 8) {
-               compl = seqniv_aead_decrypt_complete;
-               data = req;
-       }
-
-       if (req->src != req->dst) {
-               struct blkcipher_desc desc = {
-                       .tfm = ctx->null,
-               };
-
-               err = crypto_blkcipher_encrypt(&desc, req->dst, req->src,
-                                              req->assoclen + req->cryptlen);
-               if (err)
-                       return err;
-       }
-
-       /* Move ESP AD forward for ICV generation. */
-       scatterwalk_map_and_copy(buf, req->dst, 0, req->assoclen + ivsize, 0);
-       memcpy(req->iv, buf + req->assoclen, ivsize);
-       scatterwalk_map_and_copy(buf, req->dst, ivsize, req->assoclen, 1);
-
-       dst = scatterwalk_ffwd(rctx->dst, req->dst, ivsize);
-
-       aead_request_set_callback(subreq, req->base.flags, compl, data);
-       aead_request_set_crypt(subreq, dst, dst,
-                              req->cryptlen - ivsize, req->iv);
-       aead_request_set_ad(subreq, req->assoclen);
-
-       err = crypto_aead_decrypt(subreq);
-       if (req->assoclen > 8)
-               seqniv_aead_decrypt_complete2(req, err);
-       return err;
-}
-
 static int seqiv_aead_decrypt(struct aead_request *req)
 {
        struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-       struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
+       struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
        struct aead_request *subreq = aead_request_ctx(req);
        crypto_completion_t compl;
        void *data;
@@ -456,7 +214,7 @@ static int seqiv_aead_decrypt(struct aead_request *req)
        if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
                return -EINVAL;
 
-       aead_request_set_tfm(subreq, ctx->geniv.child);
+       aead_request_set_tfm(subreq, ctx->child);
 
        compl = req->base.complete;
        data = req->base.data;
@@ -467,9 +225,6 @@ static int seqiv_aead_decrypt(struct aead_request *req)
        aead_request_set_ad(subreq, req->assoclen + ivsize);
 
        scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
-       if (req->src != req->dst)
-               scatterwalk_map_and_copy(req->iv, req->dst,
-                                        req->assoclen, ivsize, 1);
 
        return crypto_aead_decrypt(subreq);
 }
@@ -495,85 +250,6 @@ static int seqiv_init(struct crypto_tfm *tfm)
        return err ?: skcipher_geniv_init(tfm);
 }
 
-static int seqiv_old_aead_init(struct crypto_tfm *tfm)
-{
-       struct crypto_aead *geniv = __crypto_aead_cast(tfm);
-       struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
-       int err;
-
-       spin_lock_init(&ctx->lock);
-
-       crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
-                               sizeof(struct aead_request));
-       err = 0;
-       if (!crypto_get_default_rng()) {
-               geniv->givencrypt = seqiv_aead_givencrypt;
-               err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
-                                          crypto_aead_ivsize(geniv));
-               crypto_put_default_rng();
-       }
-
-       return err ?: aead_geniv_init(tfm);
-}
-
-static int seqiv_aead_init_common(struct crypto_tfm *tfm, unsigned int reqsize)
-{
-       struct crypto_aead *geniv = __crypto_aead_cast(tfm);
-       struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
-       int err;
-
-       spin_lock_init(&ctx->geniv.lock);
-
-       crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));
-
-       err = crypto_get_default_rng();
-       if (err)
-               goto out;
-
-       err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
-                                  crypto_aead_ivsize(geniv));
-       crypto_put_default_rng();
-       if (err)
-               goto out;
-
-       ctx->null = crypto_get_default_null_skcipher();
-       err = PTR_ERR(ctx->null);
-       if (IS_ERR(ctx->null))
-               goto out;
-
-       err = aead_geniv_init(tfm);
-       if (err)
-               goto drop_null;
-
-       ctx->geniv.child = geniv->child;
-       geniv->child = geniv;
-
-out:
-       return err;
-
-drop_null:
-       crypto_put_default_null_skcipher();
-       goto out;
-}
-
-static int seqiv_aead_init(struct crypto_tfm *tfm)
-{
-       return seqiv_aead_init_common(tfm, sizeof(struct aead_request));
-}
-
-static int seqniv_aead_init(struct crypto_tfm *tfm)
-{
-       return seqiv_aead_init_common(tfm, sizeof(struct seqniv_request_ctx));
-}
-
-static void seqiv_aead_exit(struct crypto_tfm *tfm)
-{
-       struct seqiv_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       crypto_free_aead(ctx->geniv.child);
-       crypto_put_default_null_skcipher();
-}
-
 static int seqiv_ablkcipher_create(struct crypto_template *tmpl,
                                   struct rtattr **tb)
 {
@@ -609,33 +285,6 @@ free_inst:
        goto out;
 }
 
-static int seqiv_old_aead_create(struct crypto_template *tmpl,
-                                struct aead_instance *aead)
-{
-       struct crypto_instance *inst = aead_crypto_instance(aead);
-       int err = -EINVAL;
-
-       if (inst->alg.cra_aead.ivsize < sizeof(u64))
-               goto free_inst;
-
-       inst->alg.cra_init = seqiv_old_aead_init;
-       inst->alg.cra_exit = aead_geniv_exit;
-
-       inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize;
-       inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);
-
-       err = crypto_register_instance(tmpl, inst);
-       if (err)
-               goto free_inst;
-
-out:
-       return err;
-
-free_inst:
-       aead_geniv_free(aead);
-       goto out;
-}
-
 static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
        struct aead_instance *inst;
@@ -650,15 +299,9 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
 
        inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
 
-       if (inst->alg.base.cra_aead.encrypt)
-               return seqiv_old_aead_create(tmpl, inst);
-
        spawn = aead_instance_ctx(inst);
        alg = crypto_spawn_aead_alg(spawn);
 
-       if (alg->base.cra_aead.encrypt)
-               goto done;
-
        err = -EINVAL;
        if (inst->alg.ivsize != sizeof(u64))
                goto free_inst;
@@ -666,13 +309,12 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
        inst->alg.encrypt = seqiv_aead_encrypt;
        inst->alg.decrypt = seqiv_aead_decrypt;
 
-       inst->alg.base.cra_init = seqiv_aead_init;
-       inst->alg.base.cra_exit = seqiv_aead_exit;
+       inst->alg.init = aead_init_geniv;
+       inst->alg.exit = aead_exit_geniv;
 
-       inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx);
-       inst->alg.base.cra_ctxsize += inst->alg.base.cra_aead.ivsize;
+       inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
+       inst->alg.base.cra_ctxsize += inst->alg.ivsize;
 
-done:
        err = aead_register_instance(tmpl, inst);
        if (err)
                goto free_inst;
@@ -702,51 +344,6 @@ static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb)
        return err;
 }
 
-static int seqniv_create(struct crypto_template *tmpl, struct rtattr **tb)
-{
-       struct aead_instance *inst;
-       struct crypto_aead_spawn *spawn;
-       struct aead_alg *alg;
-       int err;
-
-       inst = aead_geniv_alloc(tmpl, tb, 0, 0);
-       err = PTR_ERR(inst);
-       if (IS_ERR(inst))
-               goto out;
-
-       spawn = aead_instance_ctx(inst);
-       alg = crypto_spawn_aead_alg(spawn);
-
-       if (alg->base.cra_aead.encrypt)
-               goto done;
-
-       err = -EINVAL;
-       if (inst->alg.ivsize != sizeof(u64))
-               goto free_inst;
-
-       inst->alg.encrypt = seqniv_aead_encrypt;
-       inst->alg.decrypt = seqniv_aead_decrypt;
-
-       inst->alg.base.cra_init = seqniv_aead_init;
-       inst->alg.base.cra_exit = seqiv_aead_exit;
-
-       inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
-       inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx);
-       inst->alg.base.cra_ctxsize += inst->alg.ivsize;
-
-done:
-       err = aead_register_instance(tmpl, inst);
-       if (err)
-               goto free_inst;
-
-out:
-       return err;
-
-free_inst:
-       aead_geniv_free(inst);
-       goto out;
-}
-
 static void seqiv_free(struct crypto_instance *inst)
 {
        if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
@@ -762,36 +359,13 @@ static struct crypto_template seqiv_tmpl = {
        .module = THIS_MODULE,
 };
 
-static struct crypto_template seqniv_tmpl = {
-       .name = "seqniv",
-       .create = seqniv_create,
-       .free = seqiv_free,
-       .module = THIS_MODULE,
-};
-
 static int __init seqiv_module_init(void)
 {
-       int err;
-
-       err = crypto_register_template(&seqiv_tmpl);
-       if (err)
-               goto out;
-
-       err = crypto_register_template(&seqniv_tmpl);
-       if (err)
-               goto out_undo_niv;
-
-out:
-       return err;
-
-out_undo_niv:
-       crypto_unregister_template(&seqiv_tmpl);
-       goto out;
+       return crypto_register_template(&seqiv_tmpl);
 }
 
 static void __exit seqiv_module_exit(void)
 {
-       crypto_unregister_template(&seqniv_tmpl);
        crypto_unregister_template(&seqiv_tmpl);
 }
 
@@ -801,4 +375,3 @@ module_exit(seqiv_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Sequence Number IV Generator");
 MODULE_ALIAS_CRYPTO("seqiv");
-MODULE_ALIAS_CRYPTO("seqniv");
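
With the old-style compat paths and the short-lived seqniv template deleted above, seqiv keeps only the new-style AEAD implementation: its context shrinks to the shared struct aead_geniv_ctx and init/exit become the common aead_init_geniv/aead_exit_geniv helpers. Instantiation is unchanged for users; an IPsec-style wrapping, as a sketch:

    /* "seqiv" now only wraps new-style AEADs with an 8-byte IV, as
     * enforced by the ivsize != sizeof(u64) check in seqiv_aead_create(). */
    struct crypto_aead *tfm;

    tfm = crypto_alloc_aead("seqiv(rfc4106(gcm(aes)))", 0, 0);
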
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
new file mode 100644 (file)
index 0000000..dd5fc1b
--- /dev/null
+++ b/crypto/skcipher.c
@@ -0,0 +1,245 @@
+/*
+ * Symmetric key cipher operations.
+ *
+ * Generic encrypt/decrypt wrapper for ciphers, handles operations across
+ * multiple page boundaries by using temporary blocks.  In user context,
+ * the kernel is given a chance to schedule us once per page.
+ *
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/internal/skcipher.h>
+#include <linux/bug.h>
+#include <linux/module.h>
+
+#include "internal.h"
+
+static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
+{
+       if (alg->cra_type == &crypto_blkcipher_type)
+               return sizeof(struct crypto_blkcipher *);
+
+       BUG_ON(alg->cra_type != &crypto_ablkcipher_type &&
+              alg->cra_type != &crypto_givcipher_type);
+
+       return sizeof(struct crypto_ablkcipher *);
+}
+
+static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
+                                    const u8 *key, unsigned int keylen)
+{
+       struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
+       struct crypto_blkcipher *blkcipher = *ctx;
+       int err;
+
+       crypto_blkcipher_clear_flags(blkcipher, ~0);
+       crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
+                                             CRYPTO_TFM_REQ_MASK);
+       err = crypto_blkcipher_setkey(blkcipher, key, keylen);
+       crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
+                                      CRYPTO_TFM_RES_MASK);
+
+       return err;
+}
+
+static int skcipher_crypt_blkcipher(struct skcipher_request *req,
+                                   int (*crypt)(struct blkcipher_desc *,
+                                                struct scatterlist *,
+                                                struct scatterlist *,
+                                                unsigned int))
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
+       struct blkcipher_desc desc = {
+               .tfm = *ctx,
+               .info = req->iv,
+               .flags = req->base.flags,
+       };
+
+
+       return crypt(&desc, req->dst, req->src, req->cryptlen);
+}
+
+static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
+{
+       struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+       struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+       struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
+
+       return skcipher_crypt_blkcipher(req, alg->encrypt);
+}
+
+static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
+{
+       struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+       struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+       struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
+
+       return skcipher_crypt_blkcipher(req, alg->decrypt);
+}
+
+static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
+{
+       struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
+
+       crypto_free_blkcipher(*ctx);
+}
+
+int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
+{
+       struct crypto_alg *calg = tfm->__crt_alg;
+       struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
+       struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
+       struct crypto_blkcipher *blkcipher;
+       struct crypto_tfm *btfm;
+
+       if (!crypto_mod_get(calg))
+               return -EAGAIN;
+
+       btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
+                                       CRYPTO_ALG_TYPE_MASK);
+       if (IS_ERR(btfm)) {
+               crypto_mod_put(calg);
+               return PTR_ERR(btfm);
+       }
+
+       blkcipher = __crypto_blkcipher_cast(btfm);
+       *ctx = blkcipher;
+       tfm->exit = crypto_exit_skcipher_ops_blkcipher;
+
+       skcipher->setkey = skcipher_setkey_blkcipher;
+       skcipher->encrypt = skcipher_encrypt_blkcipher;
+       skcipher->decrypt = skcipher_decrypt_blkcipher;
+
+       skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
+
+       return 0;
+}
+
+static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
+                                     const u8 *key, unsigned int keylen)
+{
+       struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
+       struct crypto_ablkcipher *ablkcipher = *ctx;
+       int err;
+
+       crypto_ablkcipher_clear_flags(ablkcipher, ~0);
+       crypto_ablkcipher_set_flags(ablkcipher,
+                                   crypto_skcipher_get_flags(tfm) &
+                                   CRYPTO_TFM_REQ_MASK);
+       err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
+       crypto_skcipher_set_flags(tfm,
+                                 crypto_ablkcipher_get_flags(ablkcipher) &
+                                 CRYPTO_TFM_RES_MASK);
+
+       return err;
+}
+
+static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
+                                    int (*crypt)(struct ablkcipher_request *))
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
+       struct ablkcipher_request *subreq = skcipher_request_ctx(req);
+
+       ablkcipher_request_set_tfm(subreq, *ctx);
+       ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
+                                       req->base.complete, req->base.data);
+       ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+                                    req->iv);
+
+       return crypt(subreq);
+}
+
+static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
+{
+       struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+       struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+       struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
+
+       return skcipher_crypt_ablkcipher(req, alg->encrypt);
+}
+
+static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
+{
+       struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+       struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+       struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
+
+       return skcipher_crypt_ablkcipher(req, alg->decrypt);
+}
+
+static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
+{
+       struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
+
+       crypto_free_ablkcipher(*ctx);
+}
+
+int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
+{
+       struct crypto_alg *calg = tfm->__crt_alg;
+       struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
+       struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
+       struct crypto_ablkcipher *ablkcipher;
+       struct crypto_tfm *abtfm;
+
+       if (!crypto_mod_get(calg))
+               return -EAGAIN;
+
+       abtfm = __crypto_alloc_tfm(calg, 0, 0);
+       if (IS_ERR(abtfm)) {
+               crypto_mod_put(calg);
+               return PTR_ERR(abtfm);
+       }
+
+       ablkcipher = __crypto_ablkcipher_cast(abtfm);
+       *ctx = ablkcipher;
+       tfm->exit = crypto_exit_skcipher_ops_ablkcipher;
+
+       skcipher->setkey = skcipher_setkey_ablkcipher;
+       skcipher->encrypt = skcipher_encrypt_ablkcipher;
+       skcipher->decrypt = skcipher_decrypt_ablkcipher;
+
+       skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+       skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
+                           sizeof(struct ablkcipher_request);
+
+       return 0;
+}
+
+static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
+{
+       if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
+               return crypto_init_skcipher_ops_blkcipher(tfm);
+
+       BUG_ON(tfm->__crt_alg->cra_type != &crypto_ablkcipher_type &&
+              tfm->__crt_alg->cra_type != &crypto_givcipher_type);
+
+       return crypto_init_skcipher_ops_ablkcipher(tfm);
+}
+
+static const struct crypto_type crypto_skcipher_type2 = {
+       .extsize = crypto_skcipher_extsize,
+       .init_tfm = crypto_skcipher_init_tfm,
+       .maskclear = ~CRYPTO_ALG_TYPE_MASK,
+       .maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
+       .type = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .tfmsize = offsetof(struct crypto_skcipher, base),
+};
+
+struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
+                                             u32 type, u32 mask)
+{
+       return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Symmetric key cipher type");
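
The new file above is deliberately thin: crypto_skcipher_extsize() reserves room for a single pointer, and init_tfm wires setkey/encrypt/decrypt to whichever legacy blkcipher or ablkcipher implementation sits behind the algorithm. A minimal usage sketch mirroring the testmgr conversion below (async completion handling and some error paths elided):

    #include <crypto/skcipher.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    static int skcipher_sketch(struct scatterlist *sg, unsigned int len,
                               u8 *iv, const u8 *key, unsigned int keylen)
    {
            struct crypto_skcipher *tfm;
            struct skcipher_request *req;
            int err;

            tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_skcipher_setkey(tfm, key, keylen);
            if (err)
                    goto out;

            req = skcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    err = -ENOMEM;
                    goto out;
            }

            skcipher_request_set_crypt(req, sg, sg, len, iv);
            err = crypto_skcipher_encrypt(req); /* may be -EINPROGRESS */

            skcipher_request_free(req);
    out:
            crypto_free_skcipher(tfm);
            return err;
    }
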
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 9f6f10b498ba4aab02849c760b1da61bd5a7fd99..2b00b617daab537dba7657b6fbc569d440db931f 100644 (file)
@@ -73,6 +73,22 @@ static char *check[] = {
        "lzo", "cts", "zlib", NULL
 };
 
+struct tcrypt_result {
+       struct completion completion;
+       int err;
+};
+
+static void tcrypt_complete(struct crypto_async_request *req, int err)
+{
+       struct tcrypt_result *res = req->data;
+
+       if (err == -EINPROGRESS)
+               return;
+
+       res->err = err;
+       complete(&res->completion);
+}
+
 static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc,
                               struct scatterlist *sg, int blen, int secs)
 {
@@ -143,6 +159,20 @@ out:
        return ret;
 }
 
+static inline int do_one_aead_op(struct aead_request *req, int ret)
+{
+       if (ret == -EINPROGRESS || ret == -EBUSY) {
+               struct tcrypt_result *tr = req->base.data;
+
+               ret = wait_for_completion_interruptible(&tr->completion);
+               if (!ret)
+                       ret = tr->err;
+               reinit_completion(&tr->completion);
+       }
+
+       return ret;
+}
+
 static int test_aead_jiffies(struct aead_request *req, int enc,
                                int blen, int secs)
 {
@@ -153,9 +183,9 @@ static int test_aead_jiffies(struct aead_request *req, int enc,
        for (start = jiffies, end = start + secs * HZ, bcount = 0;
             time_before(jiffies, end); bcount++) {
                if (enc)
-                       ret = crypto_aead_encrypt(req);
+                       ret = do_one_aead_op(req, crypto_aead_encrypt(req));
                else
-                       ret = crypto_aead_decrypt(req);
+                       ret = do_one_aead_op(req, crypto_aead_decrypt(req));
 
                if (ret)
                        return ret;
@@ -177,9 +207,9 @@ static int test_aead_cycles(struct aead_request *req, int enc, int blen)
        /* Warm-up run. */
        for (i = 0; i < 4; i++) {
                if (enc)
-                       ret = crypto_aead_encrypt(req);
+                       ret = do_one_aead_op(req, crypto_aead_encrypt(req));
                else
-                       ret = crypto_aead_decrypt(req);
+                       ret = do_one_aead_op(req, crypto_aead_decrypt(req));
 
                if (ret)
                        goto out;
@@ -191,9 +221,9 @@ static int test_aead_cycles(struct aead_request *req, int enc, int blen)
 
                start = get_cycles();
                if (enc)
-                       ret = crypto_aead_encrypt(req);
+                       ret = do_one_aead_op(req, crypto_aead_encrypt(req));
                else
-                       ret = crypto_aead_decrypt(req);
+                       ret = do_one_aead_op(req, crypto_aead_decrypt(req));
                end = get_cycles();
 
                if (ret)
@@ -286,6 +316,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
        char *axbuf[XBUFSIZE];
        unsigned int *b_size;
        unsigned int iv_len;
+       struct tcrypt_result result;
 
        iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
        if (!iv)
@@ -321,6 +352,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
                goto out_notfm;
        }
 
+       init_completion(&result.completion);
        printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
                        get_driver_name(crypto_aead, tfm), e);
 
@@ -331,6 +363,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
                goto out_noreq;
        }
 
+       aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                 tcrypt_complete, &result);
+
        i = 0;
        do {
                b_size = aead_sizes;
@@ -749,22 +784,6 @@ out:
        crypto_free_hash(tfm);
 }
 
-struct tcrypt_result {
-       struct completion completion;
-       int err;
-};
-
-static void tcrypt_complete(struct crypto_async_request *req, int err)
-{
-       struct tcrypt_result *res = req->data;
-
-       if (err == -EINPROGRESS)
-               return;
-
-       res->err = err;
-       complete(&res->completion);
-}
-
 static inline int do_one_ahash_op(struct ahash_request *req, int ret)
 {
        if (ret == -EINPROGRESS || ret == -EBUSY) {
@@ -1759,14 +1778,27 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
 
        case 211:
                test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec,
+                               NULL, 0, 16, 16, aead_speed_template_20);
+               test_aead_speed("gcm(aes)", ENCRYPT, sec,
                                NULL, 0, 16, 8, aead_speed_template_20);
                break;
 
        case 212:
                test_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec,
-                               NULL, 0, 16, 8, aead_speed_template_19);
+                               NULL, 0, 16, 16, aead_speed_template_19);
+               break;
+
+       case 213:
+               test_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT, sec,
+                               NULL, 0, 16, 8, aead_speed_template_36);
+               break;
+
+       case 214:
+               test_cipher_speed("chacha20", ENCRYPT, sec, NULL, 0,
+                                 speed_template_32);
                break;
 
+
        case 300:
                if (alg) {
                        test_hash_speed(alg, sec, generic_hash_speed_template);
@@ -1855,6 +1887,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
                test_hash_speed("crct10dif", sec, generic_hash_speed_template);
                if (mode > 300 && mode < 400) break;
 
+       case 321:
+               test_hash_speed("poly1305", sec, poly1305_speed_template);
+               if (mode > 300 && mode < 400) break;
+
        case 399:
                break;
 
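The tcrypt changes above make the AEAD speed tests safe for asynchronous drivers: a completion callback is installed on the request and every encrypt/decrypt funnels through do_one_aead_op(), which waits out -EINPROGRESS/-EBUSY rather than counting them as failures. The idiom in isolation (slightly simplified from do_one_aead_op() above):

    ret = crypto_aead_encrypt(req);
    if (ret == -EINPROGRESS || ret == -EBUSY) {
            wait_for_completion(&result.completion);
            ret = result.err;
            reinit_completion(&result.completion);
    }
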
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 6cc1b856871b96d5b0d9064fc6ae028649fe89b6..f0bfee1bb293809355d1c7455042fc5e7753b826 100644 (file)
@@ -61,12 +61,14 @@ static u8 speed_template_32_40_48[] = {32, 40, 48, 0};
 static u8 speed_template_32_48[] = {32, 48, 0};
 static u8 speed_template_32_48_64[] = {32, 48, 64, 0};
 static u8 speed_template_32_64[] = {32, 64, 0};
+static u8 speed_template_32[] = {32, 0};
 
 /*
  * AEAD speed tests
  */
 static u8 aead_speed_template_19[] = {19, 0};
 static u8 aead_speed_template_20[] = {20, 0};
+static u8 aead_speed_template_36[] = {36, 0};
 
 /*
  * Digest speed tests
@@ -127,4 +129,22 @@ static struct hash_speed hash_speed_template_16[] = {
        {  .blen = 0,   .plen = 0,      .klen = 0, }
 };
 
+static struct hash_speed poly1305_speed_template[] = {
+       { .blen = 96,   .plen = 16, },
+       { .blen = 96,   .plen = 32, },
+       { .blen = 96,   .plen = 96, },
+       { .blen = 288,  .plen = 16, },
+       { .blen = 288,  .plen = 32, },
+       { .blen = 288,  .plen = 288, },
+       { .blen = 1056, .plen = 32, },
+       { .blen = 1056, .plen = 1056, },
+       { .blen = 2080, .plen = 32, },
+       { .blen = 2080, .plen = 2080, },
+       { .blen = 4128, .plen = 4128, },
+       { .blen = 8224, .plen = 8224, },
+
+       /* End marker */
+       {  .blen = 0,   .plen = 0, }
+};
+
 #endif /* _CRYPTO_TCRYPT_H */
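
For the poly1305 template above, each {blen, plen} row makes the speed test hash blen bytes in plen-sized updates. The block lengths are the usual power-of-two payloads plus 32 bytes (96 = 64 + 32, 1056 = 1024 + 32, and so on), which appears to account for the 32-byte one-time key poly1305 consumes from the start of the data stream. Roughly (a sketch using the shash API; tcrypt itself still drives the older crypto_hash interface here):

    /* How one template row is consumed: */
    for (off = 0; off < speed->blen; off += speed->plen)
            crypto_shash_update(desc, buf + off, speed->plen);
    crypto_shash_final(desc, output);
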
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index d0a42bd3aae926fcfef0624c6497452e9a0741c5..35c2de13697182ba22190ea6d0c05d46c6b905be 100644 (file)
@@ -22,6 +22,7 @@
 
 #include <crypto/aead.h>
 #include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/err.h>
 #include <linux/fips.h>
 #include <linux/module.h>
@@ -921,15 +922,15 @@ out_nobuf:
        return ret;
 }
 
-static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
+static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
                           struct cipher_testvec *template, unsigned int tcount,
                           const bool diff_dst, const int align_offset)
 {
        const char *algo =
-               crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm));
+               crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm));
        unsigned int i, j, k, n, temp;
        char *q;
-       struct ablkcipher_request *req;
+       struct skcipher_request *req;
        struct scatterlist sg[8];
        struct scatterlist sgout[8];
        const char *e, *d;
@@ -958,15 +959,15 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
 
        init_completion(&result.completion);
 
-       req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+       req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                pr_err("alg: skcipher%s: Failed to allocate request for %s\n",
                       d, algo);
                goto out;
        }
 
-       ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                       tcrypt_complete, &result);
+       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                     tcrypt_complete, &result);
 
        j = 0;
        for (i = 0; i < tcount; i++) {
@@ -987,15 +988,16 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
                data += align_offset;
                memcpy(data, template[i].input, template[i].ilen);
 
-               crypto_ablkcipher_clear_flags(tfm, ~0);
+               crypto_skcipher_clear_flags(tfm, ~0);
                if (template[i].wk)
-                       crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+                       crypto_skcipher_set_flags(tfm,
+                                                 CRYPTO_TFM_REQ_WEAK_KEY);
 
-               ret = crypto_ablkcipher_setkey(tfm, template[i].key,
-                                              template[i].klen);
+               ret = crypto_skcipher_setkey(tfm, template[i].key,
+                                            template[i].klen);
                if (!ret == template[i].fail) {
                        pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n",
-                              d, j, algo, crypto_ablkcipher_get_flags(tfm));
+                              d, j, algo, crypto_skcipher_get_flags(tfm));
                        goto out;
                } else if (ret)
                        continue;
@@ -1007,10 +1009,10 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
                        sg_init_one(&sgout[0], data, template[i].ilen);
                }
 
-               ablkcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
-                                            template[i].ilen, iv);
-               ret = enc ? crypto_ablkcipher_encrypt(req) :
-                           crypto_ablkcipher_decrypt(req);
+               skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
+                                          template[i].ilen, iv);
+               ret = enc ? crypto_skcipher_encrypt(req) :
+                           crypto_skcipher_decrypt(req);
 
                switch (ret) {
                case 0:
@@ -1054,15 +1056,16 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
                        memset(iv, 0, MAX_IVLEN);
 
                j++;
-               crypto_ablkcipher_clear_flags(tfm, ~0);
+               crypto_skcipher_clear_flags(tfm, ~0);
                if (template[i].wk)
-                       crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+                       crypto_skcipher_set_flags(tfm,
+                                                 CRYPTO_TFM_REQ_WEAK_KEY);
 
-               ret = crypto_ablkcipher_setkey(tfm, template[i].key,
-                                              template[i].klen);
+               ret = crypto_skcipher_setkey(tfm, template[i].key,
+                                            template[i].klen);
                if (!ret == template[i].fail) {
                        pr_err("alg: skcipher%s: setkey failed on chunk test %d for %s: flags=%x\n",
-                              d, j, algo, crypto_ablkcipher_get_flags(tfm));
+                              d, j, algo, crypto_skcipher_get_flags(tfm));
                        goto out;
                } else if (ret)
                        continue;
@@ -1100,11 +1103,11 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
                        temp += template[i].tap[k];
                }
 
-               ablkcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
-                                            template[i].ilen, iv);
+               skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
+                                          template[i].ilen, iv);
 
-               ret = enc ? crypto_ablkcipher_encrypt(req) :
-                           crypto_ablkcipher_decrypt(req);
+               ret = enc ? crypto_skcipher_encrypt(req) :
+                           crypto_skcipher_decrypt(req);
 
                switch (ret) {
                case 0:
@@ -1157,7 +1160,7 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
        ret = 0;
 
 out:
-       ablkcipher_request_free(req);
+       skcipher_request_free(req);
        if (diff_dst)
                testmgr_free_buf(xoutbuf);
 out_nooutbuf:
@@ -1166,7 +1169,7 @@ out_nobuf:
        return ret;
 }
 
-static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
+static int test_skcipher(struct crypto_skcipher *tfm, int enc,
                         struct cipher_testvec *template, unsigned int tcount)
 {
        unsigned int alignmask;
@@ -1578,10 +1581,10 @@ out:
 static int alg_test_skcipher(const struct alg_test_desc *desc,
                             const char *driver, u32 type, u32 mask)
 {
-       struct crypto_ablkcipher *tfm;
+       struct crypto_skcipher *tfm;
        int err = 0;
 
-       tfm = crypto_alloc_ablkcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
+       tfm = crypto_alloc_skcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
        if (IS_ERR(tfm)) {
                printk(KERN_ERR "alg: skcipher: Failed to load transform for "
                       "%s: %ld\n", driver, PTR_ERR(tfm));
@@ -1600,7 +1603,7 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
                                    desc->suite.cipher.dec.count);
 
 out:
-       crypto_free_ablkcipher(tfm);
+       crypto_free_skcipher(tfm);
        return err;
 }
 
@@ -2476,6 +2479,7 @@ static const struct alg_test_desc alg_test_descs[] = {
                }
        }, {
                .alg = "cmac(aes)",
+               .fips_allowed = 1,
                .test = alg_test_hash,
                .suite = {
                        .hash = {
@@ -2485,6 +2489,7 @@ static const struct alg_test_desc alg_test_descs[] = {
                }
        }, {
                .alg = "cmac(des3_ede)",
+               .fips_allowed = 1,
                .test = alg_test_hash,
                .suite = {
                        .hash = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 868edf11704142deec8e05c44a2edfb37661ad27..64b8a8082645da7ddb69f1dda35f52b4f746a486 100644 (file)
@@ -14504,6 +14504,9 @@ static struct cipher_testvec aes_cbc_enc_tv_template[] = {
                .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
                          "\x27\x08\x94\x2d\xbe\x77\x18\x1a",
                .rlen   = 16,
+               .also_non_np = 1,
+               .np     = 8,
+               .tap    = { 3, 2, 3, 2, 3, 1, 1, 1 },
        }, {
                .key    = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0"
                          "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a",
@@ -14723,6 +14726,9 @@ static struct cipher_testvec aes_cbc_dec_tv_template[] = {
                .ilen   = 16,
                .result = "Single block msg",
                .rlen   = 16,
+               .also_non_np = 1,
+               .np     = 8,
+               .tap    = { 3, 2, 3, 2, 3, 1, 1, 1 },
        }, {
                .key    = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0"
                          "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a",
@@ -15032,6 +15038,9 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
                .klen   = 8 + 20 + 16,
                .iv     = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
                          "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+               .assoc  = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
+                         "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+               .alen   = 16,
                .input  = "Single block msg",
                .ilen   = 16,
                .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
@@ -15057,6 +15066,9 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
                .klen   = 8 + 20 + 16,
                .iv     = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
                          "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
+               .assoc  = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
+                         "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
+               .alen   = 16,
                .input  = "\x00\x01\x02\x03\x04\x05\x06\x07"
                          "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
                          "\x10\x11\x12\x13\x14\x15\x16\x17"
@@ -15087,6 +15099,9 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
                .klen   = 8 + 20 + 16,
                .iv     = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
                          "\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
+               .assoc  = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
+                         "\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
+               .alen   = 16,
                .input  = "This is a 48-byte message (exactly 3 AES blocks)",
                .ilen   = 48,
                .result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53"
@@ -15116,6 +15131,9 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
                .klen   = 8 + 20 + 16,
                .iv     = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
                          "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
+               .assoc  = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
+                         "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
+               .alen   = 16,
                .input  = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
                          "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
                          "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
@@ -15154,8 +15172,10 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
                .klen   = 8 + 20 + 16,
                .iv     = "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
                          "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
-               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-               .alen   = 8,
+               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+                         "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
+                         "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
+               .alen   = 24,
                .input  = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00"
                          "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00"
                          "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
@@ -15199,6 +15219,9 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
                .klen   = 8 + 20 + 24,
                .iv     = "\x00\x01\x02\x03\x04\x05\x06\x07"
                          "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
+                         "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+               .alen   = 16,
                .input  = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
                          "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
                          "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
@@ -15239,6 +15262,9 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
                .klen   = 8 + 20 + 32,
                .iv     = "\x00\x01\x02\x03\x04\x05\x06\x07"
                          "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
+                         "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+               .alen   = 16,
                .input  = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
                          "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
                          "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
@@ -15374,6 +15400,9 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
                .klen   = 8 + 32 + 16,
                .iv     = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
                          "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+               .assoc  = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
+                         "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+               .alen   = 16,
                .input  = "Single block msg",
                .ilen   = 16,
                .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
@@ -15401,6 +15430,9 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
                .klen   = 8 + 32 + 16,
                .iv     = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
                          "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
+               .assoc  = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
+                         "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
+               .alen   = 16,
                .input  = "\x00\x01\x02\x03\x04\x05\x06\x07"
                          "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
                          "\x10\x11\x12\x13\x14\x15\x16\x17"
@@ -15433,6 +15465,9 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
                .klen   = 8 + 32 + 16,
                .iv     = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
                          "\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
+               .assoc  = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
+                         "\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
+               .alen   = 16,
                .input  = "This is a 48-byte message (exactly 3 AES blocks)",
                .ilen   = 48,
                .result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53"
@@ -15464,6 +15499,9 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
                .klen   = 8 + 32 + 16,
                .iv     = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
                          "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
+               .assoc  = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
+                         "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
+               .alen   = 16,
                .input  = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
                          "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
                          "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
@@ -15504,8 +15542,10 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
                .klen   = 8 + 32 + 16,
                .iv     = "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
                          "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
-               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-               .alen   = 8,
+               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+                         "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
+                         "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
+               .alen   = 24,
                .input  = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00"
                          "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00"
                          "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
@@ -15551,6 +15591,9 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
                .klen   = 8 + 32 + 24,
                .iv     = "\x00\x01\x02\x03\x04\x05\x06\x07"
                          "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
+                         "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+               .alen   = 16,
                .input  = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
                          "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
                          "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
@@ -15593,6 +15636,9 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
                .klen   = 8 + 32 + 32,
                .iv     = "\x00\x01\x02\x03\x04\x05\x06\x07"
                          "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
+                         "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+               .alen   = 16,
                .input  = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
                          "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
                          "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
@@ -15641,6 +15687,9 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
                .klen   = 8 + 64 + 16,
                .iv     = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
                          "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+               .assoc  = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
+                         "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+               .alen   = 16,
                .input  = "Single block msg",
                .ilen   = 16,
                .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
@@ -15676,6 +15725,9 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
                .klen   = 8 + 64 + 16,
                .iv     = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
                          "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
+               .assoc  = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
+                         "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
+               .alen   = 16,
                .input  = "\x00\x01\x02\x03\x04\x05\x06\x07"
                          "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
                          "\x10\x11\x12\x13\x14\x15\x16\x17"
@@ -15716,6 +15768,9 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
                .klen   = 8 + 64 + 16,
                .iv     = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
                          "\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
+               .assoc  = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
+                         "\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
+               .alen   = 16,
                .input  = "This is a 48-byte message (exactly 3 AES blocks)",
                .ilen   = 48,
                .result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53"
@@ -15755,6 +15810,9 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
                .klen   = 8 + 64 + 16,
                .iv     = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
                          "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
+               .assoc  = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
+                         "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
+               .alen   = 16,
                .input  = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
                          "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
                          "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
@@ -15803,8 +15861,10 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
                .klen   = 8 + 64 + 16,
                .iv     = "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
                          "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
-               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-               .alen   = 8,
+               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+                         "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
+                         "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
+               .alen   = 24,
                .input  = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00"
                          "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00"
                          "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
@@ -15858,6 +15918,9 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
                .klen   = 8 + 64 + 24,
                .iv     = "\x00\x01\x02\x03\x04\x05\x06\x07"
                          "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
+                         "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+               .alen   = 16,
                .input  = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
                          "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
                          "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
@@ -15908,6 +15971,9 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
                .klen   = 8 + 64 + 32,
                .iv     = "\x00\x01\x02\x03\x04\x05\x06\x07"
                          "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
+                         "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+               .alen   = 16,
                .input  = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
                          "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
                          "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
@@ -15955,8 +16021,9 @@ static struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
                          "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24",
                .klen   = 8 + 20 + 8,
                .iv     = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-               .alen   = 8,
+               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+                         "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+               .alen   = 16,
                .input  = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
                          "\x53\x20\x63\x65\x65\x72\x73\x74"
                          "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16015,8 +16082,9 @@ static struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
                          "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24",
                .klen   = 8 + 24 + 8,
                .iv     = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-               .alen   = 8,
+               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+                         "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+               .alen   = 16,
                .input  = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
                          "\x53\x20\x63\x65\x65\x72\x73\x74"
                          "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16076,8 +16144,9 @@ static struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
                          "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24",
                .klen   = 8 + 32 + 8,
                .iv     = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-               .alen   = 8,
+               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+                         "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+               .alen   = 16,
                .input  = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
                          "\x53\x20\x63\x65\x65\x72\x73\x74"
                          "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16140,8 +16209,9 @@ static struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
                          "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24",
                .klen   = 8 + 48 + 8,
                .iv     = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-               .alen   = 8,
+               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+                         "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+               .alen   = 16,
                .input  = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
                          "\x53\x20\x63\x65\x65\x72\x73\x74"
                          "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16208,8 +16278,9 @@ static struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
                          "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24",
                .klen   = 8 + 64 + 8,
                .iv     = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-               .alen   = 8,
+               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+                         "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+               .alen   = 16,
                .input  = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
                          "\x53\x20\x63\x65\x65\x72\x73\x74"
                          "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16275,8 +16346,9 @@ static struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
                          "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
                .klen   = 8 + 20 + 24,
                .iv     = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-               .alen   = 8,
+               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+                         "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+               .alen   = 16,
                .input  = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
                          "\x53\x20\x63\x65\x65\x72\x73\x74"
                  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16337,8 +16409,9 @@ static struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
                          "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
                .klen   = 8 + 24 + 24,
                .iv     = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-               .alen   = 8,
+               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+                         "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+               .alen   = 16,
                .input  = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
                          "\x53\x20\x63\x65\x65\x72\x73\x74"
                          "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16400,8 +16473,9 @@ static struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
                          "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
                .klen   = 8 + 32 + 24,
                .iv     = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-               .alen   = 8,
+               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+                         "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+               .alen   = 16,
                .input  = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
                          "\x53\x20\x63\x65\x65\x72\x73\x74"
                          "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16466,8 +16540,9 @@ static struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
                          "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
                .klen   = 8 + 48 + 24,
                .iv     = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-       .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-               .alen   = 8,
+               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+                         "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+               .alen   = 16,
                .input  = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
                          "\x53\x20\x63\x65\x65\x72\x73\x74"
                          "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16536,8 +16611,9 @@ static struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
                          "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
                .klen   = 8 + 64 + 24,
                .iv     = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-               .alen   = 8,
+               .assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+                         "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+               .alen   = 16,
                .input  = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
                          "\x53\x20\x63\x65\x65\x72\x73\x74"
                          "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -20129,149 +20205,150 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = {
 };
 
 static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
-        { /* Generated using Crypto++ */
+       { /* Generated using Crypto++ */
                .key    = zeroed_string,
                .klen   = 20,
-                .iv     = zeroed_string,
-                .input  = zeroed_string,
-                .ilen   = 16,
-                .assoc  = zeroed_string,
-                .alen   = 8,
+               .iv     = zeroed_string,
+               .input  = zeroed_string,
+               .ilen   = 16,
+               .assoc  = zeroed_string,
+               .alen   = 16,
                .result = "\x03\x88\xDA\xCE\x60\xB6\xA3\x92"
-                          "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
-                          "\x97\xFE\x4C\x23\x37\x42\x01\xE0"
-                          "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
+                         "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
+                         "\x97\xFE\x4C\x23\x37\x42\x01\xE0"
+                         "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
                .rlen   = 32,
-        },{
+       },{
                .key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
                          "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+                         "\x00\x00\x00\x00",
                .klen   = 20,
-                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
-                          "\x00\x00\x00\x00",
-                .input  = zeroed_string,
-                .ilen   = 16,
-                .assoc  = zeroed_string,
-                .alen   = 8,
+               .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
+               .input  = zeroed_string,
+               .ilen   = 16,
+               .assoc  = "\x00\x00\x00\x00\x00\x00\x00\x00"
+                         "\x00\x00\x00\x00\x00\x00\x00\x01",
+               .alen   = 16,
                .result = "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18"
-                          "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
-                          "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
-                          "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
+                         "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
+                         "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
+                         "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
                .rlen   = 32,
 
-        }, {
+       }, {
                .key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
                          "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+                         "\x00\x00\x00\x00",
                .klen   = 20,
-                .iv     = zeroed_string,
-                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .ilen   = 16,
-                .assoc  = zeroed_string,
-                .alen   = 8,
+               .iv     = zeroed_string,
+               .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01",
+               .ilen   = 16,
+               .assoc  = zeroed_string,
+               .alen   = 16,
                .result = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
-                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
-                          "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
-                          "\xB1\x68\xFD\x14\x52\x64\x61\xB2",
+                         "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+                         "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
+                         "\xB1\x68\xFD\x14\x52\x64\x61\xB2",
                .rlen   = 32,
-        }, {
+       }, {
                .key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
                          "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+                         "\x00\x00\x00\x00",
                .klen   = 20,
-                .iv     = zeroed_string,
-                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .ilen   = 16,
-                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .alen   = 8,
+               .iv     = zeroed_string,
+               .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01",
+               .ilen   = 16,
+               .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x00\x00\x00\x00\x00\x00\x00\x00",
+               .alen   = 16,
                .result = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
-                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
-                          "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63"
-                          "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5",
+                         "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+                         "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63"
+                         "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5",
                .rlen   = 32,
-        }, {
+       }, {
                .key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
                          "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+                         "\x00\x00\x00\x00",
                .klen   = 20,
-                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
-                          "\x00\x00\x00\x00",
-                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .ilen   = 16,
-                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .alen   = 8,
+               .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
+               .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01",
+               .ilen   = 16,
+               .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x00\x00\x00\x00\x00\x00\x00\x01",
+               .alen   = 16,
                .result = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
-                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
-                          "\x64\x50\xF9\x32\x13\xFB\x74\x61"
-                          "\xF4\xED\x52\xD3\xC5\x10\x55\x3C",
+                         "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+                         "\x64\x50\xF9\x32\x13\xFB\x74\x61"
+                         "\xF4\xED\x52\xD3\xC5\x10\x55\x3C",
                .rlen   = 32,
-        }, {
+       }, {
                .key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
                          "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+                         "\x00\x00\x00\x00",
                .klen   = 20,
-                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
-                          "\x00\x00\x00\x00",
-                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .ilen   = 64,
-                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .alen   = 8,
+               .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
+               .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01",
+               .ilen   = 64,
+               .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x00\x00\x00\x00\x00\x00\x00\x01",
+               .alen   = 16,
                .result = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
-                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
-                          "\x98\x14\xA1\x42\x37\x80\xFD\x90"
-                          "\x68\x12\x01\xA8\x91\x89\xB9\x83"
-                          "\x5B\x11\x77\x12\x9B\xFF\x24\x89"
-                          "\x94\x5F\x18\x12\xBA\x27\x09\x39"
-                          "\x99\x96\x76\x42\x15\x1C\xCD\xCB"
-                          "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD"
-                          "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85"
-                          "\xBD\xCF\x62\x98\x58\x14\xE5\xBD",
+                         "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+                         "\x98\x14\xA1\x42\x37\x80\xFD\x90"
+                         "\x68\x12\x01\xA8\x91\x89\xB9\x83"
+                         "\x5B\x11\x77\x12\x9B\xFF\x24\x89"
+                         "\x94\x5F\x18\x12\xBA\x27\x09\x39"
+                         "\x99\x96\x76\x42\x15\x1C\xCD\xCB"
+                         "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD"
+                         "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85"
+                         "\xBD\xCF\x62\x98\x58\x14\xE5\xBD",
                .rlen   = 80,
-        }, {
+       }, {
                .key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
                          "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
-                          "\x00\x00\x00\x00",
+                         "\x00\x00\x00\x00",
                .klen   = 20,
-                .iv     = "\x00\x00\x45\x67\x89\xab\xcd\xef"
-                          "\x00\x00\x00\x00",
-                .input  = "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff",
-                .ilen   = 192,
-                .assoc  = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
-                          "\xaa\xaa\xaa\xaa",
-                .alen   = 12,
+               .iv     = "\x00\x00\x45\x67\x89\xab\xcd\xef",
+               .input  = "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff",
+               .ilen   = 192,
+               .assoc  = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+                         "\xaa\xaa\xaa\xaa\x00\x00\x45\x67"
+                         "\x89\xab\xcd\xef",
+               .alen   = 20,
                .result = "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE"
                          "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A"
                          "\x44\x6D\xC3\x88\x46\x2E\xC2\x01"
@@ -20316,8 +20393,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
                          "\x00\x21\x00\x01\x01\x02\x02\x01",
                .ilen   = 72,
                .assoc  = "\x00\x00\x43\x21\x87\x65\x43\x21"
-                         "\x00\x00\x00\x00",
-               .alen   = 12,
+                         "\x00\x00\x00\x00\x49\x56\xED\x7E"
+                         "\x3B\x24\x4C\xFE",
+               .alen   = 20,
                .result = "\xFE\xCF\x53\x7E\x72\x9D\x5B\x07"
                          "\xDC\x30\xDF\x52\x8D\xD2\x2B\x76"
                          "\x8D\x1B\x98\x73\x66\x96\xA6\xFD"
@@ -20345,8 +20423,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
                          "\x65\x72\x63\x69\x74\x79\x02\x64"
                          "\x6B\x00\x00\x01\x00\x01\x00\x01",
                .ilen   = 64,
-               .assoc  = "\x00\x00\xA5\xF8\x00\x00\x00\x0A",
-               .alen   = 8,
+               .assoc  = "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+                         "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+               .alen   = 16,
                .result = "\xDE\xB2\x2C\xD9\xB0\x7C\x72\xC1"
                          "\x6E\x3A\x65\xBE\xEB\x8D\xF3\x04"
                          "\xA5\xA5\x89\x7D\x33\xAE\x53\x0F"
@@ -20374,8 +20453,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
                          "\x02\x04\x05\xB4\x01\x01\x04\x02"
                          "\x01\x02\x02\x01",
                .ilen   = 52,
-               .assoc  = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02",
-               .alen   = 8,
+               .assoc  = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02"
+                         "\x01\x02\x03\x04\x05\x06\x07\x08",
+               .alen   = 16,
                .result = "\xFF\x42\x5C\x9B\x72\x45\x99\xDF"
                          "\x7A\x3B\xCD\x51\x01\x94\xE0\x0D"
                          "\x6A\x78\x10\x7F\x1B\x0B\x1C\xBF"
@@ -20401,8 +20481,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
                          "\x75\x76\x77\x61\x62\x63\x64\x65"
                          "\x66\x67\x68\x69\x01\x02\x02\x01",
                .ilen   = 64,
-               .assoc  = "\x00\x00\x00\x00\x00\x00\x00\x01",
-               .alen   = 8,
+               .assoc  = "\x00\x00\x00\x00\x00\x00\x00\x01"
+                         "\x00\x00\x00\x00\x00\x00\x00\x00",
+               .alen   = 16,
                .result = "\x46\x88\xDA\xF2\xF9\x73\xA3\x92"
                          "\x73\x29\x09\xC3\x31\xD5\x6D\x60"
                          "\xF6\x94\xAB\xAA\x41\x4B\x5E\x7F"
@@ -20430,8 +20511,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
                          "\x66\x67\x68\x69\x01\x02\x02\x01",
                .ilen   = 64,
                .assoc  = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
-                         "\x10\x10\x10\x10",
-               .alen   = 12,
+                         "\x10\x10\x10\x10\x4E\x28\x00\x00"
+                         "\xA2\xFC\xA1\xA3",
+               .alen   = 20,
                .result = "\xFB\xA2\xCA\xA4\x85\x3C\xF9\xF0"
                          "\xF2\x2C\xB1\x0D\x86\xDD\x83\xB0"
                          "\xFE\xC7\x56\x91\xCF\x1A\x04\xB0"
@@ -20455,8 +20537,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
                          "\x01\x02\x02\x01",
                .ilen   = 28,
                .assoc  = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
-                         "\x10\x10\x10\x10",
-               .alen   = 12,
+                         "\x10\x10\x10\x10\x4E\x28\x00\x00"
+                         "\xA2\xFC\xA1\xA3",
+               .alen   = 20,
                .result = "\xFB\xA2\xCA\x84\x5E\x5D\xF9\xF0"
                          "\xF2\x2C\x3E\x6E\x86\xDD\x83\x1E"
                          "\x1F\xC6\x57\x92\xCD\x1A\xF9\x13"
@@ -20477,8 +20560,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
                          "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E"
                          "\x50\x10\x16\xD0\x75\x68\x00\x01",
                .ilen   = 40,
-               .assoc  = "\x00\x00\xA5\xF8\x00\x00\x00\x0A",
-               .alen   = 8,
+               .assoc  = "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+                         "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+               .alen   = 16,
                .result = "\xA5\xB1\xF8\x06\x60\x29\xAE\xA4"
                          "\x0E\x59\x8B\x81\x22\xDE\x02\x42"
                          "\x09\x38\xB3\xAB\x33\xF8\x28\xE6"
@@ -20505,8 +20589,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
                          "\x23\x01\x01\x01",
                .ilen   = 76,
                .assoc  = "\x00\x00\x01\x00\x00\x00\x00\x00"
-                         "\x00\x00\x00\x01",
-               .alen   = 12,
+                         "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+                         "\xCE\xFA\xCE\x74",
+               .alen   = 20,
                .result = "\x18\xA6\xFD\x42\xF7\x2C\xBF\x4A"
                          "\xB2\xA2\xEA\x90\x1F\x73\xD8\x14"
                          "\xE3\xE7\xF2\x43\xD9\x54\x12\xE1"
@@ -20535,8 +20620,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
                          "\x50\x10\x1F\x64\x6D\x54\x00\x01",
                .ilen   = 40,
                .assoc  = "\x17\x40\x5E\x67\x15\x6F\x31\x26"
-                         "\xDD\x0D\xB9\x9B",
-               .alen   = 12,
+                         "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+                         "\x69\x76\x65\x63",
+               .alen   = 20,
                .result = "\xF2\xD6\x9E\xCD\xBD\x5A\x0D\x5B"
                          "\x8D\x5E\xF3\x8B\xAD\x4D\xA5\x8D"
                          "\x1F\x27\x8F\xDE\x98\xEF\x67\x54"
@@ -20563,8 +20649,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
                          "\x15\x01\x01\x01",
                .ilen   = 76,
                .assoc  = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
-                         "\x10\x10\x10\x10",
-               .alen   = 12,
+                         "\x10\x10\x10\x10\x4E\x28\x00\x00"
+                         "\xA2\xFC\xA1\xA3",
+               .alen   = 20,
                .result = "\xFB\xA2\xCA\xD1\x2F\xC1\xF9\xF0"
                          "\x0D\x3C\xEB\xF3\x05\x41\x0D\xB8"
                          "\x3D\x77\x84\xB6\x07\x32\x3D\x22"
@@ -20597,8 +20684,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
                          "\x72\x72\x6F\x77\x01\x02\x02\x01",
                .ilen   = 72,
                .assoc  = "\x17\x40\x5E\x67\x15\x6F\x31\x26"
-                         "\xDD\x0D\xB9\x9B",
-               .alen   = 12,
+                         "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+                         "\x69\x76\x65\x63",
+               .alen   = 20,
                .result = "\xD4\xB7\xED\x86\xA1\x77\x7F\x2E"
                          "\xA1\x3D\x69\x73\xD3\x24\xC6\x9E"
                          "\x7B\x43\xF8\x26\xFB\x56\x83\x12"
@@ -20619,8 +20707,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
                .iv     = "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
                .input  = "\x01\x02\x02\x01",
                .ilen   = 4,
-               .assoc  = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF",
-               .alen   = 8,
+               .assoc  = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF"
+                         "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
+               .alen   = 16,
                .result = "\x43\x7F\x86\x6B\xCB\x3F\x69\x9F"
                          "\xE9\xB0\x82\x2B\xAC\x96\x1C\x45"
                          "\x04\xBE\xF2\x70",
@@ -20636,8 +20725,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
                          "\x62\x65\x00\x01",
                .ilen   = 20,
                .assoc  = "\x00\x00\x01\x00\x00\x00\x00\x00"
-                         "\x00\x00\x00\x01",
-               .alen   = 12,
+                         "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+                         "\xCE\xFA\xCE\x74",
+               .alen   = 20,
                .result = "\x29\xC9\xFC\x69\xA1\x97\xD0\x38"
                          "\xCC\xDD\x14\xE2\xDD\xFC\xAA\x05"
                          "\x43\x33\x21\x64\x41\x25\x03\x52"
@@ -20661,8 +20751,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
                          "\x01\x02\x02\x01",
                .ilen   = 52,
                .assoc  = "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF"
-                         "\xFF\xFF\xFF\xFF",
-               .alen   = 12,
+                         "\xFF\xFF\xFF\xFF\x33\x30\x21\x69"
+                         "\x67\x65\x74\x6D",
+               .alen   = 20,
                .result = "\xF9\x7A\xB2\xAA\x35\x6D\x8E\xDC"
                          "\xE1\x76\x44\xAC\x8C\x78\xE2\x5D"
                          "\xD2\x4D\xED\xBB\x29\xEB\xF1\xB6"
@@ -20688,8 +20779,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
                          "\x01\x02\x02\x01",
                .ilen   = 52,
                .assoc  = "\x3F\x7E\xF6\x42\x10\x10\x10\x10"
-                         "\x10\x10\x10\x10",
-               .alen   = 12,
+                         "\x10\x10\x10\x10\x4E\x28\x00\x00"
+                         "\xA2\xFC\xA1\xA3",
+               .alen   = 20,
                .result = "\xFB\xA2\xCA\xA8\xC6\xC5\xF9\xF0"
                          "\xF2\x2C\xA5\x4A\x06\x12\x10\xAD"
                          "\x3F\x6E\x57\x91\xCF\x1A\xCA\x21"
@@ -20712,8 +20804,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
                          "\x71\x72\x73\x74\x01\x02\x02\x01",
                .ilen   = 32,
                .assoc  = "\x00\x00\x43\x21\x87\x65\x43\x21"
-                         "\x00\x00\x00\x07",
-               .alen   = 12,
+                         "\x00\x00\x00\x07\x48\x55\xEC\x7D"
+                         "\x3A\x23\x4B\xFD",
+               .alen   = 20,
                .result = "\x74\x75\x2E\x8A\xEB\x5D\x87\x3C"
                          "\xD7\xC0\xF4\xAC\xC3\x6C\x4B\xFF"
                          "\x84\xB7\xD7\xB9\x8F\x0C\xA8\xB6"
@@ -20725,122 +20818,122 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
 };
 
 static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
-        { /* Generated using Crypto++ */
+       { /* Generated using Crypto++ */
                .key    = zeroed_string,
                .klen   = 20,
-                .iv     = zeroed_string,
+               .iv     = zeroed_string,
                .input  = "\x03\x88\xDA\xCE\x60\xB6\xA3\x92"
-                          "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
-                          "\x97\xFE\x4C\x23\x37\x42\x01\xE0"
-                          "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
+                         "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
+                         "\x97\xFE\x4C\x23\x37\x42\x01\xE0"
+                         "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
                .ilen   = 32,
-                .assoc  = zeroed_string,
-                .alen   = 8,
-                .result = zeroed_string,
-                .rlen   = 16,
+               .assoc  = zeroed_string,
+               .alen   = 16,
+               .result = zeroed_string,
+               .rlen   = 16,
 
-        },{
+       },{
                .key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
                          "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+                         "\x00\x00\x00\x00",
                .klen   = 20,
-                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
-                          "\x00\x00\x00\x00",
+               .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
                .input  = "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18"
-                          "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
-                          "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
-                          "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
+                         "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
+                         "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
+                         "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
                .ilen   = 32,
-                .assoc  = zeroed_string,
-                .alen   = 8,
-                .result = zeroed_string,
-                .rlen   = 16,
-        }, {
+               .assoc  = "\x00\x00\x00\x00\x00\x00\x00\x00"
+                         "\x00\x00\x00\x00\x00\x00\x00\x01",
+               .alen   = 16,
+               .result = zeroed_string,
+               .rlen   = 16,
+       }, {
                .key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
                          "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+                         "\x00\x00\x00\x00",
                .klen   = 20,
-                .iv     = zeroed_string,
+               .iv     = zeroed_string,
                .input  = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
-                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
-                          "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
-                          "\xB1\x68\xFD\x14\x52\x64\x61\xB2",
+                         "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+                         "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
+                         "\xB1\x68\xFD\x14\x52\x64\x61\xB2",
                .ilen   = 32,
-                .assoc  = zeroed_string,
-                .alen   = 8,
-                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .rlen   = 16,
-        }, {
+               .assoc  = zeroed_string,
+               .alen   = 16,
+               .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01",
+               .rlen   = 16,
+       }, {
                .key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
                          "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+                         "\x00\x00\x00\x00",
                .klen   = 20,
-                .iv     = zeroed_string,
+               .iv     = zeroed_string,
                .input  = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
-                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
-                          "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63"
-                          "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5",
+                         "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+                         "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63"
+                         "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5",
                .ilen   = 32,
-                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .alen   = 8,
-                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .rlen   = 16,
+               .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x00\x00\x00\x00\x00\x00\x00\x00",
+               .alen   = 16,
+               .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01",
+               .rlen   = 16,
 
-        }, {
+       }, {
                .key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
                          "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+                         "\x00\x00\x00\x00",
                .klen   = 20,
-                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
-                          "\x00\x00\x00\x00",
+               .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
                .input  = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
-                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
-                          "\x64\x50\xF9\x32\x13\xFB\x74\x61"
-                          "\xF4\xED\x52\xD3\xC5\x10\x55\x3C",
+                         "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+                         "\x64\x50\xF9\x32\x13\xFB\x74\x61"
+                         "\xF4\xED\x52\xD3\xC5\x10\x55\x3C",
                .ilen   = 32,
-                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .alen   = 8,
-                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .rlen   = 16,
-        }, {
+               .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x00\x00\x00\x00\x00\x00\x00\x01",
+               .alen   = 16,
+               .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01",
+               .rlen   = 16,
+       }, {
                .key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
                          "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+                         "\x00\x00\x00\x00",
                .klen   = 20,
-                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
-                          "\x00\x00\x00\x00",
+               .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
                .input  = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
-                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
-                          "\x98\x14\xA1\x42\x37\x80\xFD\x90"
-                          "\x68\x12\x01\xA8\x91\x89\xB9\x83"
-                          "\x5B\x11\x77\x12\x9B\xFF\x24\x89"
-                          "\x94\x5F\x18\x12\xBA\x27\x09\x39"
-                          "\x99\x96\x76\x42\x15\x1C\xCD\xCB"
-                          "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD"
-                          "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85"
-                          "\xBD\xCF\x62\x98\x58\x14\xE5\xBD",
+                         "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+                         "\x98\x14\xA1\x42\x37\x80\xFD\x90"
+                         "\x68\x12\x01\xA8\x91\x89\xB9\x83"
+                         "\x5B\x11\x77\x12\x9B\xFF\x24\x89"
+                         "\x94\x5F\x18\x12\xBA\x27\x09\x39"
+                         "\x99\x96\x76\x42\x15\x1C\xCD\xCB"
+                         "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD"
+                         "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85"
+                         "\xBD\xCF\x62\x98\x58\x14\xE5\xBD",
                .ilen   = 80,
-                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .alen   = 8,
-                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .rlen   = 64,
-        }, {
+               .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x00\x00\x00\x00\x00\x00\x00\x01",
+               .alen   = 16,
+               .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01",
+               .rlen   = 64,
+       }, {
                .key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
                          "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
-                          "\x00\x00\x00\x00",
+                         "\x00\x00\x00\x00",
                .klen   = 20,
-                .iv     = "\x00\x00\x45\x67\x89\xab\xcd\xef"
-                          "\x00\x00\x00\x00",
+               .iv     = "\x00\x00\x45\x67\x89\xab\xcd\xef",
                .input  = "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE"
                          "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A"
                          "\x44\x6D\xC3\x88\x46\x2E\xC2\x01"
@@ -20868,34 +20961,35 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
                          "\x37\x08\x1C\xCF\xBA\x5D\x71\x46"
                          "\x80\x72\xB0\x4C\x82\x0D\x60\x3C",
                .ilen   = 208,
-                .assoc  = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
-                          "\xaa\xaa\xaa\xaa",
-                .alen   = 12,
-                .result = "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff",
-                .rlen   = 192,
+               .assoc  = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+                         "\xaa\xaa\xaa\xaa\x00\x00\x45\x67"
+                         "\x89\xab\xcd\xef",
+               .alen   = 20,
+               .result = "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff",
+               .rlen   = 192,
        }, {
                .key    = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA"
                          "\x90\x6A\xC7\x3C\x36\x13\xA6\x34"
@@ -20913,8 +21007,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
                          "\x00\x21\x00\x01\x01\x02\x02\x01",
                .rlen   = 72,
                .assoc  = "\x00\x00\x43\x21\x87\x65\x43\x21"
-                         "\x00\x00\x00\x00",
-               .alen   = 12,
+                         "\x00\x00\x00\x00\x49\x56\xED\x7E"
+                         "\x3B\x24\x4C\xFE",
+               .alen   = 20,
                .input  = "\xFE\xCF\x53\x7E\x72\x9D\x5B\x07"
                          "\xDC\x30\xDF\x52\x8D\xD2\x2B\x76"
                          "\x8D\x1B\x98\x73\x66\x96\xA6\xFD"
@@ -20942,8 +21037,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
                          "\x65\x72\x63\x69\x74\x79\x02\x64"
                          "\x6B\x00\x00\x01\x00\x01\x00\x01",
                .rlen   = 64,
-               .assoc  = "\x00\x00\xA5\xF8\x00\x00\x00\x0A",
-               .alen   = 8,
+               .assoc  = "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+                         "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+               .alen   = 16,
                .input  = "\xDE\xB2\x2C\xD9\xB0\x7C\x72\xC1"
                          "\x6E\x3A\x65\xBE\xEB\x8D\xF3\x04"
                          "\xA5\xA5\x89\x7D\x33\xAE\x53\x0F"
@@ -20971,8 +21067,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
                          "\x02\x04\x05\xB4\x01\x01\x04\x02"
                          "\x01\x02\x02\x01",
                .rlen   = 52,
-               .assoc  = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02",
-               .alen   = 8,
+               .assoc  = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02"
+                         "\x01\x02\x03\x04\x05\x06\x07\x08",
+               .alen   = 16,
                .input  = "\xFF\x42\x5C\x9B\x72\x45\x99\xDF"
                          "\x7A\x3B\xCD\x51\x01\x94\xE0\x0D"
                          "\x6A\x78\x10\x7F\x1B\x0B\x1C\xBF"
@@ -20998,8 +21095,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
                          "\x75\x76\x77\x61\x62\x63\x64\x65"
                          "\x66\x67\x68\x69\x01\x02\x02\x01",
                .rlen   = 64,
-               .assoc  = "\x00\x00\x00\x00\x00\x00\x00\x01",
-               .alen   = 8,
+               .assoc  = "\x00\x00\x00\x00\x00\x00\x00\x01"
+                         "\x00\x00\x00\x00\x00\x00\x00\x00",
+               .alen   = 16,
                .input  = "\x46\x88\xDA\xF2\xF9\x73\xA3\x92"
                          "\x73\x29\x09\xC3\x31\xD5\x6D\x60"
                          "\xF6\x94\xAB\xAA\x41\x4B\x5E\x7F"
@@ -21027,8 +21125,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
                          "\x66\x67\x68\x69\x01\x02\x02\x01",
                .rlen   = 64,
                .assoc  = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
-                         "\x10\x10\x10\x10",
-               .alen   = 12,
+                         "\x10\x10\x10\x10\x4E\x28\x00\x00"
+                         "\xA2\xFC\xA1\xA3",
+               .alen   = 20,
                .input  = "\xFB\xA2\xCA\xA4\x85\x3C\xF9\xF0"
                          "\xF2\x2C\xB1\x0D\x86\xDD\x83\xB0"
                          "\xFE\xC7\x56\x91\xCF\x1A\x04\xB0"
@@ -21052,8 +21151,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
                          "\x01\x02\x02\x01",
                .rlen   = 28,
                .assoc  = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
-                         "\x10\x10\x10\x10",
-               .alen   = 12,
+                         "\x10\x10\x10\x10\x4E\x28\x00\x00"
+                         "\xA2\xFC\xA1\xA3",
+               .alen   = 20,
                .input  = "\xFB\xA2\xCA\x84\x5E\x5D\xF9\xF0"
                          "\xF2\x2C\x3E\x6E\x86\xDD\x83\x1E"
                          "\x1F\xC6\x57\x92\xCD\x1A\xF9\x13"
@@ -21074,8 +21174,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
                          "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E"
                          "\x50\x10\x16\xD0\x75\x68\x00\x01",
                .rlen   = 40,
-               .assoc  = "\x00\x00\xA5\xF8\x00\x00\x00\x0A",
-               .alen   = 8,
+               .assoc  = "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+                         "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+               .alen   = 16,
                .input  = "\xA5\xB1\xF8\x06\x60\x29\xAE\xA4"
                          "\x0E\x59\x8B\x81\x22\xDE\x02\x42"
                          "\x09\x38\xB3\xAB\x33\xF8\x28\xE6"
@@ -21102,8 +21203,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
                          "\x23\x01\x01\x01",
                .rlen   = 76,
                .assoc  = "\x00\x00\x01\x00\x00\x00\x00\x00"
-                         "\x00\x00\x00\x01",
-               .alen   = 12,
+                         "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+                         "\xCE\xFA\xCE\x74",
+               .alen   = 20,
                .input  = "\x18\xA6\xFD\x42\xF7\x2C\xBF\x4A"
                          "\xB2\xA2\xEA\x90\x1F\x73\xD8\x14"
                          "\xE3\xE7\xF2\x43\xD9\x54\x12\xE1"
@@ -21132,8 +21234,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
                          "\x50\x10\x1F\x64\x6D\x54\x00\x01",
                .rlen   = 40,
                .assoc  = "\x17\x40\x5E\x67\x15\x6F\x31\x26"
-                         "\xDD\x0D\xB9\x9B",
-               .alen   = 12,
+                         "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+                         "\x69\x76\x65\x63",
+               .alen   = 20,
                .input  = "\xF2\xD6\x9E\xCD\xBD\x5A\x0D\x5B"
                          "\x8D\x5E\xF3\x8B\xAD\x4D\xA5\x8D"
                          "\x1F\x27\x8F\xDE\x98\xEF\x67\x54"
@@ -21160,8 +21263,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
                          "\x15\x01\x01\x01",
                .rlen   = 76,
                .assoc  = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
-                         "\x10\x10\x10\x10",
-               .alen   = 12,
+                         "\x10\x10\x10\x10\x4E\x28\x00\x00"
+                         "\xA2\xFC\xA1\xA3",
+               .alen   = 20,
                .input  = "\xFB\xA2\xCA\xD1\x2F\xC1\xF9\xF0"
                          "\x0D\x3C\xEB\xF3\x05\x41\x0D\xB8"
                          "\x3D\x77\x84\xB6\x07\x32\x3D\x22"
@@ -21194,8 +21298,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
                          "\x72\x72\x6F\x77\x01\x02\x02\x01",
                .rlen   = 72,
                .assoc  = "\x17\x40\x5E\x67\x15\x6F\x31\x26"
-                         "\xDD\x0D\xB9\x9B",
-               .alen   = 12,
+                         "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+                         "\x69\x76\x65\x63",
+               .alen   = 20,
                .input  = "\xD4\xB7\xED\x86\xA1\x77\x7F\x2E"
                          "\xA1\x3D\x69\x73\xD3\x24\xC6\x9E"
                          "\x7B\x43\xF8\x26\xFB\x56\x83\x12"
@@ -21216,8 +21321,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
                .iv     = "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
                .result = "\x01\x02\x02\x01",
                .rlen   = 4,
-               .assoc  = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF",
-               .alen   = 8,
+               .assoc  = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF"
+                         "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
+               .alen   = 16,
                .input  = "\x43\x7F\x86\x6B\xCB\x3F\x69\x9F"
                          "\xE9\xB0\x82\x2B\xAC\x96\x1C\x45"
                          "\x04\xBE\xF2\x70",
@@ -21233,8 +21339,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
                          "\x62\x65\x00\x01",
                .rlen   = 20,
                .assoc  = "\x00\x00\x01\x00\x00\x00\x00\x00"
-                         "\x00\x00\x00\x01",
-               .alen   = 12,
+                         "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+                         "\xCE\xFA\xCE\x74",
+               .alen   = 20,
                .input  = "\x29\xC9\xFC\x69\xA1\x97\xD0\x38"
                          "\xCC\xDD\x14\xE2\xDD\xFC\xAA\x05"
                          "\x43\x33\x21\x64\x41\x25\x03\x52"
@@ -21258,8 +21365,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
                          "\x01\x02\x02\x01",
                .rlen   = 52,
                .assoc  = "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF"
-                         "\xFF\xFF\xFF\xFF",
-               .alen   = 12,
+                         "\xFF\xFF\xFF\xFF\x33\x30\x21\x69"
+                         "\x67\x65\x74\x6D",
+               .alen   = 20,
                .input  = "\xF9\x7A\xB2\xAA\x35\x6D\x8E\xDC"
                          "\xE1\x76\x44\xAC\x8C\x78\xE2\x5D"
                          "\xD2\x4D\xED\xBB\x29\xEB\xF1\xB6"
@@ -21285,8 +21393,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
                          "\x01\x02\x02\x01",
                .rlen   = 52,
                .assoc  = "\x3F\x7E\xF6\x42\x10\x10\x10\x10"
-                         "\x10\x10\x10\x10",
-               .alen   = 12,
+                         "\x10\x10\x10\x10\x4E\x28\x00\x00"
+                         "\xA2\xFC\xA1\xA3",
+               .alen   = 20,
                .input  = "\xFB\xA2\xCA\xA8\xC6\xC5\xF9\xF0"
                          "\xF2\x2C\xA5\x4A\x06\x12\x10\xAD"
                          "\x3F\x6E\x57\x91\xCF\x1A\xCA\x21"
@@ -21309,8 +21418,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
                          "\x71\x72\x73\x74\x01\x02\x02\x01",
                .rlen   = 32,
                .assoc  = "\x00\x00\x43\x21\x87\x65\x43\x21"
-                         "\x00\x00\x00\x07",
-               .alen   = 12,
+                         "\x00\x00\x00\x07\x48\x55\xEC\x7D"
+                         "\x3A\x23\x4B\xFD",
+               .alen   = 20,
                .input  = "\x74\x75\x2E\x8A\xEB\x5D\x87\x3C"
                          "\xD7\xC0\xF4\xAC\xC3\x6C\x4B\xFF"
                          "\x84\xB7\xD7\xB9\x8F\x0C\xA8\xB6"
@@ -21538,10 +21648,7 @@ static struct aead_testvec aes_ccm_enc_tv_template[] = {
                          "\xba",
                .rlen   = 33,
        }, {
-               /*
-                * This is the same vector as aes_ccm_rfc4309_enc_tv_template[0]
-                * below but rewritten to use the ccm algorithm directly.
-                */
+               /* This is taken from FIPS CAVS. */
                .key    = "\x83\xac\x54\x66\xc2\xeb\xe5\x05"
                          "\x2e\x01\xd1\xfc\x5d\x82\x66\x2e",
                .klen   = 16,
@@ -21559,214 +21666,51 @@ static struct aead_testvec aes_ccm_enc_tv_template[] = {
                          "\xda\x24\xea\xd9\xa1\x39\x98\xfd"
                          "\xa4\xbe\xd9\xf2\x1a\x6d\x22\xa8",
                .rlen   = 48,
-       }
-};
-
-static struct aead_testvec aes_ccm_dec_tv_template[] = {
-       { /* From RFC 3610 */
-               .key    = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
-                         "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
-               .klen   = 16,
-               .iv     = "\x01\x00\x00\x00\x03\x02\x01\x00"
-                         "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
-               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07",
-               .alen   = 8,
-               .input  = "\x58\x8c\x97\x9a\x61\xc6\x63\xd2"
-                         "\xf0\x66\xd0\xc2\xc0\xf9\x89\x80"
-                         "\x6d\x5f\x6b\x61\xda\xc3\x84\x17"
-                         "\xe8\xd1\x2c\xfd\xf9\x26\xe0",
-               .ilen   = 31,
-               .result = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
-                         "\x10\x11\x12\x13\x14\x15\x16\x17"
-                         "\x18\x19\x1a\x1b\x1c\x1d\x1e",
-               .rlen   = 23,
        }, {
-               .key    = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
-                         "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
+               .key    = "\x1e\x2c\x7e\x01\x41\x9a\xef\xc0"
+                         "\x0d\x58\x96\x6e\x5c\xa2\x4b\xd3",
                .klen   = 16,
-               .iv     = "\x01\x00\x00\x00\x07\x06\x05\x04"
-                         "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
-               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
-                         "\x08\x09\x0a\x0b",
-               .alen   = 12,
-               .input  = "\xdc\xf1\xfb\x7b\x5d\x9e\x23\xfb"
-                         "\x9d\x4e\x13\x12\x53\x65\x8a\xd8"
-                         "\x6e\xbd\xca\x3e\x51\xe8\x3f\x07"
-                         "\x7d\x9c\x2d\x93",
-               .ilen   = 28,
-               .result = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
-                         "\x14\x15\x16\x17\x18\x19\x1a\x1b"
-                         "\x1c\x1d\x1e\x1f",
-               .rlen   = 20,
+               .iv     = "\x03\x4f\xa3\x19\xd3\x01\x5a\xd8"
+                         "\x30\x60\x15\x56\x00\x00\x00\x00",
+               .assoc  = "\xda\xe6\x28\x9c\x45\x2d\xfd\x63"
+                         "\x5e\xda\x4c\xb6\xe6\xfc\xf9\xb7"
+                         "\x0c\x56\xcb\xe4\xe0\x05\x7a\xe1"
+                         "\x0a\x63\x09\x78\xbc\x2c\x55\xde",
+               .alen   = 32,
+               .input  = "\x87\xa3\x36\xfd\x96\xb3\x93\x78"
+                         "\xa9\x28\x63\xba\x12\xa3\x14\x85"
+                         "\x57\x1e\x06\xc9\x7b\x21\xef\x76"
+                         "\x7f\x38\x7e\x8e\x29\xa4\x3e\x7e",
+               .ilen   = 32,
+               .result = "\x8a\x1e\x11\xf0\x02\x6b\xe2\x19"
+                         "\xfc\x70\xc4\x6d\x8e\xb7\x99\xab"
+                         "\xc5\x4b\xa2\xac\xd3\xf3\x48\xff"
+                         "\x3b\xb5\xce\x53\xef\xde\xbb\x02"
+                         "\xa9\x86\x15\x6c\x13\xfe\xda\x0a"
+                         "\x22\xb8\x29\x3d\xd8\x39\x9a\x23",
+               .rlen   = 48,
        }, {
-               .key    = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
-                         "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
-               .klen   = 16,
-               .iv     = "\x01\x00\x00\x00\x0b\x0a\x09\x08"
-                         "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
-               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07",
-               .alen   = 8,
-               .input  = "\x82\x53\x1a\x60\xcc\x24\x94\x5a"
-                         "\x4b\x82\x79\x18\x1a\xb5\xc8\x4d"
-                         "\xf2\x1c\xe7\xf9\xb7\x3f\x42\xe1"
-                         "\x97\xea\x9c\x07\xe5\x6b\x5e\xb1"
-                         "\x7e\x5f\x4e",
-               .ilen   = 35,
-               .result = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
-                         "\x10\x11\x12\x13\x14\x15\x16\x17"
-                         "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
-                         "\x20",
-               .rlen   = 25,
-       }, {
-               .key    = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
-                         "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
-               .klen   = 16,
-               .iv     = "\x01\x00\x00\x00\x0c\x0b\x0a\x09"
-                         "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
-               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
-                         "\x08\x09\x0a\x0b",
-               .alen   = 12,
-               .input  = "\x07\x34\x25\x94\x15\x77\x85\x15"
-                         "\x2b\x07\x40\x98\x33\x0a\xbb\x14"
-                         "\x1b\x94\x7b\x56\x6a\xa9\x40\x6b"
-                         "\x4d\x99\x99\x88\xdd",
-               .ilen   = 29,
-               .result = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
-                         "\x14\x15\x16\x17\x18\x19\x1a\x1b"
-                         "\x1c\x1d\x1e",
-               .rlen   = 19,
-       }, {
-               .key    = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
-                         "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
-               .klen   = 16,
-               .iv     = "\x01\x00\x33\x56\x8e\xf7\xb2\x63"
-                         "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
-               .assoc  = "\x63\x01\x8f\x76\xdc\x8a\x1b\xcb",
-               .alen   = 8,
-               .input  = "\x4c\xcb\x1e\x7c\xa9\x81\xbe\xfa"
-                         "\xa0\x72\x6c\x55\xd3\x78\x06\x12"
-                         "\x98\xc8\x5c\x92\x81\x4a\xbc\x33"
-                         "\xc5\x2e\xe8\x1d\x7d\x77\xc0\x8a",
-               .ilen   = 32,
-               .result = "\x90\x20\xea\x6f\x91\xbd\xd8\x5a"
-                         "\xfa\x00\x39\xba\x4b\xaf\xf9\xbf"
-                         "\xb7\x9c\x70\x28\x94\x9c\xd0\xec",
-               .rlen   = 24,
-       }, {
-               .key    = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
-                         "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
-               .klen   = 16,
-               .iv     = "\x01\x00\xd5\x60\x91\x2d\x3f\x70"
-                         "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
-               .assoc  = "\xcd\x90\x44\xd2\xb7\x1f\xdb\x81"
-                         "\x20\xea\x60\xc0",
-               .alen   = 12,
-               .input  = "\x00\x97\x69\xec\xab\xdf\x48\x62"
-                         "\x55\x94\xc5\x92\x51\xe6\x03\x57"
-                         "\x22\x67\x5e\x04\xc8\x47\x09\x9e"
-                         "\x5a\xe0\x70\x45\x51",
-               .ilen   = 29,
-               .result = "\x64\x35\xac\xba\xfb\x11\xa8\x2e"
-                         "\x2f\x07\x1d\x7c\xa4\xa5\xeb\xd9"
-                         "\x3a\x80\x3b\xa8\x7f",
-               .rlen   = 21,
-       }, {
-               .key    = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
-                         "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
-               .klen   = 16,
-               .iv     = "\x01\x00\x42\xff\xf8\xf1\x95\x1c"
-                         "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
-               .assoc  = "\xd8\x5b\xc7\xe6\x9f\x94\x4f\xb8",
-               .alen   = 8,
-               .input  = "\xbc\x21\x8d\xaa\x94\x74\x27\xb6"
-                         "\xdb\x38\x6a\x99\xac\x1a\xef\x23"
-                         "\xad\xe0\xb5\x29\x39\xcb\x6a\x63"
-                         "\x7c\xf9\xbe\xc2\x40\x88\x97\xc6"
-                         "\xba",
-               .ilen   = 33,
-               .result = "\x8a\x19\xb9\x50\xbc\xf7\x1a\x01"
-                         "\x8e\x5e\x67\x01\xc9\x17\x87\x65"
-                         "\x98\x09\xd6\x7d\xbe\xdd\x18",
-               .rlen   = 23,
-       },
-};
-
-/*
- * rfc4309 refers to section 8 of rfc3610 for test vectors, but they all
- * use a 13-byte nonce, we only support an 11-byte nonce. Similarly, all of
- * Special Publication 800-38C's test vectors also use nonce lengths our
- * implementation doesn't support. The following are taken from fips cavs
- * fax files on hand at Red Hat.
- *
- * nb: actual key lengths are (klen - 3), the last 3 bytes are actually
- * part of the nonce which combine w/the iv, but need to be input this way.
- */
-static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
-       {
-               .key    = "\x83\xac\x54\x66\xc2\xeb\xe5\x05"
-                         "\x2e\x01\xd1\xfc\x5d\x82\x66\x2e"
-                         "\x96\xac\x59",
-               .klen   = 19,
-               .iv     = "\x30\x07\xa1\xe2\xa2\xc7\x55\x24",
-               .alen   = 0,
-               .input  = "\x19\xc8\x81\xf6\xe9\x86\xff\x93"
-                         "\x0b\x78\x67\xe5\xbb\xb7\xfc\x6e"
-                         "\x83\x77\xb3\xa6\x0c\x8c\x9f\x9c"
-                         "\x35\x2e\xad\xe0\x62\xf9\x91\xa1",
-               .ilen   = 32,
-               .result = "\xab\x6f\xe1\x69\x1d\x19\x99\xa8"
-                         "\x92\xa0\xc4\x6f\x7e\xe2\x8b\xb1"
-                         "\x70\xbb\x8c\xa6\x4c\x6e\x97\x8a"
-                         "\x57\x2b\xbe\x5d\x98\xa6\xb1\x32"
-                         "\xda\x24\xea\xd9\xa1\x39\x98\xfd"
-                         "\xa4\xbe\xd9\xf2\x1a\x6d\x22\xa8",
-               .rlen   = 48,
-       }, {
-               .key    = "\x1e\x2c\x7e\x01\x41\x9a\xef\xc0"
-                         "\x0d\x58\x96\x6e\x5c\xa2\x4b\xd3"
-                         "\x4f\xa3\x19",
-               .klen   = 19,
-               .iv     = "\xd3\x01\x5a\xd8\x30\x60\x15\x56",
-               .assoc  = "\xda\xe6\x28\x9c\x45\x2d\xfd\x63"
-                         "\x5e\xda\x4c\xb6\xe6\xfc\xf9\xb7"
-                         "\x0c\x56\xcb\xe4\xe0\x05\x7a\xe1"
-                         "\x0a\x63\x09\x78\xbc\x2c\x55\xde",
-               .alen   = 32,
-               .input  = "\x87\xa3\x36\xfd\x96\xb3\x93\x78"
-                         "\xa9\x28\x63\xba\x12\xa3\x14\x85"
-                         "\x57\x1e\x06\xc9\x7b\x21\xef\x76"
-                         "\x7f\x38\x7e\x8e\x29\xa4\x3e\x7e",
-               .ilen   = 32,
-               .result = "\x8a\x1e\x11\xf0\x02\x6b\xe2\x19"
-                         "\xfc\x70\xc4\x6d\x8e\xb7\x99\xab"
-                         "\xc5\x4b\xa2\xac\xd3\xf3\x48\xff"
-                         "\x3b\xb5\xce\x53\xef\xde\xbb\x02"
-                         "\xa9\x86\x15\x6c\x13\xfe\xda\x0a"
-                         "\x22\xb8\x29\x3d\xd8\x39\x9a\x23",
-               .rlen   = 48,
-       }, {
-               .key    = "\xf4\x6b\xc2\x75\x62\xfe\xb4\xe1"
-                         "\xa3\xf0\xff\xdd\x4e\x4b\x12\x75"
-                         "\x53\x14\x73\x66\x8d\x88\xf6\x80"
-                         "\xa0\x20\x35",
-               .klen   = 27,
-               .iv     = "\x26\xf2\x21\x8d\x50\x20\xda\xe2",
-               .assoc  = "\x5b\x9e\x13\x67\x02\x5e\xef\xc1"
-                         "\x6c\xf9\xd7\x1e\x52\x8f\x7a\x47"
-                         "\xe9\xd4\xcf\x20\x14\x6e\xf0\x2d"
-                         "\xd8\x9e\x2b\x56\x10\x23\x56\xe7",
-               .alen   = 32,
-               .ilen   = 0,
-               .result = "\x36\xea\x7a\x70\x08\xdc\x6a\xbc"
-                         "\xad\x0c\x7a\x63\xf6\x61\xfd\x9b",
-               .rlen   = 16,
+               .key    = "\xf4\x6b\xc2\x75\x62\xfe\xb4\xe1"
+                         "\xa3\xf0\xff\xdd\x4e\x4b\x12\x75"
+                         "\x53\x14\x73\x66\x8d\x88\xf6\x80",
+               .klen   = 24,
+               .iv     = "\x03\xa0\x20\x35\x26\xf2\x21\x8d"
+                         "\x50\x20\xda\xe2\x00\x00\x00\x00",
+               .assoc  = "\x5b\x9e\x13\x67\x02\x5e\xef\xc1"
+                         "\x6c\xf9\xd7\x1e\x52\x8f\x7a\x47"
+                         "\xe9\xd4\xcf\x20\x14\x6e\xf0\x2d"
+                         "\xd8\x9e\x2b\x56\x10\x23\x56\xe7",
+               .alen   = 32,
+               .result = "\x36\xea\x7a\x70\x08\xdc\x6a\xbc"
+                         "\xad\x0c\x7a\x63\xf6\x61\xfd\x9b",
+               .rlen   = 16,
        }, {
                .key    = "\x56\xdf\x5c\x8f\x26\x3f\x0e\x42"
                          "\xef\x7a\xd3\xce\xfc\x84\x60\x62"
-                         "\xca\xb4\x40\xaf\x5f\xc9\xc9\x01"
-                         "\xd6\x3c\x8c",
-               .klen   = 27,
-               .iv     = "\x86\x84\xb6\xcd\xef\x09\x2e\x94",
+                         "\xca\xb4\x40\xaf\x5f\xc9\xc9\x01",
+               .klen   = 24,
+               .iv     = "\x03\xd6\x3c\x8c\x86\x84\xb6\xcd"
+                         "\xef\x09\x2e\x94\x00\x00\x00\x00",
                .assoc  = "\x02\x65\x78\x3c\xe9\x21\x30\x91"
                          "\xb1\xb9\xda\x76\x9a\x78\x6d\x95"
                          "\xf2\x88\x32\xa3\xf2\x50\xcb\x4c"
@@ -21788,10 +21732,10 @@ static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
                .key    = "\xe0\x8d\x99\x71\x60\xd7\x97\x1a"
                          "\xbd\x01\x99\xd5\x8a\xdf\x71\x3a"
                          "\xd3\xdf\x24\x4b\x5e\x3d\x4b\x4e"
-                         "\x30\x7a\xb9\xd8\x53\x0a\x5e\x2b"
-                         "\x1e\x29\x91",
-               .klen   = 35,
-               .iv     = "\xad\x8e\xc1\x53\x0a\xcf\x2d\xbe",
+                         "\x30\x7a\xb9\xd8\x53\x0a\x5e\x2b",
+               .klen   = 32,
+               .iv     = "\x03\x1e\x29\x91\xad\x8e\xc1\x53"
+                         "\x0a\xcf\x2d\xbe\x00\x00\x00\x00",
                .assoc  = "\x19\xb6\x1f\x57\xc4\xf3\xf0\x8b"
                          "\x78\x2b\x94\x02\x29\x0f\x42\x27"
                          "\x6b\x75\xcb\x98\x34\x08\x7e\x79"
@@ -21812,10 +21756,10 @@ static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
                .key    = "\x7c\xc8\x18\x3b\x8d\x99\xe0\x7c"
                          "\x45\x41\xb8\xbd\x5c\xa7\xc2\x32"
                          "\x8a\xb8\x02\x59\xa4\xfe\xa9\x2c"
-                         "\x09\x75\x9a\x9b\x3c\x9b\x27\x39"
-                         "\xf9\xd9\x4e",
-               .klen   = 35,
-               .iv     = "\x63\xb5\x3d\x9d\x43\xf6\x1e\x50",
+                         "\x09\x75\x9a\x9b\x3c\x9b\x27\x39",
+               .klen   = 32,
+               .iv     = "\x03\xf9\xd9\x4e\x63\xb5\x3d\x9d"
+                         "\x43\xf6\x1e\x50",
                .assoc  = "\x57\xf5\x6b\x8b\x57\x5c\x3d\x3b"
                          "\x13\x02\x01\x0c\x83\x4c\x96\x35"
                          "\x8e\xd6\x39\xcf\x7d\x14\x9b\x94"
@@ -21837,10 +21781,10 @@ static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
                .key    = "\xab\xd0\xe9\x33\x07\x26\xe5\x83"
                          "\x8c\x76\x95\xd4\xb6\xdc\xf3\x46"
                          "\xf9\x8f\xad\xe3\x02\x13\x83\x77"
-                         "\x3f\xb0\xf1\xa1\xa1\x22\x0f\x2b"
-                         "\x24\xa7\x8b",
-               .klen   = 35,
-               .iv     = "\x07\xcb\xcc\x0e\xe6\x33\xbf\xf5",
+                         "\x3f\xb0\xf1\xa1\xa1\x22\x0f\x2b",
+               .klen   = 32,
+               .iv     = "\x03\x24\xa7\x8b\x07\xcb\xcc\x0e"
+                         "\xe6\x33\xbf\xf5\x00\x00\x00\x00",
                .assoc  = "\xd4\xdb\x30\x1d\x03\xfe\xfd\x5f"
                          "\x87\xd4\x8c\xb6\xb6\xf1\x7a\x5d"
                          "\xab\x90\x65\x8d\x8e\xca\x4d\x4f"
@@ -21858,16 +21802,142 @@ static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
                          "\x5c\xda\xb2\x33\xe5\x13\xe2\x0d"
                          "\x74\xd1\xef\xb5\x0f\x3a\xb5\xf8",
                .rlen   = 48,
-       },
+       }
 };
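The enc template above absorbs the old rfc4309-style vectors into plain ccm(aes) form: each key loses its trailing 3-byte nonce salt (klen 19 to 16, 27 to 24, 35 to 32), and that salt moves into a full 16-byte .iv. A minimal sketch of the resulting IV layout, assuming the usual kernel CCM convention that iv[0] holds L - 1 (3 here, i.e. a 4-byte length field and an 11-byte nonce) and that the trailing counter bytes start zeroed; the key/iv pair below is the second converted vector above:

#include <stdio.h>
#include <string.h>

/* Build a 16-byte ccm(aes) template IV from an old-style rfc4309
 * vector whose 19-byte key ended in a 3-byte salt and whose .iv held
 * the 8-byte per-packet IV. */
static void rfc4309_to_ccm_iv(const unsigned char *old_key,
			      const unsigned char *old_iv,
			      unsigned char new_iv[16])
{
	new_iv[0] = 3;				/* L - 1: 4-byte length field */
	memcpy(new_iv + 1, old_key + 16, 3);	/* salt: last 3 key bytes */
	memcpy(new_iv + 4, old_iv, 8);		/* per-packet IV */
	memset(new_iv + 12, 0, 4);		/* counter space, zeroed */
}

int main(void)
{
	static const unsigned char key[19] =
		"\x1e\x2c\x7e\x01\x41\x9a\xef\xc0"
		"\x0d\x58\x96\x6e\x5c\xa2\x4b\xd3"
		"\x4f\xa3\x19";
	static const unsigned char iv8[8] =
		"\xd3\x01\x5a\xd8\x30\x60\x15\x56";
	unsigned char iv16[16];
	int i;

	rfc4309_to_ccm_iv(key, iv8, iv16);
	for (i = 0; i < 16; i++)		/* 034fa319d3015ad8... */
		printf("%02x", iv16[i]);
	printf("\n");
	return 0;
}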
 
-static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
-       {
+static struct aead_testvec aes_ccm_dec_tv_template[] = {
+       { /* From RFC 3610 */
+               .key    = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+                         "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
+               .klen   = 16,
+               .iv     = "\x01\x00\x00\x00\x03\x02\x01\x00"
+                         "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
+               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07",
+               .alen   = 8,
+               .input  = "\x58\x8c\x97\x9a\x61\xc6\x63\xd2"
+                         "\xf0\x66\xd0\xc2\xc0\xf9\x89\x80"
+                         "\x6d\x5f\x6b\x61\xda\xc3\x84\x17"
+                         "\xe8\xd1\x2c\xfd\xf9\x26\xe0",
+               .ilen   = 31,
+               .result = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+                         "\x10\x11\x12\x13\x14\x15\x16\x17"
+                         "\x18\x19\x1a\x1b\x1c\x1d\x1e",
+               .rlen   = 23,
+       }, {
+               .key    = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+                         "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
+               .klen   = 16,
+               .iv     = "\x01\x00\x00\x00\x07\x06\x05\x04"
+                         "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
+               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
+                         "\x08\x09\x0a\x0b",
+               .alen   = 12,
+               .input  = "\xdc\xf1\xfb\x7b\x5d\x9e\x23\xfb"
+                         "\x9d\x4e\x13\x12\x53\x65\x8a\xd8"
+                         "\x6e\xbd\xca\x3e\x51\xe8\x3f\x07"
+                         "\x7d\x9c\x2d\x93",
+               .ilen   = 28,
+               .result = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
+                         "\x14\x15\x16\x17\x18\x19\x1a\x1b"
+                         "\x1c\x1d\x1e\x1f",
+               .rlen   = 20,
+       }, {
+               .key    = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+                         "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
+               .klen   = 16,
+               .iv     = "\x01\x00\x00\x00\x0b\x0a\x09\x08"
+                         "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
+               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07",
+               .alen   = 8,
+               .input  = "\x82\x53\x1a\x60\xcc\x24\x94\x5a"
+                         "\x4b\x82\x79\x18\x1a\xb5\xc8\x4d"
+                         "\xf2\x1c\xe7\xf9\xb7\x3f\x42\xe1"
+                         "\x97\xea\x9c\x07\xe5\x6b\x5e\xb1"
+                         "\x7e\x5f\x4e",
+               .ilen   = 35,
+               .result = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+                         "\x10\x11\x12\x13\x14\x15\x16\x17"
+                         "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+                         "\x20",
+               .rlen   = 25,
+       }, {
+               .key    = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+                         "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
+               .klen   = 16,
+               .iv     = "\x01\x00\x00\x00\x0c\x0b\x0a\x09"
+                         "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
+               .assoc  = "\x00\x01\x02\x03\x04\x05\x06\x07"
+                         "\x08\x09\x0a\x0b",
+               .alen   = 12,
+               .input  = "\x07\x34\x25\x94\x15\x77\x85\x15"
+                         "\x2b\x07\x40\x98\x33\x0a\xbb\x14"
+                         "\x1b\x94\x7b\x56\x6a\xa9\x40\x6b"
+                         "\x4d\x99\x99\x88\xdd",
+               .ilen   = 29,
+               .result = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
+                         "\x14\x15\x16\x17\x18\x19\x1a\x1b"
+                         "\x1c\x1d\x1e",
+               .rlen   = 19,
+       }, {
+               .key    = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
+                         "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
+               .klen   = 16,
+               .iv     = "\x01\x00\x33\x56\x8e\xf7\xb2\x63"
+                         "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
+               .assoc  = "\x63\x01\x8f\x76\xdc\x8a\x1b\xcb",
+               .alen   = 8,
+               .input  = "\x4c\xcb\x1e\x7c\xa9\x81\xbe\xfa"
+                         "\xa0\x72\x6c\x55\xd3\x78\x06\x12"
+                         "\x98\xc8\x5c\x92\x81\x4a\xbc\x33"
+                         "\xc5\x2e\xe8\x1d\x7d\x77\xc0\x8a",
+               .ilen   = 32,
+               .result = "\x90\x20\xea\x6f\x91\xbd\xd8\x5a"
+                         "\xfa\x00\x39\xba\x4b\xaf\xf9\xbf"
+                         "\xb7\x9c\x70\x28\x94\x9c\xd0\xec",
+               .rlen   = 24,
+       }, {
+               .key    = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
+                         "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
+               .klen   = 16,
+               .iv     = "\x01\x00\xd5\x60\x91\x2d\x3f\x70"
+                         "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
+               .assoc  = "\xcd\x90\x44\xd2\xb7\x1f\xdb\x81"
+                         "\x20\xea\x60\xc0",
+               .alen   = 12,
+               .input  = "\x00\x97\x69\xec\xab\xdf\x48\x62"
+                         "\x55\x94\xc5\x92\x51\xe6\x03\x57"
+                         "\x22\x67\x5e\x04\xc8\x47\x09\x9e"
+                         "\x5a\xe0\x70\x45\x51",
+               .ilen   = 29,
+               .result = "\x64\x35\xac\xba\xfb\x11\xa8\x2e"
+                         "\x2f\x07\x1d\x7c\xa4\xa5\xeb\xd9"
+                         "\x3a\x80\x3b\xa8\x7f",
+               .rlen   = 21,
+       }, {
+               .key    = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
+                         "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
+               .klen   = 16,
+               .iv     = "\x01\x00\x42\xff\xf8\xf1\x95\x1c"
+                         "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
+               .assoc  = "\xd8\x5b\xc7\xe6\x9f\x94\x4f\xb8",
+               .alen   = 8,
+               .input  = "\xbc\x21\x8d\xaa\x94\x74\x27\xb6"
+                         "\xdb\x38\x6a\x99\xac\x1a\xef\x23"
+                         "\xad\xe0\xb5\x29\x39\xcb\x6a\x63"
+                         "\x7c\xf9\xbe\xc2\x40\x88\x97\xc6"
+                         "\xba",
+               .ilen   = 33,
+               .result = "\x8a\x19\xb9\x50\xbc\xf7\x1a\x01"
+                         "\x8e\x5e\x67\x01\xc9\x17\x87\x65"
+                         "\x98\x09\xd6\x7d\xbe\xdd\x18",
+               .rlen   = 23,
+       }, {
+               /* This is taken from FIPS CAVS. */
                .key    = "\xab\x2f\x8a\x74\xb7\x1c\xd2\xb1"
-                         "\xff\x80\x2e\x48\x7d\x82\xf8\xb9"
-                         "\xc6\xfb\x7d",
-               .klen   = 19,
-               .iv     = "\x80\x0d\x13\xab\xd8\xa6\xb2\xd8",
+                         "\xff\x80\x2e\x48\x7d\x82\xf8\xb9",
+               .klen   = 16,
+               .iv     = "\x03\xc6\xfb\x7d\x80\x0d\x13\xab"
+                         "\xd8\xa6\xb2\xd8\x00\x00\x00\x00",
                .alen   = 0,
                .input  = "\xd5\xe8\x93\x9f\xc7\x89\x2e\x2b",
                .ilen   = 8,
@@ -21876,10 +21946,10 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
                .novrfy = 1,
        }, {
                .key    = "\xab\x2f\x8a\x74\xb7\x1c\xd2\xb1"
-                         "\xff\x80\x2e\x48\x7d\x82\xf8\xb9"
-                         "\xaf\x94\x87",
-               .klen   = 19,
-               .iv     = "\x78\x35\x82\x81\x7f\x88\x94\x68",
+                         "\xff\x80\x2e\x48\x7d\x82\xf8\xb9",
+               .klen   = 16,
+               .iv     = "\x03\xaf\x94\x87\x78\x35\x82\x81"
+                         "\x7f\x88\x94\x68\x00\x00\x00\x00",
                .alen   = 0,
                .input  = "\x41\x3c\xb8\x87\x73\xcb\xf3\xf3",
                .ilen   = 8,
@@ -21887,10 +21957,10 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
                .rlen   = 0,
        }, {
                .key    = "\x61\x0e\x8c\xae\xe3\x23\xb6\x38"
-                         "\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8"
-                         "\xc6\xfb\x7d",
-               .klen   = 19,
-               .iv     = "\x80\x0d\x13\xab\xd8\xa6\xb2\xd8",
+                         "\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8",
+               .klen   = 16,
+               .iv     = "\x03\xc6\xfb\x7d\x80\x0d\x13\xab"
+                         "\xd8\xa6\xb2\xd8\x00\x00\x00\x00",
                .assoc  = "\xf3\x94\x87\x78\x35\x82\x81\x7f"
                          "\x88\x94\x68\xb1\x78\x6b\x2b\xd6"
                          "\x04\x1f\x4e\xed\x78\xd5\x33\x66"
@@ -21911,10 +21981,10 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
                .novrfy = 1,
        }, {
                .key    = "\x61\x0e\x8c\xae\xe3\x23\xb6\x38"
-                         "\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8"
-                         "\x05\xe0\xc9",
-               .klen   = 19,
-               .iv     = "\x0f\xed\x34\xea\x97\xd4\x3b\xdf",
+                         "\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8",
+               .klen   = 16,
+               .iv     = "\x03\x05\xe0\xc9\x0f\xed\x34\xea"
+                         "\x97\xd4\x3b\xdf\x00\x00\x00\x00",
                .assoc  = "\x49\x5c\x50\x1f\x1d\x94\xcc\x81"
                          "\xba\xb7\xb6\x03\xaf\xa5\xc1\xa1"
                          "\xd8\x5c\x42\x68\xe0\x6c\xda\x89"
@@ -21935,10 +22005,10 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
        }, {
                .key    = "\x39\xbb\xa7\xbe\x59\x97\x9e\x73"
                          "\xa2\xbc\x6b\x98\xd7\x75\x7f\xe3"
-                         "\xa4\x48\x93\x39\x26\x71\x4a\xc6"
-                         "\xee\x49\x83",
-               .klen   = 27,
-               .iv     = "\xe9\xa9\xff\xe9\x57\xba\xfd\x9e",
+                         "\xa4\x48\x93\x39\x26\x71\x4a\xc6",
+               .klen   = 24,
+               .iv     = "\x03\xee\x49\x83\xe9\xa9\xff\xe9"
+                         "\x57\xba\xfd\x9e\x00\x00\x00\x00",
                .assoc  = "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1"
                          "\x58\x7c\xf2\x5c\x6d\x39\x0a\x64"
                          "\xa4\xf0\x13\x05\xd1\x77\x99\x67"
@@ -21949,114 +22019,1348 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
                .result = "\x00",
                .rlen   = 0,
        }, {
-               .key    = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7"
-                         "\x96\xe5\xc5\x68\xaa\x95\x35\xe0"
-                         "\x29\xa0\xba\x9e\x48\x78\xd1\xba"
-                         "\xee\x49\x83",
+               .key    = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7"
+                         "\x96\xe5\xc5\x68\xaa\x95\x35\xe0"
+                         "\x29\xa0\xba\x9e\x48\x78\xd1\xba",
+               .klen   = 24,
+               .iv     = "\x03\xee\x49\x83\xe9\xa9\xff\xe9"
+                         "\x57\xba\xfd\x9e\x00\x00\x00\x00",
+               .assoc  = "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1"
+                         "\x58\x7c\xf2\x5c\x6d\x39\x0a\x64"
+                         "\xa4\xf0\x13\x05\xd1\x77\x99\x67"
+                         "\x11\xc4\xc6\xdb\x00\x56\x36\x61",
+               .alen   = 32,
+               .input  = "\xfb\xe5\x5d\x34\xbe\xe5\xe8\xe7"
+                         "\x5a\xef\x2f\xbf\x1f\x7f\xd4\xb2"
+                         "\x66\xca\x61\x1e\x96\x7a\x61\xb3"
+                         "\x1c\x16\x45\x52\xba\x04\x9c\x9f"
+                         "\xb1\xd2\x40\xbc\x52\x7c\x6f\xb1",
+               .ilen   = 40,
+               .result = "\x85\x34\x66\x42\xc8\x92\x0f\x36"
+                         "\x58\xe0\x6b\x91\x3c\x98\x5c\xbb"
+                         "\x0a\x85\xcc\x02\xad\x7a\x96\xe9"
+                         "\x65\x43\xa4\xc3\x0f\xdc\x55\x81",
+               .rlen   = 32,
+       }, {
+               .key    = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7"
+                         "\x96\xe5\xc5\x68\xaa\x95\x35\xe0"
+                         "\x29\xa0\xba\x9e\x48\x78\xd1\xba",
+               .klen   = 24,
+               .iv     = "\x03\xd1\xfc\x57\x9c\xfe\xb8\x9c"
+                         "\xad\x71\xaa\x1f\x00\x00\x00\x00",
+               .assoc  = "\x86\x67\xa5\xa9\x14\x5f\x0d\xc6"
+                         "\xff\x14\xc7\x44\xbf\x6c\x3a\xc3"
+                         "\xff\xb6\x81\xbd\xe2\xd5\x06\xc7"
+                         "\x3c\xa1\x52\x13\x03\x8a\x23\x3a",
+               .alen   = 32,
+               .input  = "\x3f\x66\xb0\x9d\xe5\x4b\x38\x00"
+                         "\xc6\x0e\x6e\xe5\xd6\x98\xa6\x37"
+                         "\x8c\x26\x33\xc6\xb2\xa2\x17\xfa"
+                         "\x64\x19\xc0\x30\xd7\xfc\x14\x6b"
+                         "\xe3\x33\xc2\x04\xb0\x37\xbe\x3f"
+                         "\xa9\xb4\x2d\x68\x03\xa3\x44\xef",
+               .ilen   = 48,
+               .result = "\x02\x87\x4d\x28\x80\x6e\xb2\xed"
+                         "\x99\x2a\xa8\xca\x04\x25\x45\x90"
+                         "\x1d\xdd\x5a\xd9\xe4\xdb\x9c\x9c"
+                         "\x49\xe9\x01\xfe\xa7\x80\x6d\x6b",
+               .rlen   = 32,
+               .novrfy = 1,
+       }, {
+               .key    = "\xa4\x4b\x54\x29\x0a\xb8\x6d\x01"
+                         "\x5b\x80\x2a\xcf\x25\xc4\xb7\x5c"
+                         "\x20\x2c\xad\x30\xc2\x2b\x41\xfb"
+                         "\x0e\x85\xbc\x33\xad\x0f\x2b\xff",
+               .klen   = 32,
+               .iv     = "\x03\xee\x49\x83\xe9\xa9\xff\xe9"
+                         "\x57\xba\xfd\x9e\x00\x00\x00\x00",
+               .alen   = 0,
+               .input  = "\x1f\xb8\x8f\xa3\xdd\x54\x00\xf2",
+               .ilen   = 8,
+               .result = "\x00",
+               .rlen   = 0,
+       }, {
+               .key    = "\x39\xbb\xa7\xbe\x59\x97\x9e\x73"
+                         "\xa2\xbc\x6b\x98\xd7\x75\x7f\xe3"
+                         "\xa4\x48\x93\x39\x26\x71\x4a\xc6"
+                         "\xae\x8f\x11\x4c\xc2\x9c\x4a\xbb",
+               .klen   = 32,
+               .iv     = "\x03\x85\x34\x66\x42\xc8\x92\x0f"
+                         "\x36\x58\xe0\x6b\x00\x00\x00\x00",
+               .alen   = 0,
+               .input  = "\x48\x01\x5e\x02\x24\x04\x66\x47"
+                         "\xa1\xea\x6f\xaf\xe8\xfc\xfb\xdd"
+                         "\xa5\xa9\x87\x8d\x84\xee\x2e\x77"
+                         "\xbb\x86\xb9\xf5\x5c\x6c\xff\xf6"
+                         "\x72\xc3\x8e\xf7\x70\xb1\xb2\x07"
+                         "\xbc\xa8\xa3\xbd\x83\x7c\x1d\x2a",
+               .ilen   = 48,
+               .result = "\xdc\x56\xf2\x71\xb0\xb1\xa0\x6c"
+                         "\xf0\x97\x3a\xfb\x6d\xe7\x32\x99"
+                         "\x3e\xaf\x70\x5e\xb2\x4d\xea\x39"
+                         "\x89\xd4\x75\x7a\x63\xb1\xda\x93",
+               .rlen   = 32,
+               .novrfy = 1,
+       }, {
+               .key    = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7"
+                         "\x96\xe5\xc5\x68\xaa\x95\x35\xe0"
+                         "\x29\xa0\xba\x9e\x48\x78\xd1\xba"
+                         "\x0d\x1a\x53\x3b\xb5\xe3\xf8\x8b",
+               .klen   = 32,
+               .iv     = "\x03\xcf\x76\x3f\xd9\x95\x75\x8f"
+                         "\x44\x89\x40\x7b\x00\x00\x00\x00",
+               .assoc  = "\x8f\x86\x6c\x4d\x1d\xc5\x39\x88"
+                         "\xc8\xf3\x5c\x52\x10\x63\x6f\x2b"
+                         "\x8a\x2a\xc5\x6f\x30\x23\x58\x7b"
+                         "\xfb\x36\x03\x11\xb4\xd9\xf2\xfe",
+               .alen   = 32,
+               .input  = "\x48\x58\xd6\xf3\xad\x63\x58\xbf"
+                         "\xae\xc7\x5e\xae\x83\x8f\x7b\xe4"
+                         "\x78\x5c\x4c\x67\x71\x89\x94\xbf"
+                         "\x47\xf1\x63\x7e\x1c\x59\xbd\xc5"
+                         "\x7f\x44\x0a\x0c\x01\x18\x07\x92"
+                         "\xe1\xd3\x51\xce\x32\x6d\x0c\x5b",
+               .ilen   = 48,
+               .result = "\xc2\x54\xc8\xde\x78\x87\x77\x40"
+                         "\x49\x71\xe4\xb7\xe7\xcb\x76\x61"
+                         "\x0a\x41\xb9\xe9\xc0\x76\x54\xab"
+                         "\x04\x49\x3b\x19\x93\x57\x25\x5d",
+               .rlen   = 32,
+       },
+};
+
+/*
+ * rfc4309 refers to section 8 of rfc3610 for test vectors, but they all
+ * use a 13-byte nonce, while we only support an 11-byte nonce.  Worse,
+ * they use AD lengths which are not valid ESP header lengths.
+ *
+ * These vectors are copied/generated from the ones for rfc4106 with
+ * the key truncated by one byte.
+ */
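As a worked illustration of the comment above, assuming the usual testmgr key layout in which an rfc4106(gcm(aes)) key ends in a 4-byte nonce salt and an rfc4309(ccm(aes)) key ends in a 3-byte salt, the derivation is a one-byte truncation of the GCM key. The zeroed gcm_key in main() is a hypothetical placeholder:

#include <string.h>

/* Drop the final salt byte of an rfc4106 key (AES key + 4-byte salt)
 * to obtain the matching rfc4309 key (AES key + 3-byte salt). */
static size_t rfc4106_key_to_rfc4309(const unsigned char *gcm_key,
				     size_t gcm_klen,
				     unsigned char *ccm_key)
{
	size_t ccm_klen = gcm_klen - 1;	/* e.g. klen 20 -> 19 */

	memcpy(ccm_key, gcm_key, ccm_klen);
	return ccm_klen;
}

int main(void)
{
	unsigned char gcm_key[20] = { 0 };	/* hypothetical zeroed key */
	unsigned char ccm_key[19];

	return rfc4106_key_to_rfc4309(gcm_key, sizeof(gcm_key),
				      ccm_key) == 19 ? 0 : 1;
}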
+static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
+       { /* Generated using Crypto++ */
+               .key    = zeroed_string,
+               .klen   = 19,
+               .iv     = zeroed_string,
+               .input  = zeroed_string,
+               .ilen   = 16,
+               .assoc  = zeroed_string,
+               .alen   = 16,
+               .result = "\x2E\x9A\xCA\x6B\xDA\x54\xFC\x6F"
+                         "\x12\x50\xE8\xDE\x81\x3C\x63\x08"
+                         "\x1A\x22\xBA\x75\xEE\xD4\xD5\xB5"
+                         "\x27\x50\x01\xAC\x03\x33\x39\xFB",
+               .rlen   = 32,
+	}, {
+               .key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+                         "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                         "\x00\x00\x00",
+               .klen   = 19,
+               .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
+               .input  = zeroed_string,
+               .ilen   = 16,
+               .assoc  = "\x00\x00\x00\x00\x00\x00\x00\x00"
+                         "\x00\x00\x00\x00\x00\x00\x00\x01",
+               .alen   = 16,
+               .result = "\xCF\xB9\x99\x17\xC8\x86\x0E\x7F"
+                         "\x7E\x76\xF8\xE6\xF8\xCC\x1F\x17"
+                         "\x6A\xE0\x53\x9F\x4B\x73\x7E\xDA"
+                         "\x08\x09\x4E\xC4\x1E\xAD\xC6\xB0",
+               .rlen   = 32,
+       }, {
+               .key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+                         "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                         "\x00\x00\x00",
+               .klen   = 19,
+               .iv     = zeroed_string,
+               .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01",
+               .ilen   = 16,
+               .assoc  = zeroed_string,
+               .alen   = 16,
+               .result = "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6"
+                         "\x61\xF4\xF5\x41\x03\x4A\xE3\x86"
+                         "\xA1\xE2\xC2\x42\x2B\x81\x70\x40"
+                         "\xFD\x7F\x76\xD1\x03\x07\xBB\x0C",
+               .rlen   = 32,
+       }, {
+               .key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+                         "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                         "\x00\x00\x00",
+               .klen   = 19,
+               .iv     = zeroed_string,
+               .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01",
+               .ilen   = 16,
+               .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x00\x00\x00\x00\x00\x00\x00\x00",
+               .alen   = 16,
+               .result = "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6"
+                         "\x61\xF4\xF5\x41\x03\x4A\xE3\x86"
+                         "\x5B\xC0\x73\xE0\x2B\x73\x68\xC9"
+                         "\x2D\x8C\x58\xC2\x90\x3D\xB0\x3E",
+               .rlen   = 32,
+       }, {
+               .key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+                         "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                         "\x00\x00\x00",
+               .klen   = 19,
+               .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
+               .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01",
+               .ilen   = 16,
+               .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x00\x00\x00\x00\x00\x00\x00\x01",
+               .alen   = 16,
+               .result = "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E"
+                         "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16"
+                         "\x43\x8E\x76\x57\x3B\xB4\x05\xE8"
+                         "\xA9\x9B\xBF\x25\xE0\x4F\xC0\xED",
+               .rlen   = 32,
+       }, {
+               .key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+                         "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                         "\x00\x00\x00",
+               .klen   = 19,
+               .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
+               .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01",
+               .ilen   = 64,
+               .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x00\x00\x00\x00\x00\x00\x00\x01",
+               .alen   = 16,
+               .result = "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E"
+                         "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16"
+                         "\x9C\xA4\x97\x83\x3F\x01\xA5\xF4"
+                         "\x43\x09\xE7\xB8\xE9\xD1\xD7\x02"
+                         "\x9B\xAB\x39\x18\xEB\x94\x34\x36"
+                         "\xE6\xC5\xC8\x9B\x00\x81\x9E\x49"
+                         "\x1D\x78\xE1\x48\xE3\xE9\xEA\x8E"
+                         "\x3A\x2B\x67\x5D\x35\x6A\x0F\xDB"
+                         "\x02\x73\xDD\xE7\x30\x4A\x30\x54"
+                         "\x1A\x9D\x09\xCA\xC8\x1C\x32\x5F",
+               .rlen   = 80,
+       }, {
+               .key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
+                         "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+                         "\x00\x00\x00",
+               .klen   = 19,
+               .iv     = "\x00\x00\x45\x67\x89\xab\xcd\xef",
+               .input  = "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff",
+               .ilen   = 192,
+               .assoc  = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+                         "\xaa\xaa\xaa\xaa\x00\x00\x45\x67"
+                         "\x89\xab\xcd\xef",
+               .alen   = 20,
+               .result = "\x64\x17\xDC\x24\x9D\x92\xBA\x5E"
+                         "\x7C\x64\x6D\x33\x46\x77\xAC\xB1"
+                         "\x5C\x9E\xE2\xC7\x27\x11\x3E\x95"
+                         "\x7D\xBE\x28\xC8\xC1\xCA\x5E\x8C"
+                         "\xB4\xE2\xDE\x9F\x53\x59\x26\xDB"
+                         "\x0C\xD4\xE4\x07\x9A\xE6\x3E\x01"
+                         "\x58\x0D\x3E\x3D\xD5\x21\xEB\x04"
+                         "\x06\x9D\x5F\xB9\x02\x49\x1A\x2B"
+                         "\xBA\xF0\x4E\x3B\x85\x50\x5B\x09"
+                         "\xFE\xEC\xFC\x54\xEC\x0C\xE2\x79"
+                         "\x8A\x2F\x5F\xD7\x05\x5D\xF1\x6D"
+                         "\x22\xEB\xD1\x09\x80\x3F\x5A\x70"
+                         "\xB2\xB9\xD3\x63\x99\xC2\x4D\x1B"
+                         "\x36\x12\x00\x89\xAA\x5D\x55\xDA"
+                         "\x1D\x5B\xD8\x3C\x5F\x09\xD2\xE6"
+                         "\x39\x41\x5C\xF0\xBE\x26\x4E\x5F"
+                         "\x2B\x50\x44\x52\xC2\x10\x7D\x38"
+                         "\x82\x64\x83\x0C\xAE\x49\xD0\xE5"
+                         "\x4F\xE5\x66\x4C\x58\x7A\xEE\x43"
+                         "\x3B\x51\xFE\xBA\x24\x8A\xFE\xDC"
+                         "\x19\x6D\x60\x66\x61\xF9\x9A\x3F"
+                         "\x75\xFC\x38\x53\x5B\xB5\xCD\x52"
+                         "\x4F\xE5\xE4\xC9\xFE\x10\xCB\x98"
+                         "\xF0\x06\x5B\x07\xAB\xBB\xF4\x0E"
+                         "\x2D\xC2\xDD\x5D\xDD\x22\x9A\xCC"
+                         "\x39\xAB\x63\xA5\x3D\x9C\x51\x8A",
+               .rlen   = 208,
+       }, { /* From draft-mcgrew-gcm-test-01 */
+               .key    = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA"
+                         "\x90\x6A\xC7\x3C\x36\x13\xA6\x34"
+                         "\x2E\x44\x3B",
+               .klen   = 19,
+               .iv     = "\x49\x56\xED\x7E\x3B\x24\x4C\xFE",
+               .input  = "\x45\x00\x00\x48\x69\x9A\x00\x00"
+                         "\x80\x11\x4D\xB7\xC0\xA8\x01\x02"
+                         "\xC0\xA8\x01\x01\x0A\x9B\xF1\x56"
+                         "\x38\xD3\x01\x00\x00\x01\x00\x00"
+                         "\x00\x00\x00\x00\x04\x5F\x73\x69"
+                         "\x70\x04\x5F\x75\x64\x70\x03\x73"
+                         "\x69\x70\x09\x63\x79\x62\x65\x72"
+                         "\x63\x69\x74\x79\x02\x64\x6B\x00"
+                         "\x00\x21\x00\x01\x01\x02\x02\x01",
+               .ilen   = 72,
+               .assoc  = "\x00\x00\x43\x21\x87\x65\x43\x21"
+                         "\x00\x00\x00\x00\x49\x56\xED\x7E"
+                         "\x3B\x24\x4C\xFE",
+               .alen   = 20,
+               .result = "\x89\xBA\x3E\xEF\xE6\xD6\xCF\xDB"
+                         "\x83\x60\xF5\xBA\x3A\x56\x79\xE6"
+                         "\x7E\x0C\x53\xCF\x9E\x87\xE0\x4E"
+                         "\x1A\x26\x01\x24\xC7\x2E\x3D\xBF"
+                         "\x29\x2C\x91\xC1\xB8\xA8\xCF\xE0"
+                         "\x39\xF8\x53\x6D\x31\x22\x2B\xBF"
+                         "\x98\x81\xFC\x34\xEE\x85\x36\xCD"
+                         "\x26\xDB\x6C\x7A\x0C\x77\x8A\x35"
+                         "\x18\x85\x54\xB2\xBC\xDD\x3F\x43"
+                         "\x61\x06\x8A\xDF\x86\x3F\xB4\xAC"
+                         "\x97\xDC\xBD\xFD\x92\x10\xC5\xFF",
+               .rlen   = 88,
+       }, {
+               .key    = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
+                         "\x6D\x6A\x8F\x94\x67\x30\x83\x08"
+                         "\xCA\xFE\xBA",
+               .klen   = 19,
+               .iv     = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+               .input  = "\x45\x00\x00\x3E\x69\x8F\x00\x00"
+                         "\x80\x11\x4D\xCC\xC0\xA8\x01\x02"
+                         "\xC0\xA8\x01\x01\x0A\x98\x00\x35"
+                         "\x00\x2A\x23\x43\xB2\xD0\x01\x00"
+                         "\x00\x01\x00\x00\x00\x00\x00\x00"
+                         "\x03\x73\x69\x70\x09\x63\x79\x62"
+                         "\x65\x72\x63\x69\x74\x79\x02\x64"
+                         "\x6B\x00\x00\x01\x00\x01\x00\x01",
+               .ilen   = 64,
+               .assoc  = "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+                         "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+               .alen   = 16,
+               .result = "\x4B\xC2\x70\x60\x64\xD2\xF3\xC8"
+                         "\xE5\x26\x8A\xDE\xB8\x7E\x7D\x16"
+                         "\x56\xC7\xD2\x88\xBA\x8D\x58\xAF"
+                         "\xF5\x71\xB6\x37\x84\xA7\xB1\x99"
+                         "\x51\x5C\x0D\xA0\x27\xDE\xE7\x2D"
+                         "\xEF\x25\x88\x1F\x1D\x77\x11\xFF"
+                         "\xDB\xED\xEE\x56\x16\xC5\x5C\x9B"
+                         "\x00\x62\x1F\x68\x4E\x7C\xA0\x97"
+                         "\x10\x72\x7E\x53\x13\x3B\x68\xE4"
+                         "\x30\x99\x91\x79\x09\xEA\xFF\x6A",
+               .rlen   = 80,
+       }, {
+               .key    = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+                         "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+                         "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+                         "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+                         "\x11\x22\x33",
+               .klen   = 35,
+               .iv     = "\x01\x02\x03\x04\x05\x06\x07\x08",
+               .input  = "\x45\x00\x00\x30\x69\xA6\x40\x00"
+                         "\x80\x06\x26\x90\xC0\xA8\x01\x02"
+                         "\x93\x89\x15\x5E\x0A\x9E\x00\x8B"
+                         "\x2D\xC5\x7E\xE0\x00\x00\x00\x00"
+                         "\x70\x02\x40\x00\x20\xBF\x00\x00"
+                         "\x02\x04\x05\xB4\x01\x01\x04\x02"
+                         "\x01\x02\x02\x01",
+               .ilen   = 52,
+               .assoc  = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02"
+                         "\x01\x02\x03\x04\x05\x06\x07\x08",
+               .alen   = 16,
+               .result = "\xD6\x31\x0D\x2B\x3D\x6F\xBD\x2F"
+                         "\x58\x41\x7E\xFF\x9A\x9E\x09\xB4"
+                         "\x1A\xF7\xF6\x42\x31\xCD\xBF\xAD"
+                         "\x27\x0E\x2C\xF2\xDB\x10\xDF\x55"
+                         "\x8F\x0D\xD7\xAC\x23\xBD\x42\x10"
+                         "\xD0\xB2\xAF\xD8\x37\xAC\x6B\x0B"
+                         "\x11\xD4\x0B\x12\xEC\xB4\xB1\x92"
+                         "\x23\xA6\x10\xB0\x26\xD6\xD9\x26"
+                         "\x5A\x48\x6A\x3E",
+               .rlen   = 68,
+       }, {
+               .key    = "\x00\x00\x00\x00\x00\x00\x00\x00"
+                         "\x00\x00\x00\x00\x00\x00\x00\x00"
+                         "\x00\x00\x00",
+               .klen   = 19,
+               .iv     = "\x00\x00\x00\x00\x00\x00\x00\x00",
+               .input  = "\x45\x00\x00\x3C\x99\xC5\x00\x00"
+                         "\x80\x01\xCB\x7A\x40\x67\x93\x18"
+                         "\x01\x01\x01\x01\x08\x00\x07\x5C"
+                         "\x02\x00\x44\x00\x61\x62\x63\x64"
+                         "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+                         "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+                         "\x75\x76\x77\x61\x62\x63\x64\x65"
+                         "\x66\x67\x68\x69\x01\x02\x02\x01",
+               .ilen   = 64,
+               .assoc  = "\x00\x00\x00\x00\x00\x00\x00\x01"
+                         "\x00\x00\x00\x00\x00\x00\x00\x00",
+               .alen   = 16,
+               .result = "\x6B\x9A\xCA\x57\x43\x91\xFC\x6F"
+                         "\x92\x51\x23\xA4\xC1\x5B\xF0\x10"
+                         "\xF3\x13\xF4\xF8\xA1\x9A\xB4\xDC"
+                         "\x89\xC8\xF8\x42\x62\x95\xB7\xCB"
+                         "\xB8\xF5\x0F\x1B\x2E\x94\xA2\xA7"
+                         "\xBF\xFB\x8A\x92\x13\x63\xD1\x3C"
+                         "\x08\xF5\xE8\xA6\xAA\xF6\x34\xF9"
+                         "\x42\x05\xAF\xB3\xE7\x9A\xFC\xEE"
+                         "\x36\x25\xC1\x10\x12\x1C\xCA\x82"
+                         "\xEA\xE6\x63\x5A\x57\x28\xA9\x9A",
+               .rlen   = 80,
+       }, {
+               .key    = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+                         "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+                         "\x57\x69\x0E",
+               .klen   = 19,
+               .iv     = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+               .input  = "\x45\x00\x00\x3C\x99\xC3\x00\x00"
+                         "\x80\x01\xCB\x7C\x40\x67\x93\x18"
+                         "\x01\x01\x01\x01\x08\x00\x08\x5C"
+                         "\x02\x00\x43\x00\x61\x62\x63\x64"
+                         "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+                         "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+                         "\x75\x76\x77\x61\x62\x63\x64\x65"
+                         "\x66\x67\x68\x69\x01\x02\x02\x01",
+               .ilen   = 64,
+               .assoc  = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
+                         "\x10\x10\x10\x10\x4E\x28\x00\x00"
+                         "\xA2\xFC\xA1\xA3",
+               .alen   = 20,
+               .result = "\x6A\x6B\x45\x2B\x7C\x67\x52\xF6"
+                         "\x10\x60\x40\x62\x6B\x4F\x97\x8E"
+                         "\x0B\xB2\x22\x97\xCB\x21\xE0\x90"
+                         "\xA2\xE7\xD1\x41\x30\xE4\x4B\x1B"
+                         "\x79\x01\x58\x50\x01\x06\xE1\xE0"
+                         "\x2C\x83\x79\xD3\xDE\x46\x97\x1A"
+                         "\x30\xB8\xE5\xDF\xD7\x12\x56\x75"
+                         "\xD0\x95\xB7\xB8\x91\x42\xF7\xFD"
+                         "\x97\x57\xCA\xC1\x20\xD0\x86\xB9"
+                         "\x66\x9D\xB4\x2B\x96\x22\xAC\x67",
+               .rlen   = 80,
+       }, {
+               .key    = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+                         "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+                         "\x57\x69\x0E",
+               .klen   = 19,
+               .iv     = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+               .input  = "\x45\x00\x00\x1C\x42\xA2\x00\x00"
+                         "\x80\x01\x44\x1F\x40\x67\x93\xB6"
+                         "\xE0\x00\x00\x02\x0A\x00\xF5\xFF"
+                         "\x01\x02\x02\x01",
+               .ilen   = 28,
+               .assoc  = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
+                         "\x10\x10\x10\x10\x4E\x28\x00\x00"
+                         "\xA2\xFC\xA1\xA3",
+               .alen   = 20,
+               .result = "\x6A\x6B\x45\x0B\xA7\x06\x52\xF6"
+                         "\x10\x60\xCF\x01\x6B\x4F\x97\x20"
+                         "\xEA\xB3\x23\x94\xC9\x21\x1D\x33"
+                         "\xA1\xE5\x90\x40\x05\x37\x45\x70"
+                         "\xB5\xD6\x09\x0A\x23\x73\x33\xF9"
+                         "\x08\xB4\x22\xE4",
+               .rlen   = 44,
+       }, {
+               .key    = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
+                         "\x6D\x6A\x8F\x94\x67\x30\x83\x08"
+                         "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
+                         "\xCA\xFE\xBA",
+               .klen   = 27,
+               .iv     = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+               .input  = "\x45\x00\x00\x28\xA4\xAD\x40\x00"
+                         "\x40\x06\x78\x80\x0A\x01\x03\x8F"
+                         "\x0A\x01\x06\x12\x80\x23\x06\xB8"
+                         "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E"
+                         "\x50\x10\x16\xD0\x75\x68\x00\x01",
+               .ilen   = 40,
+               .assoc  = "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+                         "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+               .alen   = 16,
+               .result = "\x05\x22\x15\xD1\x52\x56\x85\x04"
+                         "\xA8\x5C\x5D\x6D\x7E\x6E\xF5\xFA"
+                         "\xEA\x16\x37\x50\xF3\xDF\x84\x3B"
+                         "\x2F\x32\x18\x57\x34\x2A\x8C\x23"
+                         "\x67\xDF\x6D\x35\x7B\x54\x0D\xFB"
+                         "\x34\xA5\x9F\x6C\x48\x30\x1E\x22"
+                         "\xFE\xB1\x22\x17\x17\x8A\xB9\x5B",
+               .rlen   = 56,
+       }, {
+               .key    = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+                         "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+                         "\xDE\xCA\xF8",
+               .klen   = 19,
+               .iv     = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74",
+               .input  = "\x45\x00\x00\x49\x33\xBA\x00\x00"
+                         "\x7F\x11\x91\x06\xC3\xFB\x1D\x10"
+                         "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE"
+                         "\x00\x35\xDD\x7B\x80\x03\x02\xD5"
+                         "\x00\x00\x4E\x20\x00\x1E\x8C\x18"
+                         "\xD7\x5B\x81\xDC\x91\xBA\xA0\x47"
+                         "\x6B\x91\xB9\x24\xB2\x80\x38\x9D"
+                         "\x92\xC9\x63\xBA\xC0\x46\xEC\x95"
+                         "\x9B\x62\x66\xC0\x47\x22\xB1\x49"
+                         "\x23\x01\x01\x01",
+               .ilen   = 76,
+               .assoc  = "\x00\x00\x01\x00\x00\x00\x00\x00"
+                         "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+                         "\xCE\xFA\xCE\x74",
+               .alen   = 20,
+               .result = "\x92\xD0\x53\x79\x33\x38\xD5\xF3"
+                         "\x7D\xE4\x7A\x8E\x86\x03\xC9\x90"
+                         "\x96\x35\xAB\x9C\xFB\xE8\xA3\x76"
+                         "\xE9\xE9\xE2\xD1\x2E\x11\x0E\x00"
+                         "\xFA\xCE\xB5\x9E\x02\xA7\x7B\xEA"
+                         "\x71\x9A\x58\xFB\xA5\x8A\xE1\xB7"
+                         "\x9C\x39\x9D\xE3\xB5\x6E\x69\xE6"
+                         "\x63\xC9\xDB\x05\x69\x51\x12\xAD"
+                         "\x3E\x00\x32\x73\x86\xF2\xEE\xF5"
+                         "\x0F\xE8\x81\x7E\x84\xD3\xC0\x0D"
+                         "\x76\xD6\x55\xC6\xB4\xC2\x34\xC7"
+                         "\x12\x25\x0B\xF9",
+               .rlen   = 92,
+       }, {
+               .key    = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+                         "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+                         "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+                         "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+                         "\x73\x61\x6C",
+               .klen   = 35,
+               .iv     = "\x61\x6E\x64\x01\x69\x76\x65\x63",
+               .input  = "\x45\x08\x00\x28\x73\x2C\x00\x00"
+                         "\x40\x06\xE9\xF9\x0A\x01\x06\x12"
+                         "\x0A\x01\x03\x8F\x06\xB8\x80\x23"
+                         "\xDD\x6B\xAF\xBE\xCB\x71\x26\x02"
+                         "\x50\x10\x1F\x64\x6D\x54\x00\x01",
+               .ilen   = 40,
+               .assoc  = "\x17\x40\x5E\x67\x15\x6F\x31\x26"
+                         "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+                         "\x69\x76\x65\x63",
+               .alen   = 20,
+               .result = "\xCC\x74\xB7\xD3\xB0\x38\x50\x42"
+                         "\x2C\x64\x87\x46\x1E\x34\x10\x05"
+                         "\x29\x6B\xBB\x36\xE9\x69\xAD\x92"
+                         "\x82\xA1\x10\x6A\xEB\x0F\xDC\x7D"
+                         "\x08\xBA\xF3\x91\xCA\xAA\x61\xDA"
+                         "\x62\xF4\x14\x61\x5C\x9D\xB5\xA7"
+                         "\xEE\xD7\xB9\x7E\x87\x99\x9B\x7D",
+               .rlen   = 56,
+       }, {
+               .key    = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+                         "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+                         "\x57\x69\x0E",
+               .klen   = 19,
+               .iv     = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+               .input  = "\x45\x00\x00\x49\x33\x3E\x00\x00"
+                         "\x7F\x11\x91\x82\xC3\xFB\x1D\x10"
+                         "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE"
+                         "\x00\x35\xCB\x45\x80\x03\x02\x5B"
+                         "\x00\x00\x01\xE0\x00\x1E\x8C\x18"
+                         "\xD6\x57\x59\xD5\x22\x84\xA0\x35"
+                         "\x2C\x71\x47\x5C\x88\x80\x39\x1C"
+                         "\x76\x4D\x6E\x5E\xE0\x49\x6B\x32"
+                         "\x5A\xE2\x70\xC0\x38\x99\x49\x39"
+                         "\x15\x01\x01\x01",
+               .ilen   = 76,
+               .assoc  = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
+                         "\x10\x10\x10\x10\x4E\x28\x00\x00"
+                         "\xA2\xFC\xA1\xA3",
+               .alen   = 20,
+               .result = "\x6A\x6B\x45\x5E\xD6\x9A\x52\xF6"
+                         "\xEF\x70\x1A\x9C\xE8\xD3\x19\x86"
+                         "\xC8\x02\xF0\xB0\x03\x09\xD9\x02"
+                         "\xA0\xD2\x59\x04\xD1\x85\x2A\x24"
+                         "\x1C\x67\x3E\xD8\x68\x72\x06\x94"
+                         "\x97\xBA\x4F\x76\x8D\xB0\x44\x5B"
+                         "\x69\xBF\xD5\xE2\x3D\xF1\x0B\x0C"
+                         "\xC0\xBF\xB1\x8F\x70\x09\x9E\xCE"
+                         "\xA5\xF2\x55\x58\x84\xFA\xF9\xB5"
+                         "\x23\xF4\x84\x40\x74\x14\x8A\x6B"
+                         "\xDB\xD7\x67\xED\xA4\x93\xF3\x47"
+                         "\xCC\xF7\x46\x6F",
+               .rlen   = 92,
+       }, {
+               .key    = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+                         "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+                         "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+                         "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+                         "\x73\x61\x6C",
+               .klen   = 35,
+               .iv     = "\x61\x6E\x64\x01\x69\x76\x65\x63",
+               .input  = "\x63\x69\x73\x63\x6F\x01\x72\x75"
+                         "\x6C\x65\x73\x01\x74\x68\x65\x01"
+                         "\x6E\x65\x74\x77\x65\x01\x64\x65"
+                         "\x66\x69\x6E\x65\x01\x74\x68\x65"
+                         "\x74\x65\x63\x68\x6E\x6F\x6C\x6F"
+                         "\x67\x69\x65\x73\x01\x74\x68\x61"
+                         "\x74\x77\x69\x6C\x6C\x01\x64\x65"
+                         "\x66\x69\x6E\x65\x74\x6F\x6D\x6F"
+                         "\x72\x72\x6F\x77\x01\x02\x02\x01",
+               .ilen   = 72,
+               .assoc  = "\x17\x40\x5E\x67\x15\x6F\x31\x26"
+                         "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+                         "\x69\x76\x65\x63",
+               .alen   = 20,
+               .result = "\xEA\x15\xC4\x98\xAC\x15\x22\x37"
+                         "\x00\x07\x1D\xBE\x60\x5D\x73\x16"
+                         "\x4D\x0F\xCC\xCE\x8A\xD0\x49\xD4"
+                         "\x39\xA3\xD1\xB1\x21\x0A\x92\x1A"
+                         "\x2C\xCF\x8F\x9D\xC9\x91\x0D\xB4"
+                         "\x15\xFC\xBC\xA5\xC5\xBF\x54\xE5"
+                         "\x1C\xC7\x32\x41\x07\x7B\x2C\xB6"
+                         "\x5C\x23\x7C\x93\xEA\xEF\x23\x1C"
+                         "\x73\xF4\xE7\x12\x84\x4C\x37\x0A"
+                         "\x4A\x8F\x06\x37\x48\xF9\xF9\x05"
+                         "\x55\x13\x40\xC3\xD5\x55\x3A\x3D",
+               .rlen   = 88,
+       }, {
+               .key    = "\x7D\x77\x3D\x00\xC1\x44\xC5\x25"
+                         "\xAC\x61\x9D\x18\xC8\x4A\x3F\x47"
+                         "\xD9\x66\x42",
+               .klen   = 19,
+               .iv     = "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
+               .input  = "\x01\x02\x02\x01",
+               .ilen   = 4,
+               .assoc  = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF"
+                         "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
+               .alen   = 16,
+               .result = "\x4C\x72\x63\x30\x2F\xE6\x56\xDD"
+                         "\xD0\xD8\x60\x9D\x8B\xEF\x85\x90"
+                         "\xF7\x61\x24\x62",
+               .rlen   = 20,
+       }, {
+               .key    = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+                         "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+                         "\xDE\xCA\xF8",
+               .klen   = 19,
+               .iv     = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74",
+               .input  = "\x74\x6F\x01\x62\x65\x01\x6F\x72"
+                         "\x01\x6E\x6F\x74\x01\x74\x6F\x01"
+                         "\x62\x65\x00\x01",
+               .ilen   = 20,
+               .assoc  = "\x00\x00\x01\x00\x00\x00\x00\x00"
+                         "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+                         "\xCE\xFA\xCE\x74",
+               .alen   = 20,
+               .result = "\xA3\xBF\x52\x52\x65\x83\xBA\x81"
+                         "\x03\x9B\x84\xFC\x44\x8C\xBB\x81"
+                         "\x36\xE1\x78\xBB\xA5\x49\x3A\xD0"
+                         "\xF0\x6B\x21\xAF\x98\xC0\x34\xDC"
+                         "\x17\x17\x65\xAD",
+               .rlen   = 36,
+       }, {
+               .key    = "\x6C\x65\x67\x61\x6C\x69\x7A\x65"
+                         "\x6D\x61\x72\x69\x6A\x75\x61\x6E"
+                         "\x61\x61\x6E\x64\x64\x6F\x69\x74"
+                         "\x62\x65\x66\x6F\x72\x65\x69\x61"
+                         "\x74\x75\x72",
+               .klen   = 35,
+               .iv     = "\x33\x30\x21\x69\x67\x65\x74\x6D",
+               .input  = "\x45\x00\x00\x30\xDA\x3A\x00\x00"
+                         "\x80\x01\xDF\x3B\xC0\xA8\x00\x05"
+                         "\xC0\xA8\x00\x01\x08\x00\xC6\xCD"
+                         "\x02\x00\x07\x00\x61\x62\x63\x64"
+                         "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+                         "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+                         "\x01\x02\x02\x01",
+               .ilen   = 52,
+               .assoc  = "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF"
+                         "\xFF\xFF\xFF\xFF\x33\x30\x21\x69"
+                         "\x67\x65\x74\x6D",
+               .alen   = 20,
+               .result = "\x96\xFD\x86\xF8\xD1\x98\xFF\x10"
+                         "\xAB\x8C\xDA\x8A\x5A\x08\x38\x1A"
+                         "\x48\x59\x80\x18\x1A\x18\x1A\x04"
+                         "\xC9\x0D\xE3\xE7\x0E\xA4\x0B\x75"
+                         "\x92\x9C\x52\x5C\x0B\xFB\xF8\xAF"
+                         "\x16\xC3\x35\xA8\xE7\xCE\x84\x04"
+                         "\xEB\x40\x6B\x7A\x8E\x75\xBB\x42"
+                         "\xE0\x63\x4B\x21\x44\xA2\x2B\x2B"
+                         "\x39\xDB\xC8\xDC",
+               .rlen   = 68,
+       }, {
+               .key    = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+                         "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+                         "\x57\x69\x0E",
+               .klen   = 19,
+               .iv     = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+               .input  = "\x45\x00\x00\x30\xDA\x3A\x00\x00"
+                         "\x80\x01\xDF\x3B\xC0\xA8\x00\x05"
+                         "\xC0\xA8\x00\x01\x08\x00\xC6\xCD"
+                         "\x02\x00\x07\x00\x61\x62\x63\x64"
+                         "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+                         "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+                         "\x01\x02\x02\x01",
+               .ilen   = 52,
+               .assoc  = "\x3F\x7E\xF6\x42\x10\x10\x10\x10"
+                         "\x10\x10\x10\x10\x4E\x28\x00\x00"
+                         "\xA2\xFC\xA1\xA3",
+               .alen   = 20,
+               .result = "\x6A\x6B\x45\x27\x3F\x9E\x52\xF6"
+                         "\x10\x60\x54\x25\xEB\x80\x04\x93"
+                         "\xCA\x1B\x23\x97\xCB\x21\x2E\x01"
+                         "\xA2\xE7\x95\x41\x30\xE4\x4B\x1B"
+                         "\x79\x01\x58\x50\x01\x06\xE1\xE0"
+                         "\x2C\x83\x79\xD3\xDE\x46\x97\x1A"
+                         "\x44\xCC\x90\xBF\x00\x94\x94\x92"
+                         "\x20\x17\x0C\x1B\x55\xDE\x7E\x68"
+                         "\xF4\x95\x5D\x4F",
+               .rlen   = 68,
+       }, {
+               .key    = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA"
+                         "\x90\x6A\xC7\x3C\x36\x13\xA6\x34"
+                         "\x22\x43\x3C",
+               .klen   = 19,
+               .iv     = "\x48\x55\xEC\x7D\x3A\x23\x4B\xFD",
+               .input  = "\x08\x00\xC6\xCD\x02\x00\x07\x00"
+                         "\x61\x62\x63\x64\x65\x66\x67\x68"
+                         "\x69\x6A\x6B\x6C\x6D\x6E\x6F\x70"
+                         "\x71\x72\x73\x74\x01\x02\x02\x01",
+               .ilen   = 32,
+               .assoc  = "\x00\x00\x43\x21\x87\x65\x43\x21"
+                         "\x00\x00\x00\x07\x48\x55\xEC\x7D"
+                         "\x3A\x23\x4B\xFD",
+               .alen   = 20,
+               .result = "\x67\xE9\x28\xB3\x1C\xA4\x6D\x02"
+                         "\xF0\xB5\x37\xB6\x6B\x2F\xF5\x4F"
+                         "\xF8\xA3\x4C\x53\xB8\x12\x09\xBF"
+                         "\x58\x7D\xCF\x29\xA3\x41\x68\x6B"
+                         "\xCE\xE8\x79\x85\x3C\xB0\x3A\x8F"
+                         "\x16\xB0\xA1\x26\xC9\xBC\xBC\xA6",
+               .rlen   = 48,
+       }
+};
+
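Each entry in these RFC 4309 templates packs the 3-byte salt into the tail of .key (so klen is the AES key length plus 3), carries an 8-byte per-packet IV, and appends a 16-byte ICV to the ciphertext in .result; the trailing 8 bytes of .assoc duplicate the IV. As a hedged cross-check outside the kernel harness, such a vector can be replayed against OpenSSL's CCM mode. The sketch below uses the 4-byte-plaintext vector from this template and assumes the real AAD is .assoc with that IV copy stripped; it is illustrative, not kernel code.

/*
 * Hedged sketch: replay one aes_ccm_rfc4309 encryption vector with
 * OpenSSL. Assumptions: the AES key is the first 16 bytes of .key,
 * the CCM nonce is salt (last 3 key bytes) || 8-byte IV, the AAD is
 * .assoc minus its trailing IV copy, and a 16-byte ICV follows the
 * ciphertext. Build: cc ccm_check.c -lcrypto
 */
#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>

int main(void)
{
	/* The 4-byte-plaintext vector from the template above. */
	static const unsigned char key[19] =
		"\x7D\x77\x3D\x00\xC1\x44\xC5\x25\xAC\x61\x9D\x18"
		"\xC8\x4A\x3F\x47\xD9\x66\x42";
	static const unsigned char iv[8] = "\x43\x45\x7E\x91\x82\x44\x3B\xC6";
	static const unsigned char assoc[16] =
		"\x33\x54\x67\xAE\xFF\xFF\xFF\xFF"
		"\x43\x45\x7E\x91\x82\x44\x3B\xC6";
	static const unsigned char pt[4] = "\x01\x02\x02\x01";
	unsigned char nonce[11], out[4 + 16];
	int len;

	memcpy(nonce, key + 16, 3);	/* salt from the key tail */
	memcpy(nonce + 3, iv, 8);	/* per-packet IV */

	EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
	EVP_EncryptInit_ex(ctx, EVP_aes_128_ccm(), NULL, NULL, NULL);
	EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_SET_IVLEN, 11, NULL);
	EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_SET_TAG, 16, NULL);
	EVP_EncryptInit_ex(ctx, NULL, NULL, key, nonce);
	EVP_EncryptUpdate(ctx, NULL, &len, NULL, sizeof(pt)); /* total length */
	EVP_EncryptUpdate(ctx, NULL, &len, assoc, 8);	/* AAD sans IV copy */
	EVP_EncryptUpdate(ctx, out, &len, pt, sizeof(pt));
	EVP_EncryptFinal_ex(ctx, out + len, &len);
	EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_GET_TAG, 16, out + sizeof(pt));
	EVP_CIPHER_CTX_free(ctx);

	for (size_t i = 0; i < sizeof(out); i++)	/* compare to .result */
		printf("%02X%c", out[i], i % 8 == 7 ? '\n' : ' ');
	return 0;
}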
+static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
+       { /* Generated using Crypto++ */
+               .key    = zeroed_string,
+               .klen   = 19,
+               .iv     = zeroed_string,
+               .result = zeroed_string,
+               .rlen   = 16,
+               .assoc  = zeroed_string,
+               .alen   = 16,
+               .input  = "\x2E\x9A\xCA\x6B\xDA\x54\xFC\x6F"
+                         "\x12\x50\xE8\xDE\x81\x3C\x63\x08"
+                         "\x1A\x22\xBA\x75\xEE\xD4\xD5\xB5"
+                         "\x27\x50\x01\xAC\x03\x33\x39\xFB",
+               .ilen   = 32,
+       }, {
+               .key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+                         "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                         "\x00\x00\x00",
+               .klen   = 19,
+               .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
+               .result = zeroed_string,
+               .rlen   = 16,
+               .assoc  = "\x00\x00\x00\x00\x00\x00\x00\x00"
+                         "\x00\x00\x00\x00\x00\x00\x00\x01",
+               .alen   = 16,
+               .input  = "\xCF\xB9\x99\x17\xC8\x86\x0E\x7F"
+                         "\x7E\x76\xF8\xE6\xF8\xCC\x1F\x17"
+                         "\x6A\xE0\x53\x9F\x4B\x73\x7E\xDA"
+                         "\x08\x09\x4E\xC4\x1E\xAD\xC6\xB0",
+               .ilen   = 32,
+       }, {
+               .key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+                         "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                         "\x00\x00\x00",
+               .klen   = 19,
+               .iv     = zeroed_string,
+               .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01",
+               .rlen   = 16,
+               .assoc  = zeroed_string,
+               .alen   = 16,
+               .input  = "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6"
+                         "\x61\xF4\xF5\x41\x03\x4A\xE3\x86"
+                         "\xA1\xE2\xC2\x42\x2B\x81\x70\x40"
+                         "\xFD\x7F\x76\xD1\x03\x07\xBB\x0C",
+               .ilen   = 32,
+       }, {
+               .key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+                         "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                         "\x00\x00\x00",
+               .klen   = 19,
+               .iv     = zeroed_string,
+               .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01",
+               .rlen   = 16,
+               .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x00\x00\x00\x00\x00\x00\x00\x00",
+               .alen   = 16,
+               .input  = "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6"
+                         "\x61\xF4\xF5\x41\x03\x4A\xE3\x86"
+                         "\x5B\xC0\x73\xE0\x2B\x73\x68\xC9"
+                         "\x2D\x8C\x58\xC2\x90\x3D\xB0\x3E",
+               .ilen   = 32,
+       }, {
+               .key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+                         "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                         "\x00\x00\x00",
+               .klen   = 19,
+               .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
+               .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01",
+               .rlen   = 16,
+               .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x00\x00\x00\x00\x00\x00\x00\x01",
+               .alen   = 16,
+               .input  = "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E"
+                         "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16"
+                         "\x43\x8E\x76\x57\x3B\xB4\x05\xE8"
+                         "\xA9\x9B\xBF\x25\xE0\x4F\xC0\xED",
+               .ilen   = 32,
+       }, {
+               .key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+                         "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                         "\x00\x00\x00",
+               .klen   = 19,
+               .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
+               .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x01\x01\x01\x01\x01\x01\x01\x01",
+               .rlen   = 64,
+               .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                         "\x00\x00\x00\x00\x00\x00\x00\x01",
+               .alen   = 16,
+               .input  = "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E"
+                         "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16"
+                         "\x9C\xA4\x97\x83\x3F\x01\xA5\xF4"
+                         "\x43\x09\xE7\xB8\xE9\xD1\xD7\x02"
+                         "\x9B\xAB\x39\x18\xEB\x94\x34\x36"
+                         "\xE6\xC5\xC8\x9B\x00\x81\x9E\x49"
+                         "\x1D\x78\xE1\x48\xE3\xE9\xEA\x8E"
+                         "\x3A\x2B\x67\x5D\x35\x6A\x0F\xDB"
+                         "\x02\x73\xDD\xE7\x30\x4A\x30\x54"
+                         "\x1A\x9D\x09\xCA\xC8\x1C\x32\x5F",
+               .ilen   = 80,
+       }, {
+               .key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
+                         "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+                         "\x00\x00\x00",
+               .klen   = 19,
+               .iv     = "\x00\x00\x45\x67\x89\xab\xcd\xef",
+               .result = "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff"
+                         "\xff\xff\xff\xff\xff\xff\xff\xff",
+               .rlen   = 192,
+               .assoc  = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+                         "\xaa\xaa\xaa\xaa\x00\x00\x45\x67"
+                         "\x89\xab\xcd\xef",
+               .alen   = 20,
+               .input  = "\x64\x17\xDC\x24\x9D\x92\xBA\x5E"
+                         "\x7C\x64\x6D\x33\x46\x77\xAC\xB1"
+                         "\x5C\x9E\xE2\xC7\x27\x11\x3E\x95"
+                         "\x7D\xBE\x28\xC8\xC1\xCA\x5E\x8C"
+                         "\xB4\xE2\xDE\x9F\x53\x59\x26\xDB"
+                         "\x0C\xD4\xE4\x07\x9A\xE6\x3E\x01"
+                         "\x58\x0D\x3E\x3D\xD5\x21\xEB\x04"
+                         "\x06\x9D\x5F\xB9\x02\x49\x1A\x2B"
+                         "\xBA\xF0\x4E\x3B\x85\x50\x5B\x09"
+                         "\xFE\xEC\xFC\x54\xEC\x0C\xE2\x79"
+                         "\x8A\x2F\x5F\xD7\x05\x5D\xF1\x6D"
+                         "\x22\xEB\xD1\x09\x80\x3F\x5A\x70"
+                         "\xB2\xB9\xD3\x63\x99\xC2\x4D\x1B"
+                         "\x36\x12\x00\x89\xAA\x5D\x55\xDA"
+                         "\x1D\x5B\xD8\x3C\x5F\x09\xD2\xE6"
+                         "\x39\x41\x5C\xF0\xBE\x26\x4E\x5F"
+                         "\x2B\x50\x44\x52\xC2\x10\x7D\x38"
+                         "\x82\x64\x83\x0C\xAE\x49\xD0\xE5"
+                         "\x4F\xE5\x66\x4C\x58\x7A\xEE\x43"
+                         "\x3B\x51\xFE\xBA\x24\x8A\xFE\xDC"
+                         "\x19\x6D\x60\x66\x61\xF9\x9A\x3F"
+                         "\x75\xFC\x38\x53\x5B\xB5\xCD\x52"
+                         "\x4F\xE5\xE4\xC9\xFE\x10\xCB\x98"
+                         "\xF0\x06\x5B\x07\xAB\xBB\xF4\x0E"
+                         "\x2D\xC2\xDD\x5D\xDD\x22\x9A\xCC"
+                         "\x39\xAB\x63\xA5\x3D\x9C\x51\x8A",
+               .ilen   = 208,
+       }, { /* From draft-mcgrew-gcm-test-01 */
+               .key    = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA"
+                         "\x90\x6A\xC7\x3C\x36\x13\xA6\x34"
+                         "\x2E\x44\x3B",
+               .klen   = 19,
+               .iv     = "\x49\x56\xED\x7E\x3B\x24\x4C\xFE",
+               .result = "\x45\x00\x00\x48\x69\x9A\x00\x00"
+                         "\x80\x11\x4D\xB7\xC0\xA8\x01\x02"
+                         "\xC0\xA8\x01\x01\x0A\x9B\xF1\x56"
+                         "\x38\xD3\x01\x00\x00\x01\x00\x00"
+                         "\x00\x00\x00\x00\x04\x5F\x73\x69"
+                         "\x70\x04\x5F\x75\x64\x70\x03\x73"
+                         "\x69\x70\x09\x63\x79\x62\x65\x72"
+                         "\x63\x69\x74\x79\x02\x64\x6B\x00"
+                         "\x00\x21\x00\x01\x01\x02\x02\x01",
+               .rlen   = 72,
+               .assoc  = "\x00\x00\x43\x21\x87\x65\x43\x21"
+                         "\x00\x00\x00\x00\x49\x56\xED\x7E"
+                         "\x3B\x24\x4C\xFE",
+               .alen   = 20,
+               .input  = "\x89\xBA\x3E\xEF\xE6\xD6\xCF\xDB"
+                         "\x83\x60\xF5\xBA\x3A\x56\x79\xE6"
+                         "\x7E\x0C\x53\xCF\x9E\x87\xE0\x4E"
+                         "\x1A\x26\x01\x24\xC7\x2E\x3D\xBF"
+                         "\x29\x2C\x91\xC1\xB8\xA8\xCF\xE0"
+                         "\x39\xF8\x53\x6D\x31\x22\x2B\xBF"
+                         "\x98\x81\xFC\x34\xEE\x85\x36\xCD"
+                         "\x26\xDB\x6C\x7A\x0C\x77\x8A\x35"
+                         "\x18\x85\x54\xB2\xBC\xDD\x3F\x43"
+                         "\x61\x06\x8A\xDF\x86\x3F\xB4\xAC"
+                         "\x97\xDC\xBD\xFD\x92\x10\xC5\xFF",
+               .ilen   = 88,
+       }, {
+               .key    = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
+                         "\x6D\x6A\x8F\x94\x67\x30\x83\x08"
+                         "\xCA\xFE\xBA",
+               .klen   = 19,
+               .iv     = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+               .result = "\x45\x00\x00\x3E\x69\x8F\x00\x00"
+                         "\x80\x11\x4D\xCC\xC0\xA8\x01\x02"
+                         "\xC0\xA8\x01\x01\x0A\x98\x00\x35"
+                         "\x00\x2A\x23\x43\xB2\xD0\x01\x00"
+                         "\x00\x01\x00\x00\x00\x00\x00\x00"
+                         "\x03\x73\x69\x70\x09\x63\x79\x62"
+                         "\x65\x72\x63\x69\x74\x79\x02\x64"
+                         "\x6B\x00\x00\x01\x00\x01\x00\x01",
+               .rlen   = 64,
+               .assoc  = "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+                         "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+               .alen   = 16,
+               .input  = "\x4B\xC2\x70\x60\x64\xD2\xF3\xC8"
+                         "\xE5\x26\x8A\xDE\xB8\x7E\x7D\x16"
+                         "\x56\xC7\xD2\x88\xBA\x8D\x58\xAF"
+                         "\xF5\x71\xB6\x37\x84\xA7\xB1\x99"
+                         "\x51\x5C\x0D\xA0\x27\xDE\xE7\x2D"
+                         "\xEF\x25\x88\x1F\x1D\x77\x11\xFF"
+                         "\xDB\xED\xEE\x56\x16\xC5\x5C\x9B"
+                         "\x00\x62\x1F\x68\x4E\x7C\xA0\x97"
+                         "\x10\x72\x7E\x53\x13\x3B\x68\xE4"
+                         "\x30\x99\x91\x79\x09\xEA\xFF\x6A",
+               .ilen   = 80,
+       }, {
+               .key    = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+                         "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+                         "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+                         "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+                         "\x11\x22\x33",
+               .klen   = 35,
+               .iv     = "\x01\x02\x03\x04\x05\x06\x07\x08",
+               .result = "\x45\x00\x00\x30\x69\xA6\x40\x00"
+                         "\x80\x06\x26\x90\xC0\xA8\x01\x02"
+                         "\x93\x89\x15\x5E\x0A\x9E\x00\x8B"
+                         "\x2D\xC5\x7E\xE0\x00\x00\x00\x00"
+                         "\x70\x02\x40\x00\x20\xBF\x00\x00"
+                         "\x02\x04\x05\xB4\x01\x01\x04\x02"
+                         "\x01\x02\x02\x01",
+               .rlen   = 52,
+               .assoc  = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02"
+                         "\x01\x02\x03\x04\x05\x06\x07\x08",
+               .alen   = 16,
+               .input  = "\xD6\x31\x0D\x2B\x3D\x6F\xBD\x2F"
+                         "\x58\x41\x7E\xFF\x9A\x9E\x09\xB4"
+                         "\x1A\xF7\xF6\x42\x31\xCD\xBF\xAD"
+                         "\x27\x0E\x2C\xF2\xDB\x10\xDF\x55"
+                         "\x8F\x0D\xD7\xAC\x23\xBD\x42\x10"
+                         "\xD0\xB2\xAF\xD8\x37\xAC\x6B\x0B"
+                         "\x11\xD4\x0B\x12\xEC\xB4\xB1\x92"
+                         "\x23\xA6\x10\xB0\x26\xD6\xD9\x26"
+                         "\x5A\x48\x6A\x3E",
+               .ilen   = 68,
+       }, {
+               .key    = "\x00\x00\x00\x00\x00\x00\x00\x00"
+                         "\x00\x00\x00\x00\x00\x00\x00\x00"
+                         "\x00\x00\x00",
+               .klen   = 19,
+               .iv     = "\x00\x00\x00\x00\x00\x00\x00\x00",
+               .result = "\x45\x00\x00\x3C\x99\xC5\x00\x00"
+                         "\x80\x01\xCB\x7A\x40\x67\x93\x18"
+                         "\x01\x01\x01\x01\x08\x00\x07\x5C"
+                         "\x02\x00\x44\x00\x61\x62\x63\x64"
+                         "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+                         "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+                         "\x75\x76\x77\x61\x62\x63\x64\x65"
+                         "\x66\x67\x68\x69\x01\x02\x02\x01",
+               .rlen   = 64,
+               .assoc  = "\x00\x00\x00\x00\x00\x00\x00\x01"
+                         "\x00\x00\x00\x00\x00\x00\x00\x00",
+               .alen   = 16,
+               .input  = "\x6B\x9A\xCA\x57\x43\x91\xFC\x6F"
+                         "\x92\x51\x23\xA4\xC1\x5B\xF0\x10"
+                         "\xF3\x13\xF4\xF8\xA1\x9A\xB4\xDC"
+                         "\x89\xC8\xF8\x42\x62\x95\xB7\xCB"
+                         "\xB8\xF5\x0F\x1B\x2E\x94\xA2\xA7"
+                         "\xBF\xFB\x8A\x92\x13\x63\xD1\x3C"
+                         "\x08\xF5\xE8\xA6\xAA\xF6\x34\xF9"
+                         "\x42\x05\xAF\xB3\xE7\x9A\xFC\xEE"
+                         "\x36\x25\xC1\x10\x12\x1C\xCA\x82"
+                         "\xEA\xE6\x63\x5A\x57\x28\xA9\x9A",
+               .ilen   = 80,
+       }, {
+               .key    = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+                         "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+                         "\x57\x69\x0E",
+               .klen   = 19,
+               .iv     = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+               .result = "\x45\x00\x00\x3C\x99\xC3\x00\x00"
+                         "\x80\x01\xCB\x7C\x40\x67\x93\x18"
+                         "\x01\x01\x01\x01\x08\x00\x08\x5C"
+                         "\x02\x00\x43\x00\x61\x62\x63\x64"
+                         "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+                         "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+                         "\x75\x76\x77\x61\x62\x63\x64\x65"
+                         "\x66\x67\x68\x69\x01\x02\x02\x01",
+               .rlen   = 64,
+               .assoc  = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
+                         "\x10\x10\x10\x10\x4E\x28\x00\x00"
+                         "\xA2\xFC\xA1\xA3",
+               .alen   = 20,
+               .input  = "\x6A\x6B\x45\x2B\x7C\x67\x52\xF6"
+                         "\x10\x60\x40\x62\x6B\x4F\x97\x8E"
+                         "\x0B\xB2\x22\x97\xCB\x21\xE0\x90"
+                         "\xA2\xE7\xD1\x41\x30\xE4\x4B\x1B"
+                         "\x79\x01\x58\x50\x01\x06\xE1\xE0"
+                         "\x2C\x83\x79\xD3\xDE\x46\x97\x1A"
+                         "\x30\xB8\xE5\xDF\xD7\x12\x56\x75"
+                         "\xD0\x95\xB7\xB8\x91\x42\xF7\xFD"
+                         "\x97\x57\xCA\xC1\x20\xD0\x86\xB9"
+                         "\x66\x9D\xB4\x2B\x96\x22\xAC\x67",
+               .ilen   = 80,
+       }, {
+               .key    = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+                         "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+                         "\x57\x69\x0E",
+               .klen   = 19,
+               .iv     = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+               .result = "\x45\x00\x00\x1C\x42\xA2\x00\x00"
+                         "\x80\x01\x44\x1F\x40\x67\x93\xB6"
+                         "\xE0\x00\x00\x02\x0A\x00\xF5\xFF"
+                         "\x01\x02\x02\x01",
+               .rlen   = 28,
+               .assoc  = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
+                         "\x10\x10\x10\x10\x4E\x28\x00\x00"
+                         "\xA2\xFC\xA1\xA3",
+               .alen   = 20,
+               .input  = "\x6A\x6B\x45\x0B\xA7\x06\x52\xF6"
+                         "\x10\x60\xCF\x01\x6B\x4F\x97\x20"
+                         "\xEA\xB3\x23\x94\xC9\x21\x1D\x33"
+                         "\xA1\xE5\x90\x40\x05\x37\x45\x70"
+                         "\xB5\xD6\x09\x0A\x23\x73\x33\xF9"
+                         "\x08\xB4\x22\xE4",
+               .ilen   = 44,
+       }, {
+               .key    = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
+                         "\x6D\x6A\x8F\x94\x67\x30\x83\x08"
+                         "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
+                         "\xCA\xFE\xBA",
                .klen   = 27,
-               .iv     = "\xe9\xa9\xff\xe9\x57\xba\xfd\x9e",
-               .assoc  = "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1"
-                         "\x58\x7c\xf2\x5c\x6d\x39\x0a\x64"
-                         "\xa4\xf0\x13\x05\xd1\x77\x99\x67"
-                         "\x11\xc4\xc6\xdb\x00\x56\x36\x61",
-               .alen   = 32,
-               .input  = "\xfb\xe5\x5d\x34\xbe\xe5\xe8\xe7"
-                         "\x5a\xef\x2f\xbf\x1f\x7f\xd4\xb2"
-                         "\x66\xca\x61\x1e\x96\x7a\x61\xb3"
-                         "\x1c\x16\x45\x52\xba\x04\x9c\x9f"
-                         "\xb1\xd2\x40\xbc\x52\x7c\x6f\xb1",
-               .ilen   = 40,
-               .result = "\x85\x34\x66\x42\xc8\x92\x0f\x36"
-                         "\x58\xe0\x6b\x91\x3c\x98\x5c\xbb"
-                         "\x0a\x85\xcc\x02\xad\x7a\x96\xe9"
-                         "\x65\x43\xa4\xc3\x0f\xdc\x55\x81",
-               .rlen   = 32,
+               .iv     = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+               .result = "\x45\x00\x00\x28\xA4\xAD\x40\x00"
+                         "\x40\x06\x78\x80\x0A\x01\x03\x8F"
+                         "\x0A\x01\x06\x12\x80\x23\x06\xB8"
+                         "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E"
+                         "\x50\x10\x16\xD0\x75\x68\x00\x01",
+               .rlen   = 40,
+               .assoc  = "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+                         "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+               .alen   = 16,
+               .input  = "\x05\x22\x15\xD1\x52\x56\x85\x04"
+                         "\xA8\x5C\x5D\x6D\x7E\x6E\xF5\xFA"
+                         "\xEA\x16\x37\x50\xF3\xDF\x84\x3B"
+                         "\x2F\x32\x18\x57\x34\x2A\x8C\x23"
+                         "\x67\xDF\x6D\x35\x7B\x54\x0D\xFB"
+                         "\x34\xA5\x9F\x6C\x48\x30\x1E\x22"
+                         "\xFE\xB1\x22\x17\x17\x8A\xB9\x5B",
+               .ilen   = 56,
        }, {
-               .key    = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7"
-                         "\x96\xe5\xc5\x68\xaa\x95\x35\xe0"
-                         "\x29\xa0\xba\x9e\x48\x78\xd1\xba"
-                         "\xd1\xfc\x57",
-               .klen   = 27,
-               .iv     = "\x9c\xfe\xb8\x9c\xad\x71\xaa\x1f",
-               .assoc  = "\x86\x67\xa5\xa9\x14\x5f\x0d\xc6"
-                         "\xff\x14\xc7\x44\xbf\x6c\x3a\xc3"
-                         "\xff\xb6\x81\xbd\xe2\xd5\x06\xc7"
-                         "\x3c\xa1\x52\x13\x03\x8a\x23\x3a",
-               .alen   = 32,
-               .input  = "\x3f\x66\xb0\x9d\xe5\x4b\x38\x00"
-                         "\xc6\x0e\x6e\xe5\xd6\x98\xa6\x37"
-                         "\x8c\x26\x33\xc6\xb2\xa2\x17\xfa"
-                         "\x64\x19\xc0\x30\xd7\xfc\x14\x6b"
-                         "\xe3\x33\xc2\x04\xb0\x37\xbe\x3f"
-                         "\xa9\xb4\x2d\x68\x03\xa3\x44\xef",
-               .ilen   = 48,
-               .result = "\x02\x87\x4d\x28\x80\x6e\xb2\xed"
-                         "\x99\x2a\xa8\xca\x04\x25\x45\x90"
-                         "\x1d\xdd\x5a\xd9\xe4\xdb\x9c\x9c"
-                         "\x49\xe9\x01\xfe\xa7\x80\x6d\x6b",
-               .rlen   = 32,
-               .novrfy = 1,
+               .key    = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+                         "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+                         "\xDE\xCA\xF8",
+               .klen   = 19,
+               .iv     = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74",
+               .result = "\x45\x00\x00\x49\x33\xBA\x00\x00"
+                         "\x7F\x11\x91\x06\xC3\xFB\x1D\x10"
+                         "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE"
+                         "\x00\x35\xDD\x7B\x80\x03\x02\xD5"
+                         "\x00\x00\x4E\x20\x00\x1E\x8C\x18"
+                         "\xD7\x5B\x81\xDC\x91\xBA\xA0\x47"
+                         "\x6B\x91\xB9\x24\xB2\x80\x38\x9D"
+                         "\x92\xC9\x63\xBA\xC0\x46\xEC\x95"
+                         "\x9B\x62\x66\xC0\x47\x22\xB1\x49"
+                         "\x23\x01\x01\x01",
+               .rlen   = 76,
+               .assoc  = "\x00\x00\x01\x00\x00\x00\x00\x00"
+                         "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+                         "\xCE\xFA\xCE\x74",
+               .alen   = 20,
+               .input  = "\x92\xD0\x53\x79\x33\x38\xD5\xF3"
+                         "\x7D\xE4\x7A\x8E\x86\x03\xC9\x90"
+                         "\x96\x35\xAB\x9C\xFB\xE8\xA3\x76"
+                         "\xE9\xE9\xE2\xD1\x2E\x11\x0E\x00"
+                         "\xFA\xCE\xB5\x9E\x02\xA7\x7B\xEA"
+                         "\x71\x9A\x58\xFB\xA5\x8A\xE1\xB7"
+                         "\x9C\x39\x9D\xE3\xB5\x6E\x69\xE6"
+                         "\x63\xC9\xDB\x05\x69\x51\x12\xAD"
+                         "\x3E\x00\x32\x73\x86\xF2\xEE\xF5"
+                         "\x0F\xE8\x81\x7E\x84\xD3\xC0\x0D"
+                         "\x76\xD6\x55\xC6\xB4\xC2\x34\xC7"
+                         "\x12\x25\x0B\xF9",
+               .ilen   = 92,
        }, {
-               .key    = "\xa4\x4b\x54\x29\x0a\xb8\x6d\x01"
-                         "\x5b\x80\x2a\xcf\x25\xc4\xb7\x5c"
-                         "\x20\x2c\xad\x30\xc2\x2b\x41\xfb"
-                         "\x0e\x85\xbc\x33\xad\x0f\x2b\xff"
-                         "\xee\x49\x83",
+               .key    = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+                         "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+                         "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+                         "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+                         "\x73\x61\x6C",
                .klen   = 35,
-               .iv     = "\xe9\xa9\xff\xe9\x57\xba\xfd\x9e",
-               .alen   = 0,
-               .input  = "\x1f\xb8\x8f\xa3\xdd\x54\x00\xf2",
-               .ilen   = 8,
-               .result = "\x00",
-               .rlen   = 0,
+               .iv     = "\x61\x6E\x64\x01\x69\x76\x65\x63",
+               .result = "\x45\x08\x00\x28\x73\x2C\x00\x00"
+                         "\x40\x06\xE9\xF9\x0A\x01\x06\x12"
+                         "\x0A\x01\x03\x8F\x06\xB8\x80\x23"
+                         "\xDD\x6B\xAF\xBE\xCB\x71\x26\x02"
+                         "\x50\x10\x1F\x64\x6D\x54\x00\x01",
+               .rlen   = 40,
+               .assoc  = "\x17\x40\x5E\x67\x15\x6F\x31\x26"
+                         "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+                         "\x69\x76\x65\x63",
+               .alen   = 20,
+               .input  = "\xCC\x74\xB7\xD3\xB0\x38\x50\x42"
+                         "\x2C\x64\x87\x46\x1E\x34\x10\x05"
+                         "\x29\x6B\xBB\x36\xE9\x69\xAD\x92"
+                         "\x82\xA1\x10\x6A\xEB\x0F\xDC\x7D"
+                         "\x08\xBA\xF3\x91\xCA\xAA\x61\xDA"
+                         "\x62\xF4\x14\x61\x5C\x9D\xB5\xA7"
+                         "\xEE\xD7\xB9\x7E\x87\x99\x9B\x7D",
+               .ilen   = 56,
        }, {
-               .key    = "\x39\xbb\xa7\xbe\x59\x97\x9e\x73"
-                         "\xa2\xbc\x6b\x98\xd7\x75\x7f\xe3"
-                         "\xa4\x48\x93\x39\x26\x71\x4a\xc6"
-                         "\xae\x8f\x11\x4c\xc2\x9c\x4a\xbb"
-                         "\x85\x34\x66",
+               .key    = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+                         "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+                         "\x57\x69\x0E",
+               .klen   = 19,
+               .iv     = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+               .result = "\x45\x00\x00\x49\x33\x3E\x00\x00"
+                         "\x7F\x11\x91\x82\xC3\xFB\x1D\x10"
+                         "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE"
+                         "\x00\x35\xCB\x45\x80\x03\x02\x5B"
+                         "\x00\x00\x01\xE0\x00\x1E\x8C\x18"
+                         "\xD6\x57\x59\xD5\x22\x84\xA0\x35"
+                         "\x2C\x71\x47\x5C\x88\x80\x39\x1C"
+                         "\x76\x4D\x6E\x5E\xE0\x49\x6B\x32"
+                         "\x5A\xE2\x70\xC0\x38\x99\x49\x39"
+                         "\x15\x01\x01\x01",
+               .rlen   = 76,
+               .assoc  = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
+                         "\x10\x10\x10\x10\x4E\x28\x00\x00"
+                         "\xA2\xFC\xA1\xA3",
+               .alen   = 20,
+               .input  = "\x6A\x6B\x45\x5E\xD6\x9A\x52\xF6"
+                         "\xEF\x70\x1A\x9C\xE8\xD3\x19\x86"
+                         "\xC8\x02\xF0\xB0\x03\x09\xD9\x02"
+                         "\xA0\xD2\x59\x04\xD1\x85\x2A\x24"
+                         "\x1C\x67\x3E\xD8\x68\x72\x06\x94"
+                         "\x97\xBA\x4F\x76\x8D\xB0\x44\x5B"
+                         "\x69\xBF\xD5\xE2\x3D\xF1\x0B\x0C"
+                         "\xC0\xBF\xB1\x8F\x70\x09\x9E\xCE"
+                         "\xA5\xF2\x55\x58\x84\xFA\xF9\xB5"
+                         "\x23\xF4\x84\x40\x74\x14\x8A\x6B"
+                         "\xDB\xD7\x67\xED\xA4\x93\xF3\x47"
+                         "\xCC\xF7\x46\x6F",
+               .ilen   = 92,
+       }, {
+               .key    = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+                         "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+                         "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+                         "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+                         "\x73\x61\x6C",
                .klen   = 35,
-               .iv     = "\x42\xc8\x92\x0f\x36\x58\xe0\x6b",
-               .alen   = 0,
-               .input  = "\x48\x01\x5e\x02\x24\x04\x66\x47"
-                         "\xa1\xea\x6f\xaf\xe8\xfc\xfb\xdd"
-                         "\xa5\xa9\x87\x8d\x84\xee\x2e\x77"
-                         "\xbb\x86\xb9\xf5\x5c\x6c\xff\xf6"
-                         "\x72\xc3\x8e\xf7\x70\xb1\xb2\x07"
-                         "\xbc\xa8\xa3\xbd\x83\x7c\x1d\x2a",
-               .ilen   = 48,
-               .result = "\xdc\x56\xf2\x71\xb0\xb1\xa0\x6c"
-                         "\xf0\x97\x3a\xfb\x6d\xe7\x32\x99"
-                         "\x3e\xaf\x70\x5e\xb2\x4d\xea\x39"
-                         "\x89\xd4\x75\x7a\x63\xb1\xda\x93",
-               .rlen   = 32,
-               .novrfy = 1,
+               .iv     = "\x61\x6E\x64\x01\x69\x76\x65\x63",
+               .result = "\x63\x69\x73\x63\x6F\x01\x72\x75"
+                         "\x6C\x65\x73\x01\x74\x68\x65\x01"
+                         "\x6E\x65\x74\x77\x65\x01\x64\x65"
+                         "\x66\x69\x6E\x65\x01\x74\x68\x65"
+                         "\x74\x65\x63\x68\x6E\x6F\x6C\x6F"
+                         "\x67\x69\x65\x73\x01\x74\x68\x61"
+                         "\x74\x77\x69\x6C\x6C\x01\x64\x65"
+                         "\x66\x69\x6E\x65\x74\x6F\x6D\x6F"
+                         "\x72\x72\x6F\x77\x01\x02\x02\x01",
+               .rlen   = 72,
+               .assoc  = "\x17\x40\x5E\x67\x15\x6F\x31\x26"
+                         "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+                         "\x69\x76\x65\x63",
+               .alen   = 20,
+               .input  = "\xEA\x15\xC4\x98\xAC\x15\x22\x37"
+                         "\x00\x07\x1D\xBE\x60\x5D\x73\x16"
+                         "\x4D\x0F\xCC\xCE\x8A\xD0\x49\xD4"
+                         "\x39\xA3\xD1\xB1\x21\x0A\x92\x1A"
+                         "\x2C\xCF\x8F\x9D\xC9\x91\x0D\xB4"
+                         "\x15\xFC\xBC\xA5\xC5\xBF\x54\xE5"
+                         "\x1C\xC7\x32\x41\x07\x7B\x2C\xB6"
+                         "\x5C\x23\x7C\x93\xEA\xEF\x23\x1C"
+                         "\x73\xF4\xE7\x12\x84\x4C\x37\x0A"
+                         "\x4A\x8F\x06\x37\x48\xF9\xF9\x05"
+                         "\x55\x13\x40\xC3\xD5\x55\x3A\x3D",
+               .ilen   = 88,
        }, {
-               .key    = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7"
-                         "\x96\xe5\xc5\x68\xaa\x95\x35\xe0"
-                         "\x29\xa0\xba\x9e\x48\x78\xd1\xba"
-                         "\x0d\x1a\x53\x3b\xb5\xe3\xf8\x8b"
-                         "\xcf\x76\x3f",
+               .key    = "\x7D\x77\x3D\x00\xC1\x44\xC5\x25"
+                         "\xAC\x61\x9D\x18\xC8\x4A\x3F\x47"
+                         "\xD9\x66\x42",
+               .klen   = 19,
+               .iv     = "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
+               .result = "\x01\x02\x02\x01",
+               .rlen   = 4,
+               .assoc  = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF"
+                         "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
+               .alen   = 16,
+               .input  = "\x4C\x72\x63\x30\x2F\xE6\x56\xDD"
+                         "\xD0\xD8\x60\x9D\x8B\xEF\x85\x90"
+                         "\xF7\x61\x24\x62",
+               .ilen   = 20,
+       }, {
+               .key    = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+                         "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+                         "\xDE\xCA\xF8",
+               .klen   = 19,
+               .iv     = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74",
+               .result = "\x74\x6F\x01\x62\x65\x01\x6F\x72"
+                         "\x01\x6E\x6F\x74\x01\x74\x6F\x01"
+                         "\x62\x65\x00\x01",
+               .rlen   = 20,
+               .assoc  = "\x00\x00\x01\x00\x00\x00\x00\x00"
+                         "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+                         "\xCE\xFA\xCE\x74",
+               .alen   = 20,
+               .input  = "\xA3\xBF\x52\x52\x65\x83\xBA\x81"
+                         "\x03\x9B\x84\xFC\x44\x8C\xBB\x81"
+                         "\x36\xE1\x78\xBB\xA5\x49\x3A\xD0"
+                         "\xF0\x6B\x21\xAF\x98\xC0\x34\xDC"
+                         "\x17\x17\x65\xAD",
+               .ilen   = 36,
+       }, {
+               .key    = "\x6C\x65\x67\x61\x6C\x69\x7A\x65"
+                         "\x6D\x61\x72\x69\x6A\x75\x61\x6E"
+                         "\x61\x61\x6E\x64\x64\x6F\x69\x74"
+                         "\x62\x65\x66\x6F\x72\x65\x69\x61"
+                         "\x74\x75\x72",
                .klen   = 35,
-               .iv     = "\xd9\x95\x75\x8f\x44\x89\x40\x7b",
-               .assoc  = "\x8f\x86\x6c\x4d\x1d\xc5\x39\x88"
-                         "\xc8\xf3\x5c\x52\x10\x63\x6f\x2b"
-                         "\x8a\x2a\xc5\x6f\x30\x23\x58\x7b"
-                         "\xfb\x36\x03\x11\xb4\xd9\xf2\xfe",
-               .alen   = 32,
-               .input  = "\x48\x58\xd6\xf3\xad\x63\x58\xbf"
-                         "\xae\xc7\x5e\xae\x83\x8f\x7b\xe4"
-                         "\x78\x5c\x4c\x67\x71\x89\x94\xbf"
-                         "\x47\xf1\x63\x7e\x1c\x59\xbd\xc5"
-                         "\x7f\x44\x0a\x0c\x01\x18\x07\x92"
-                         "\xe1\xd3\x51\xce\x32\x6d\x0c\x5b",
-               .ilen   = 48,
-               .result = "\xc2\x54\xc8\xde\x78\x87\x77\x40"
-                         "\x49\x71\xe4\xb7\xe7\xcb\x76\x61"
-                         "\x0a\x41\xb9\xe9\xc0\x76\x54\xab"
-                         "\x04\x49\x3b\x19\x93\x57\x25\x5d",
+               .iv     = "\x33\x30\x21\x69\x67\x65\x74\x6D",
+               .result = "\x45\x00\x00\x30\xDA\x3A\x00\x00"
+                         "\x80\x01\xDF\x3B\xC0\xA8\x00\x05"
+                         "\xC0\xA8\x00\x01\x08\x00\xC6\xCD"
+                         "\x02\x00\x07\x00\x61\x62\x63\x64"
+                         "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+                         "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+                         "\x01\x02\x02\x01",
+               .rlen   = 52,
+               .assoc  = "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF"
+                         "\xFF\xFF\xFF\xFF\x33\x30\x21\x69"
+                         "\x67\x65\x74\x6D",
+               .alen   = 20,
+               .input  = "\x96\xFD\x86\xF8\xD1\x98\xFF\x10"
+                         "\xAB\x8C\xDA\x8A\x5A\x08\x38\x1A"
+                         "\x48\x59\x80\x18\x1A\x18\x1A\x04"
+                         "\xC9\x0D\xE3\xE7\x0E\xA4\x0B\x75"
+                         "\x92\x9C\x52\x5C\x0B\xFB\xF8\xAF"
+                         "\x16\xC3\x35\xA8\xE7\xCE\x84\x04"
+                         "\xEB\x40\x6B\x7A\x8E\x75\xBB\x42"
+                         "\xE0\x63\x4B\x21\x44\xA2\x2B\x2B"
+                         "\x39\xDB\xC8\xDC",
+               .ilen   = 68,
+       }, {
+               .key    = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+                         "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+                         "\x57\x69\x0E",
+               .klen   = 19,
+               .iv     = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+               .result = "\x45\x00\x00\x30\xDA\x3A\x00\x00"
+                         "\x80\x01\xDF\x3B\xC0\xA8\x00\x05"
+                         "\xC0\xA8\x00\x01\x08\x00\xC6\xCD"
+                         "\x02\x00\x07\x00\x61\x62\x63\x64"
+                         "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+                         "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+                         "\x01\x02\x02\x01",
+               .rlen   = 52,
+               .assoc  = "\x3F\x7E\xF6\x42\x10\x10\x10\x10"
+                         "\x10\x10\x10\x10\x4E\x28\x00\x00"
+                         "\xA2\xFC\xA1\xA3",
+               .alen   = 20,
+               .input  = "\x6A\x6B\x45\x27\x3F\x9E\x52\xF6"
+                         "\x10\x60\x54\x25\xEB\x80\x04\x93"
+                         "\xCA\x1B\x23\x97\xCB\x21\x2E\x01"
+                         "\xA2\xE7\x95\x41\x30\xE4\x4B\x1B"
+                         "\x79\x01\x58\x50\x01\x06\xE1\xE0"
+                         "\x2C\x83\x79\xD3\xDE\x46\x97\x1A"
+                         "\x44\xCC\x90\xBF\x00\x94\x94\x92"
+                         "\x20\x17\x0C\x1B\x55\xDE\x7E\x68"
+                         "\xF4\x95\x5D\x4F",
+               .ilen   = 68,
+       }, {
+               .key    = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA"
+                         "\x90\x6A\xC7\x3C\x36\x13\xA6\x34"
+                         "\x22\x43\x3C",
+               .klen   = 19,
+               .iv     = "\x48\x55\xEC\x7D\x3A\x23\x4B\xFD",
+               .result = "\x08\x00\xC6\xCD\x02\x00\x07\x00"
+                         "\x61\x62\x63\x64\x65\x66\x67\x68"
+                         "\x69\x6A\x6B\x6C\x6D\x6E\x6F\x70"
+                         "\x71\x72\x73\x74\x01\x02\x02\x01",
                .rlen   = 32,
-       },
+               .assoc  = "\x00\x00\x43\x21\x87\x65\x43\x21"
+                         "\x00\x00\x00\x07\x48\x55\xEC\x7D"
+                         "\x3A\x23\x4B\xFD",
+               .alen   = 20,
+               .input  = "\x67\xE9\x28\xB3\x1C\xA4\x6D\x02"
+                         "\xF0\xB5\x37\xB6\x6B\x2F\xF5\x4F"
+                         "\xF8\xA3\x4C\x53\xB8\x12\x09\xBF"
+                         "\x58\x7D\xCF\x29\xA3\x41\x68\x6B"
+                         "\xCE\xE8\x79\x85\x3C\xB0\x3A\x8F"
+                         "\x16\xB0\xA1\x26\xC9\xBC\xBC\xA6",
+               .ilen   = 48,
+       }
 };
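The decryption template above mirrors the encryption one with the buffers swapped: .input now holds ciphertext plus the 16-byte ICV and .result the expected plaintext, so ilen = rlen + 16 throughout. A hedged helper sketch (illustrative name, not kernel API) of how the 11-byte RFC 4309 nonce is assembled from these fields:

#include <string.h>

/*
 * Sketch: build the 11-byte RFC 4309 CCM nonce from a test-vector
 * .key/.iv pair. The 3-byte salt rides in the tail of .key, which is
 * why klen is 19, 27 or 35 rather than 16, 24 or 32.
 */
static void rfc4309_nonce(unsigned char nonce[11],
			  const unsigned char *key, unsigned int klen,
			  const unsigned char iv[8])
{
	memcpy(nonce, key + klen - 3, 3);	/* salt: last 3 key bytes */
	memcpy(nonce + 3, iv, 8);		/* 8-byte per-packet IV */
}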
 
 /*
@@ -22343,8 +23647,9 @@ static struct aead_testvec rfc7539esp_enc_tv_template[] = {
                .klen   = 36,
                .iv     = "\x01\x02\x03\x04\x05\x06\x07\x08",
                .assoc  = "\xf3\x33\x88\x86\x00\x00\x00\x00"
-                         "\x00\x00\x4e\x91",
-               .alen   = 12,
+                         "\x00\x00\x4e\x91\x01\x02\x03\x04"
+                         "\x05\x06\x07\x08",
+               .alen   = 20,
                .input  = "\x49\x6e\x74\x65\x72\x6e\x65\x74"
                          "\x2d\x44\x72\x61\x66\x74\x73\x20"
                          "\x61\x72\x65\x20\x64\x72\x61\x66"
@@ -22430,8 +23735,9 @@ static struct aead_testvec rfc7539esp_dec_tv_template[] = {
                .klen   = 36,
                .iv     = "\x01\x02\x03\x04\x05\x06\x07\x08",
                .assoc  = "\xf3\x33\x88\x86\x00\x00\x00\x00"
-                         "\x00\x00\x4e\x91",
-               .alen   = 12,
+                         "\x00\x00\x4e\x91\x01\x02\x03\x04"
+                         "\x05\x06\x07\x08",
+               .alen   = 20,
                .input  = "\x64\xa0\x86\x15\x75\x86\x1a\xf4"
                          "\x60\xf0\x62\xc7\x9b\xe6\x43\xbd"
                          "\x5e\x80\x5c\xfd\x34\x5c\xf3\x89"
@@ -30174,7 +31480,7 @@ static struct cipher_testvec salsa20_stream_enc_tv_template[] = {
        },
 };
 
-#define CHACHA20_ENC_TEST_VECTORS 3
+#define CHACHA20_ENC_TEST_VECTORS 4
 static struct cipher_testvec chacha20_enc_tv_template[] = {
        { /* RFC7539 A.2. Test Vector #1 */
                .key    = "\x00\x00\x00\x00\x00\x00\x00\x00"
@@ -30348,6 +31654,338 @@ static struct cipher_testvec chacha20_enc_tv_template[] = {
                          "\x87\xb5\x8d\xfd\x72\x8a\xfa\x36"
                          "\x75\x7a\x79\x7a\xc1\x88\xd1",
                .rlen   = 127,
+       }, { /* Self-made test vector for long data */
+               .key    = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
+                         "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
+                         "\x47\x39\x17\xc1\x40\x2b\x80\x09"
+                         "\x9d\xca\x5c\xbc\x20\x70\x75\xc0",
+               .klen   = 32,
+               .iv     = "\x1c\x00\x00\x00\x00\x00\x00\x00"
+                         "\x00\x00\x00\x00\x00\x00\x00\x01",
+               .input  = "\x49\xee\xe0\xdc\x24\x90\x40\xcd"
+                         "\xc5\x40\x8f\x47\x05\xbc\xdd\x81"
+                         "\x47\xc6\x8d\xe6\xb1\x8f\xd7\xcb"
+                         "\x09\x0e\x6e\x22\x48\x1f\xbf\xb8"
+                         "\x5c\xf7\x1e\x8a\xc1\x23\xf2\xd4"
+                         "\x19\x4b\x01\x0f\x4e\xa4\x43\xce"
+                         "\x01\xc6\x67\xda\x03\x91\x18\x90"
+                         "\xa5\xa4\x8e\x45\x03\xb3\x2d\xac"
+                         "\x74\x92\xd3\x53\x47\xc8\xdd\x25"
+                         "\x53\x6c\x02\x03\x87\x0d\x11\x0c"
+                         "\x58\xe3\x12\x18\xfd\x2a\x5b\x40"
+                         "\x0c\x30\xf0\xb8\x3f\x43\xce\xae"
+                         "\x65\x3a\x7d\x7c\xf4\x54\xaa\xcc"
+                         "\x33\x97\xc3\x77\xba\xc5\x70\xde"
+                         "\xd7\xd5\x13\xa5\x65\xc4\x5f\x0f"
+                         "\x46\x1a\x0d\x97\xb5\xf3\xbb\x3c"
+                         "\x84\x0f\x2b\xc5\xaa\xea\xf2\x6c"
+                         "\xc9\xb5\x0c\xee\x15\xf3\x7d\xbe"
+                         "\x9f\x7b\x5a\xa6\xae\x4f\x83\xb6"
+                         "\x79\x49\x41\xf4\x58\x18\xcb\x86"
+                         "\x7f\x30\x0e\xf8\x7d\x44\x36\xea"
+                         "\x75\xeb\x88\x84\x40\x3c\xad\x4f"
+                         "\x6f\x31\x6b\xaa\x5d\xe5\xa5\xc5"
+                         "\x21\x66\xe9\xa7\xe3\xb2\x15\x88"
+                         "\x78\xf6\x79\xa1\x59\x47\x12\x4e"
+                         "\x9f\x9f\x64\x1a\xa0\x22\x5b\x08"
+                         "\xbe\x7c\x36\xc2\x2b\x66\x33\x1b"
+                         "\xdd\x60\x71\xf7\x47\x8c\x61\xc3"
+                         "\xda\x8a\x78\x1e\x16\xfa\x1e\x86"
+                         "\x81\xa6\x17\x2a\xa7\xb5\xc2\xe7"
+                         "\xa4\xc7\x42\xf1\xcf\x6a\xca\xb4"
+                         "\x45\xcf\xf3\x93\xf0\xe7\xea\xf6"
+                         "\xf4\xe6\x33\x43\x84\x93\xa5\x67"
+                         "\x9b\x16\x58\x58\x80\x0f\x2b\x5c"
+                         "\x24\x74\x75\x7f\x95\x81\xb7\x30"
+                         "\x7a\x33\xa7\xf7\x94\x87\x32\x27"
+                         "\x10\x5d\x14\x4c\x43\x29\xdd\x26"
+                         "\xbd\x3e\x3c\x0e\xfe\x0e\xa5\x10"
+                         "\xea\x6b\x64\xfd\x73\xc6\xed\xec"
+                         "\xa8\xc9\xbf\xb3\xba\x0b\x4d\x07"
+                         "\x70\xfc\x16\xfd\x79\x1e\xd7\xc5"
+                         "\x49\x4e\x1c\x8b\x8d\x79\x1b\xb1"
+                         "\xec\xca\x60\x09\x4c\x6a\xd5\x09"
+                         "\x49\x46\x00\x88\x22\x8d\xce\xea"
+                         "\xb1\x17\x11\xde\x42\xd2\x23\xc1"
+                         "\x72\x11\xf5\x50\x73\x04\x40\x47"
+                         "\xf9\x5d\xe7\xa7\x26\xb1\x7e\xb0"
+                         "\x3f\x58\xc1\x52\xab\x12\x67\x9d"
+                         "\x3f\x43\x4b\x68\xd4\x9c\x68\x38"
+                         "\x07\x8a\x2d\x3e\xf3\xaf\x6a\x4b"
+                         "\xf9\xe5\x31\x69\x22\xf9\xa6\x69"
+                         "\xc6\x9c\x96\x9a\x12\x35\x95\x1d"
+                         "\x95\xd5\xdd\xbe\xbf\x93\x53\x24"
+                         "\xfd\xeb\xc2\x0a\x64\xb0\x77\x00"
+                         "\x6f\x88\xc4\x37\x18\x69\x7c\xd7"
+                         "\x41\x92\x55\x4c\x03\xa1\x9a\x4b"
+                         "\x15\xe5\xdf\x7f\x37\x33\x72\xc1"
+                         "\x8b\x10\x67\xa3\x01\x57\x94\x25"
+                         "\x7b\x38\x71\x7e\xdd\x1e\xcc\x73"
+                         "\x55\xd2\x8e\xeb\x07\xdd\xf1\xda"
+                         "\x58\xb1\x47\x90\xfe\x42\x21\x72"
+                         "\xa3\x54\x7a\xa0\x40\xec\x9f\xdd"
+                         "\xc6\x84\x6e\xca\xae\xe3\x68\xb4"
+                         "\x9d\xe4\x78\xff\x57\xf2\xf8\x1b"
+                         "\x03\xa1\x31\xd9\xde\x8d\xf5\x22"
+                         "\x9c\xdd\x20\xa4\x1e\x27\xb1\x76"
+                         "\x4f\x44\x55\xe2\x9b\xa1\x9c\xfe"
+                         "\x54\xf7\x27\x1b\xf4\xde\x02\xf5"
+                         "\x1b\x55\x48\x5c\xdc\x21\x4b\x9e"
+                         "\x4b\x6e\xed\x46\x23\xdc\x65\xb2"
+                         "\xcf\x79\x5f\x28\xe0\x9e\x8b\xe7"
+                         "\x4c\x9d\x8a\xff\xc1\xa6\x28\xb8"
+                         "\x65\x69\x8a\x45\x29\xef\x74\x85"
+                         "\xde\x79\xc7\x08\xae\x30\xb0\xf4"
+                         "\xa3\x1d\x51\x41\xab\xce\xcb\xf6"
+                         "\xb5\xd8\x6d\xe0\x85\xe1\x98\xb3"
+                         "\x43\xbb\x86\x83\x0a\xa0\xf5\xb7"
+                         "\x04\x0b\xfa\x71\x1f\xb0\xf6\xd9"
+                         "\x13\x00\x15\xf0\xc7\xeb\x0d\x5a"
+                         "\x9f\xd7\xb9\x6c\x65\x14\x22\x45"
+                         "\x6e\x45\x32\x3e\x7e\x60\x1a\x12"
+                         "\x97\x82\x14\xfb\xaa\x04\x22\xfa"
+                         "\xa0\xe5\x7e\x8c\x78\x02\x48\x5d"
+                         "\x78\x33\x5a\x7c\xad\xdb\x29\xce"
+                         "\xbb\x8b\x61\xa4\xb7\x42\xe2\xac"
+                         "\x8b\x1a\xd9\x2f\x0b\x8b\x62\x21"
+                         "\x83\x35\x7e\xad\x73\xc2\xb5\x6c"
+                         "\x10\x26\x38\x07\xe5\xc7\x36\x80"
+                         "\xe2\x23\x12\x61\xf5\x48\x4b\x2b"
+                         "\xc5\xdf\x15\xd9\x87\x01\xaa\xac"
+                         "\x1e\x7c\xad\x73\x78\x18\x63\xe0"
+                         "\x8b\x9f\x81\xd8\x12\x6a\x28\x10"
+                         "\xbe\x04\x68\x8a\x09\x7c\x1b\x1c"
+                         "\x83\x66\x80\x47\x80\xe8\xfd\x35"
+                         "\x1c\x97\x6f\xae\x49\x10\x66\xcc"
+                         "\xc6\xd8\xcc\x3a\x84\x91\x20\x77"
+                         "\x72\xe4\x24\xd2\x37\x9f\xc5\xc9"
+                         "\x25\x94\x10\x5f\x40\x00\x64\x99"
+                         "\xdc\xae\xd7\x21\x09\x78\x50\x15"
+                         "\xac\x5f\xc6\x2c\xa2\x0b\xa9\x39"
+                         "\x87\x6e\x6d\xab\xde\x08\x51\x16"
+                         "\xc7\x13\xe9\xea\xed\x06\x8e\x2c"
+                         "\xf8\x37\x8c\xf0\xa6\x96\x8d\x43"
+                         "\xb6\x98\x37\xb2\x43\xed\xde\xdf"
+                         "\x89\x1a\xe7\xeb\x9d\xa1\x7b\x0b"
+                         "\x77\xb0\xe2\x75\xc0\xf1\x98\xd9"
+                         "\x80\x55\xc9\x34\x91\xd1\x59\xe8"
+                         "\x4b\x0f\xc1\xa9\x4b\x7a\x84\x06"
+                         "\x20\xa8\x5d\xfa\xd1\xde\x70\x56"
+                         "\x2f\x9e\x91\x9c\x20\xb3\x24\xd8"
+                         "\x84\x3d\xe1\x8c\x7e\x62\x52\xe5"
+                         "\x44\x4b\x9f\xc2\x93\x03\xea\x2b"
+                         "\x59\xc5\xfa\x3f\x91\x2b\xbb\x23"
+                         "\xf5\xb2\x7b\xf5\x38\xaf\xb3\xee"
+                         "\x63\xdc\x7b\xd1\xff\xaa\x8b\xab"
+                         "\x82\x6b\x37\x04\xeb\x74\xbe\x79"
+                         "\xb9\x83\x90\xef\x20\x59\x46\xff"
+                         "\xe9\x97\x3e\x2f\xee\xb6\x64\x18"
+                         "\x38\x4c\x7a\x4a\xf9\x61\xe8\x9a"
+                         "\xa1\xb5\x01\xa6\x47\xd3\x11\xd4"
+                         "\xce\xd3\x91\x49\x88\xc7\xb8\x4d"
+                         "\xb1\xb9\x07\x6d\x16\x72\xae\x46"
+                         "\x5e\x03\xa1\x4b\xb6\x02\x30\xa8"
+                         "\x3d\xa9\x07\x2a\x7c\x19\xe7\x62"
+                         "\x87\xe3\x82\x2f\x6f\xe1\x09\xd9"
+                         "\x94\x97\xea\xdd\x58\x9e\xae\x76"
+                         "\x7e\x35\xe5\xb4\xda\x7e\xf4\xde"
+                         "\xf7\x32\x87\xcd\x93\xbf\x11\x56"
+                         "\x11\xbe\x08\x74\xe1\x69\xad\xe2"
+                         "\xd7\xf8\x86\x75\x8a\x3c\xa4\xbe"
+                         "\x70\xa7\x1b\xfc\x0b\x44\x2a\x76"
+                         "\x35\xea\x5d\x85\x81\xaf\x85\xeb"
+                         "\xa0\x1c\x61\xc2\xf7\x4f\xa5\xdc"
+                         "\x02\x7f\xf6\x95\x40\x6e\x8a\x9a"
+                         "\xf3\x5d\x25\x6e\x14\x3a\x22\xc9"
+                         "\x37\x1c\xeb\x46\x54\x3f\xa5\x91"
+                         "\xc2\xb5\x8c\xfe\x53\x08\x97\x32"
+                         "\x1b\xb2\x30\x27\xfe\x25\x5d\xdc"
+                         "\x08\x87\xd0\xe5\x94\x1a\xd4\xf1"
+                         "\xfe\xd6\xb4\xa3\xe6\x74\x81\x3c"
+                         "\x1b\xb7\x31\xa7\x22\xfd\xd4\xdd"
+                         "\x20\x4e\x7c\x51\xb0\x60\x73\xb8"
+                         "\x9c\xac\x91\x90\x7e\x01\xb0\xe1"
+                         "\x8a\x2f\x75\x1c\x53\x2a\x98\x2a"
+                         "\x06\x52\x95\x52\xb2\xe9\x25\x2e"
+                         "\x4c\xe2\x5a\x00\xb2\x13\x81\x03"
+                         "\x77\x66\x0d\xa5\x99\xda\x4e\x8c"
+                         "\xac\xf3\x13\x53\x27\x45\xaf\x64"
+                         "\x46\xdc\xea\x23\xda\x97\xd1\xab"
+                         "\x7d\x6c\x30\x96\x1f\xbc\x06\x34"
+                         "\x18\x0b\x5e\x21\x35\x11\x8d\x4c"
+                         "\xe0\x2d\xe9\x50\x16\x74\x81\xa8"
+                         "\xb4\x34\xb9\x72\x42\xa6\xcc\xbc"
+                         "\xca\x34\x83\x27\x10\x5b\x68\x45"
+                         "\x8f\x52\x22\x0c\x55\x3d\x29\x7c"
+                         "\xe3\xc0\x66\x05\x42\x91\x5f\x58"
+                         "\xfe\x4a\x62\xd9\x8c\xa9\x04\x19"
+                         "\x04\xa9\x08\x4b\x57\xfc\x67\x53"
+                         "\x08\x7c\xbc\x66\x8a\xb0\xb6\x9f"
+                         "\x92\xd6\x41\x7c\x5b\x2a\x00\x79"
+                         "\x72",
+               .ilen   = 1281,
+               .result = "\x45\xe8\xe0\xb6\x9c\xca\xfd\x87"
+                         "\xe8\x1d\x37\x96\x8a\xe3\x40\x35"
+                         "\xcf\x5e\x3a\x46\x3d\xfb\xd0\x69"
+                         "\xde\xaf\x7a\xd5\x0d\xe9\x52\xec"
+                         "\xc2\x82\xe5\x3e\x7d\xb2\x4a\xd9"
+                         "\xbb\xc3\x9f\xc0\x5d\xac\x93\x8d"
+                         "\x0e\x6f\xd3\xd7\xfb\x6a\x0d\xce"
+                         "\x92\x2c\xf7\xbb\x93\x57\xcc\xee"
+                         "\x42\x72\x6f\xc8\x4b\xd2\x76\xbf"
+                         "\xa0\xe3\x7a\x39\xf9\x5c\x8e\xfd"
+                         "\xa1\x1d\x41\xe5\x08\xc1\x1c\x11"
+                         "\x92\xfd\x39\x5c\x51\xd0\x2f\x66"
+                         "\x33\x4a\x71\x15\xfe\xee\x12\x54"
+                         "\x8c\x8f\x34\xd8\x50\x3c\x18\xa6"
+                         "\xc5\xe1\x46\x8a\xfb\x5f\x7e\x25"
+                         "\x9b\xe2\xc3\x66\x41\x2b\xb3\xa5"
+                         "\x57\x0e\x94\x17\x26\x39\xbb\x54"
+                         "\xae\x2e\x6f\x42\xfb\x4d\x89\x6f"
+                         "\x9d\xf1\x16\x2e\xe3\xe7\xfc\xe3"
+                         "\xb2\x4b\x2b\xa6\x7c\x04\x69\x3a"
+                         "\x70\x5a\xa7\xf1\x31\x64\x19\xca"
+                         "\x45\x79\xd8\x58\x23\x61\xaf\xc2"
+                         "\x52\x05\xc3\x0b\xc1\x64\x7c\x81"
+                         "\xd9\x11\xcf\xff\x02\x3d\x51\x84"
+                         "\x01\xac\xc6\x2e\x34\x2b\x09\x3a"
+                         "\xa8\x5d\x98\x0e\x89\xd9\xef\x8f"
+                         "\xd9\xd7\x7d\xdd\x63\x47\x46\x7d"
+                         "\xa1\xda\x0b\x53\x7d\x79\xcd\xc9"
+                         "\x86\xdd\x6b\x13\xa1\x9a\x70\xdd"
+                         "\x5c\xa1\x69\x3c\xe4\x5d\xe3\x8c"
+                         "\xe5\xf4\x87\x9c\x10\xcf\x0f\x0b"
+                         "\xc8\x43\xdc\xf8\x1d\x62\x5e\x5b"
+                         "\xe2\x03\x06\xc5\x71\xb6\x48\xa5"
+                         "\xf0\x0f\x2d\xd5\xa2\x73\x55\x8f"
+                         "\x01\xa7\x59\x80\x5f\x11\x6c\x40"
+                         "\xff\xb1\xf2\xc6\x7e\x01\xbb\x1c"
+                         "\x69\x9c\xc9\x3f\x71\x5f\x07\x7e"
+                         "\xdf\x6f\x99\xca\x9c\xfd\xf9\xb9"
+                         "\x49\xe7\xcc\x91\xd5\x9b\x8f\x03"
+                         "\xae\xe7\x61\x32\xef\x41\x6c\x75"
+                         "\x84\x9b\x8c\xce\x1d\x6b\x93\x21"
+                         "\x41\xec\xc6\xad\x8e\x0c\x48\xa8"
+                         "\xe2\xf5\x57\xde\xf7\x38\xfd\x4a"
+                         "\x6f\xa7\x4a\xf9\xac\x7d\xb1\x85"
+                         "\x7d\x6c\x95\x0a\x5a\xcf\x68\xd2"
+                         "\xe0\x7a\x26\xd9\xc1\x6d\x3e\xc6"
+                         "\x37\xbd\xbe\x24\x36\x77\x9f\x1b"
+                         "\xc1\x22\xf3\x79\xae\x95\x78\x66"
+                         "\x97\x11\xc0\x1a\xf1\xe8\x0d\x38"
+                         "\x09\xc2\xee\xb7\xd3\x46\x7b\x59"
+                         "\x77\x23\xe8\xb4\x92\x3d\x78\xbe"
+                         "\xe2\x25\x63\xa5\x2a\x06\x70\x92"
+                         "\x32\x63\xf9\x19\x21\x68\xe1\x0b"
+                         "\x9a\xd0\xee\x21\xdb\x1f\xe0\xde"
+                         "\x3e\x64\x02\x4d\x0e\xe0\x0a\xa9"
+                         "\xed\x19\x8c\xa8\xbf\xe3\x2e\x75"
+                         "\x24\x2b\xb0\xe5\x82\x6a\x1e\x6f"
+                         "\x71\x2a\x3a\x60\xed\x06\x0d\x17"
+                         "\xa2\xdb\x29\x1d\xae\xb2\xc4\xfb"
+                         "\x94\x04\xd8\x58\xfc\xc4\x04\x4e"
+                         "\xee\xc7\xc1\x0f\xe9\x9b\x63\x2d"
+                         "\x02\x3e\x02\x67\xe5\xd8\xbb\x79"
+                         "\xdf\xd2\xeb\x50\xe9\x0a\x02\x46"
+                         "\xdf\x68\xcf\xe7\x2b\x0a\x56\xd6"
+                         "\xf7\xbc\x44\xad\xb8\xb5\x5f\xeb"
+                         "\xbc\x74\x6b\xe8\x7e\xb0\x60\xc6"
+                         "\x0d\x96\x09\xbb\x19\xba\xe0\x3c"
+                         "\xc4\x6c\xbf\x0f\x58\xc0\x55\x62"
+                         "\x23\xa0\xff\xb5\x1c\xfd\x18\xe1"
+                         "\xcf\x6d\xd3\x52\xb4\xce\xa6\xfa"
+                         "\xaa\xfb\x1b\x0b\x42\x6d\x79\x42"
+                         "\x48\x70\x5b\x0e\xdd\x3a\xc9\x69"
+                         "\x8b\x73\x67\xf6\x95\xdb\x8c\xfb"
+                         "\xfd\xb5\x08\x47\x42\x84\x9a\xfa"
+                         "\xcc\x67\xb2\x3c\xb6\xfd\xd8\x32"
+                         "\xd6\x04\xb6\x4a\xea\x53\x4b\xf5"
+                         "\x94\x16\xad\xf0\x10\x2e\x2d\xb4"
+                         "\x8b\xab\xe5\x89\xc7\x39\x12\xf3"
+                         "\x8d\xb5\x96\x0b\x87\x5d\xa7\x7c"
+                         "\xb0\xc2\xf6\x2e\x57\x97\x2c\xdc"
+                         "\x54\x1c\x34\x72\xde\x0c\x68\x39"
+                         "\x9d\x32\xa5\x75\x92\x13\x32\xea"
+                         "\x90\x27\xbd\x5b\x1d\xb9\x21\x02"
+                         "\x1c\xcc\xba\x97\x5e\x49\x58\xe8"
+                         "\xac\x8b\xf3\xce\x3c\xf0\x00\xe9"
+                         "\x6c\xae\xe9\x77\xdf\xf4\x02\xcd"
+                         "\x55\x25\x89\x9e\x90\xf3\x6b\x8f"
+                         "\xb7\xd6\x47\x98\x26\x2f\x31\x2f"
+                         "\x8d\xbf\x54\xcd\x99\xeb\x80\xd7"
+                         "\xac\xc3\x08\xc2\xa6\x32\xf1\x24"
+                         "\x76\x7c\x4f\x78\x53\x55\xfb\x00"
+                         "\x8a\xd6\x52\x53\x25\x45\xfb\x0a"
+                         "\x6b\xb9\xbe\x3c\x5e\x11\xcc\x6a"
+                         "\xdd\xfc\xa7\xc4\x79\x4d\xbd\xfb"
+                         "\xce\x3a\xf1\x7a\xda\xeb\xfe\x64"
+                         "\x28\x3d\x0f\xee\x80\xba\x0c\xf8"
+                         "\xe9\x5b\x3a\xd4\xae\xc9\xf3\x0e"
+                         "\xe8\x5d\xc5\x5c\x0b\x20\x20\xee"
+                         "\x40\x0d\xde\x07\xa7\x14\xb4\x90"
+                         "\xb6\xbd\x3b\xae\x7d\x2b\xa7\xc7"
+                         "\xdc\x0b\x4c\x5d\x65\xb0\xd2\xc5"
+                         "\x79\x61\x23\xe0\xa2\x99\x73\x55"
+                         "\xad\xc6\xfb\xc7\x54\xb5\x98\x1f"
+                         "\x8c\x86\xc2\x3f\xbe\x5e\xea\x64"
+                         "\xa3\x60\x18\x9f\x80\xaf\x52\x74"
+                         "\x1a\xfe\x22\xc2\x92\x67\x40\x02"
+                         "\x08\xee\x67\x5b\x67\xe0\x3d\xde"
+                         "\x7a\xaf\x8e\x28\xf3\x5e\x0e\xf4"
+                         "\x48\x56\xaa\x85\x22\xd8\x36\xed"
+                         "\x3b\x3d\x68\x69\x30\xbc\x71\x23"
+                         "\xb1\x6e\x61\x03\x89\x44\x03\xf4"
+                         "\x32\xaa\x4c\x40\x9f\x69\xfb\x70"
+                         "\x91\xcc\x1f\x11\xbd\x76\x67\xe6"
+                         "\x10\x8b\x29\x39\x68\xea\x4e\x6d"
+                         "\xae\xfb\x40\xcf\xe2\xd0\x0d\x8d"
+                         "\x6f\xed\x9b\x8d\x64\x7a\x94\x8e"
+                         "\x32\x38\x78\xeb\x7d\x5f\xf9\x4d"
+                         "\x13\xbe\x21\xea\x16\xe7\x5c\xee"
+                         "\xcd\xf6\x5f\xc6\x45\xb2\x8f\x2b"
+                         "\xb5\x93\x3e\x45\xdb\xfd\xa2\x6a"
+                         "\xec\x83\x92\x99\x87\x47\xe0\x7c"
+                         "\xa2\x7b\xc4\x2a\xcd\xc0\x81\x03"
+                         "\x98\xb0\x87\xb6\x86\x13\x64\x33"
+                         "\x4c\xd7\x99\xbf\xdb\x7b\x6e\xaa"
+                         "\x76\xcc\xa0\x74\x1b\xa3\x6e\x83"
+                         "\xd4\xba\x7a\x84\x9d\x91\x71\xcd"
+                         "\x60\x2d\x56\xfd\x26\x35\xcb\xeb"
+                         "\xac\xe9\xee\xa4\xfc\x18\x5b\x91"
+                         "\xd5\xfe\x84\x45\xe0\xc7\xfd\x11"
+                         "\xe9\x00\xb6\x54\xdf\xe1\x94\xde"
+                         "\x2b\x70\x9f\x94\x7f\x15\x0e\x83"
+                         "\x63\x10\xb3\xf5\xea\xd3\xe8\xd1"
+                         "\xa5\xfc\x17\x19\x68\x9a\xbc\x17"
+                         "\x30\x43\x0a\x1a\x33\x92\xd4\x2a"
+                         "\x2e\x68\x99\xbc\x49\xf0\x68\xe3"
+                         "\xf0\x1f\xcb\xcc\xfa\xbb\x05\x56"
+                         "\x46\x84\x8b\x69\x83\x64\xc5\xe0"
+                         "\xc5\x52\x99\x07\x3c\xa6\x5c\xaf"
+                         "\xa3\xde\xd7\xdb\x43\xe6\xb7\x76"
+                         "\x4e\x4d\xd6\x71\x60\x63\x4a\x0c"
+                         "\x5f\xae\x25\x84\x22\x90\x5f\x26"
+                         "\x61\x4d\x8f\xaf\xc9\x22\xf2\x05"
+                         "\xcf\xc1\xdc\x68\xe5\x57\x8e\x24"
+                         "\x1b\x30\x59\xca\xd7\x0d\xc3\xd3"
+                         "\x52\x9e\x09\x3e\x0e\xaf\xdb\x5f"
+                         "\xc7\x2b\xde\x3a\xfd\xad\x93\x04"
+                         "\x74\x06\x89\x0e\x90\xeb\x85\xff"
+                         "\xe6\x3c\x12\x42\xf4\xfa\x80\x75"
+                         "\x5e\x4e\xd7\x2f\x93\x0b\x34\x41"
+                         "\x02\x85\x68\xd0\x03\x12\xde\x92"
+                         "\x54\x7a\x7e\xfb\x55\xe7\x88\xfb"
+                         "\xa4\xa9\xf2\xd1\xc6\x70\x06\x37"
+                         "\x25\xee\xa7\x6e\xd9\x89\x86\x50"
+                         "\x2e\x07\xdb\xfb\x2a\x86\x45\x0e"
+                         "\x91\xf4\x7c\xbb\x12\x60\xe8\x3f"
+                         "\x71\xbe\x8f\x9d\x26\xef\xd9\x89"
+                         "\xc4\x8f\xd8\xc5\x73\xd8\x84\xaa"
+                         "\x2f\xad\x22\x1e\x7e\xcf\xa2\x08"
+                         "\x23\x45\x89\x42\xa0\x30\xeb\xbf"
+                         "\xa1\xed\xad\xd5\x76\xfa\x24\x8f"
+                         "\x98",
+               .rlen   = 1281,
        },
 };
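Note: the new self-made vector is 1281 bytes, i.e. twenty full 64-byte ChaCha20 blocks plus a single trailing byte, so it exercises both the wide multi-block paths of the x86-64 SSSE3/AVX2 code added in this merge and the partial-block tail handling. A quick standalone consistency check (plain C, independent of the kernel):

#include <assert.h>

int main(void)
{
	const unsigned int ilen = 1281, rlen = 1281, block = 64;

	assert(ilen == rlen);		/* stream cipher: len in == len out */
	assert(ilen / block == 20);	/* twenty whole ChaCha20 blocks...  */
	assert(ilen % block == 1);	/* ...plus a one-byte partial tail  */
	return 0;
}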
 
index c507bcad2c37ee4d1af6e9214f0c36769defd1a5..b2c1c047dc94586710f9dc0858846225318fac6f 100644 (file)
@@ -381,6 +381,9 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
        clk[IMX6QDL_CLK_ASRC]         = imx_clk_gate2_shared("asrc",         "asrc_podf",   base + 0x68, 6, &share_count_asrc);
        clk[IMX6QDL_CLK_ASRC_IPG]     = imx_clk_gate2_shared("asrc_ipg",     "ahb",         base + 0x68, 6, &share_count_asrc);
        clk[IMX6QDL_CLK_ASRC_MEM]     = imx_clk_gate2_shared("asrc_mem",     "ahb",         base + 0x68, 6, &share_count_asrc);
+       clk[IMX6QDL_CLK_CAAM_MEM]     = imx_clk_gate2("caam_mem",      "ahb",               base + 0x68, 8);
+       clk[IMX6QDL_CLK_CAAM_ACLK]    = imx_clk_gate2("caam_aclk",     "ahb",               base + 0x68, 10);
+       clk[IMX6QDL_CLK_CAAM_IPG]     = imx_clk_gate2("caam_ipg",      "ipg",               base + 0x68, 12);
        clk[IMX6QDL_CLK_CAN1_IPG]     = imx_clk_gate2("can1_ipg",      "ipg",               base + 0x68, 14);
        clk[IMX6QDL_CLK_CAN1_SERIAL]  = imx_clk_gate2("can1_serial",   "can_root",          base + 0x68, 16);
        clk[IMX6QDL_CLK_CAN2_IPG]     = imx_clk_gate2("can2_ipg",      "ipg",               base + 0x68, 18);
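Note: the three new gates live in CCGR0 (base + 0x68); imx_clk_gate2 encodes two enable bits per gate, so shifts 8, 10 and 12 select consecutive clock-gate fields. A hedged sketch of how the CAAM driver consumes them through the common clock framework; the clock names are assumptions based on the dtsi changes in this merge, not something this hunk defines:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>

static int caam_init_clocks(struct device *dev)
{
	static const char * const names[] = { "ipg", "mem", "aclk" };
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(names); i++) {
		struct clk *clk = devm_clk_get(dev, names[i]);
		int ret;

		if (IS_ERR(clk))
			return PTR_ERR(clk);
		ret = clk_prepare_enable(clk);	/* ungates the CCGR0 field */
		if (ret)
			return ret;
	}
	return 0;
}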
index 4044125fb5d5fa7589e2cbcb12623546d0effa75..07bc7aa6b224aeeb7ada29b08aec61966d3b3b2e 100644 (file)
@@ -480,4 +480,21 @@ config CRYPTO_DEV_IMGTEC_HASH
          hardware hash accelerator. Supporting MD5/SHA1/SHA224/SHA256
          hashing algorithms.
 
+config CRYPTO_DEV_SUN4I_SS
+       tristate "Support for Allwinner Security System cryptographic accelerator"
+       depends on ARCH_SUNXI
+       select CRYPTO_MD5
+       select CRYPTO_SHA1
+       select CRYPTO_AES
+       select CRYPTO_DES
+       select CRYPTO_BLKCIPHER
+       help
+         Some Allwinner SoCs have a crypto accelerator named
+         Security System. Select this if you want to use it.
+         The Security System handles AES/DES/3DES ciphers in CBC mode
+         and the SHA1 and MD5 hash algorithms.
+
+         To compile this driver as a module, choose M here: the module
+         will be called sun4i-ss.
+
 endif # CRYPTO_HW
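Note: like every other offload in this file, sun4i-ss registers its implementations with the crypto core, so callers simply ask for "cbc(aes)" and get the accelerator whenever its priority wins. A minimal sketch using the top-level skcipher interface added elsewhere in this merge (helper name hypothetical; a real caller must also handle -EINPROGRESS from an async implementation):

#include <crypto/skcipher.h>

static int cbc_aes_encrypt_once(struct scatterlist *sg, unsigned int len,
				const u8 *key, u8 *iv)
{
	struct crypto_skcipher *tfm;
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, 16);	/* AES-128 */
	if (!ret) {
		SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, sg, sg, len, iv);
		ret = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	}
	crypto_free_skcipher(tfm);
	return ret;
}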
index e35c07a8da8568c59d56e35f0f9c30b2362ecbca..c3ced6fbd1b8f9877a7d6a03acad9deacbf5d378 100644 (file)
@@ -28,3 +28,4 @@ obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
 obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
 obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
 obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
+obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
index 3b28e8c3de28d1181e7f36a45af196ebfa457701..192a8fa325c1ccc8c71dd8617d84f509a1b6dfae 100644 (file)
@@ -1113,7 +1113,7 @@ static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
        struct device *dev = (struct device *)data;
        struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
 
-       if (core_dev->dev->ce_base == 0)
+       if (!core_dev->dev->ce_base)
                return 0;
 
        writel(PPC4XX_INTERRUPT_CLR,
index e286e285aa8a48f462b205638986d7b52cb01642..5652a53415dc2efe395069ade7a55517620c2579 100644 (file)
@@ -1,6 +1,6 @@
 config CRYPTO_DEV_FSL_CAAM
        tristate "Freescale CAAM-Multicore driver backend"
-       depends on FSL_SOC
+       depends on FSL_SOC || ARCH_MXC
        help
          Enables the driver module for Freescale's Cryptographic Accelerator
          and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -112,6 +112,14 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
          To compile this as a module, choose M here: the module
          will be called caamrng.
 
+config CRYPTO_DEV_FSL_CAAM_IMX
+       def_bool SOC_IMX6 || SOC_IMX7D
+       depends on CRYPTO_DEV_FSL_CAAM
+
+config CRYPTO_DEV_FSL_CAAM_LE
+       def_bool CRYPTO_DEV_FSL_CAAM_IMX || SOC_LS1021A
+       depends on CRYPTO_DEV_FSL_CAAM
+
 config CRYPTO_DEV_FSL_CAAM_DEBUG
        bool "Enable debug output in CAAM driver"
        depends on CRYPTO_DEV_FSL_CAAM
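Note: the two new def_bool symbols are never offered to the user; they record that the CAAM register block is little-endian on i.MX6/7 and LS1021A, unlike the big-endian PowerPC parts. A condensed sketch of the kind of accessor such a symbol gates (the real helpers live in the driver's regs.h; the name here is illustrative):

static inline u32 caam_read32(const void __iomem *reg)
{
#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_LE
	return ioread32(reg);		/* i.MX6/7, LS1021A */
#else
	return ioread32be(reg);		/* classic big-endian SEC4 */
#endif
}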
index daca933a82ec9ea1c918a868516e859ffcdbca98..ba79d638f78200a189bda8763f2997222d60709d 100644 (file)
 #define AEAD_DESC_JOB_IO_LEN           (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
 #define GCM_DESC_JOB_IO_LEN            (AEAD_DESC_JOB_IO_LEN + \
                                         CAAM_CMD_SZ * 4)
+#define AUTHENC_DESC_JOB_IO_LEN                (AEAD_DESC_JOB_IO_LEN + \
+                                        CAAM_CMD_SZ * 5)
 
 /* length of descriptors text */
 #define DESC_AEAD_BASE                 (4 * CAAM_CMD_SZ)
-#define DESC_AEAD_ENC_LEN              (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
-#define DESC_AEAD_DEC_LEN              (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
-#define DESC_AEAD_GIVENC_LEN           (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN              (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN              (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN           (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)
 
 /* Note: Nonce is counted in enckeylen */
-#define DESC_AEAD_CTR_RFC3686_LEN      (6 * CAAM_CMD_SZ)
+#define DESC_AEAD_CTR_RFC3686_LEN      (4 * CAAM_CMD_SZ)
 
 #define DESC_AEAD_NULL_BASE            (3 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_ENC_LEN         (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_DEC_LEN         (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_ENC_LEN         (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_DEC_LEN         (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
 
 #define DESC_GCM_BASE                  (3 * CAAM_CMD_SZ)
 #define DESC_GCM_ENC_LEN               (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
 #define DESC_GCM_DEC_LEN               (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
 
 #define DESC_RFC4106_BASE              (3 * CAAM_CMD_SZ)
-#define DESC_RFC4106_ENC_LEN           (DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ)
-#define DESC_RFC4106_DEC_LEN           (DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ)
+#define DESC_RFC4106_ENC_LEN           (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+#define DESC_RFC4106_DEC_LEN           (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
 
 #define DESC_RFC4543_BASE              (3 * CAAM_CMD_SZ)
 #define DESC_RFC4543_ENC_LEN           (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
 #endif
 static struct list_head alg_list;
 
+struct caam_alg_entry {
+       int class1_alg_type;
+       int class2_alg_type;
+       int alg_op;
+       bool rfc3686;
+       bool geniv;
+};
+
+struct caam_aead_alg {
+       struct aead_alg aead;
+       struct caam_alg_entry caam;
+       bool registered;
+};
+
 /* Set DK bit in class 1 operation if shared */
 static inline void append_dec_op1(u32 *desc, u32 type)
 {
@@ -144,18 +160,6 @@ static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
                             KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
 }
 
-/*
- * For aead encrypt and decrypt, read iv for both classes
- */
-static inline void aead_append_ld_iv(u32 *desc, int ivsize, int ivoffset)
-{
-       append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
-                       LDST_SRCDST_BYTE_CONTEXT |
-                       (ivoffset << LDST_OFFSET_SHIFT));
-       append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
-                   (ivoffset << MOVE_OFFSET_SHIFT) | ivsize);
-}
-
 /*
  * For ablkcipher encrypt and decrypt, read from req->src and
  * write to req->dst
@@ -169,13 +173,6 @@ static inline void ablkcipher_append_src_dst(u32 *desc)
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
 }
 
-/*
- * If all data, including src (with assoc and iv) or dst (with iv only) are
- * contiguous
- */
-#define GIV_SRC_CONTIG         1
-#define GIV_DST_CONTIG         (1 << 1)
-
 /*
  * per-session context
  */
@@ -259,7 +256,6 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
 
 static int aead_null_set_sh_desc(struct crypto_aead *aead)
 {
-       unsigned int ivsize = crypto_aead_ivsize(aead);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        bool keys_fit_inline = false;
@@ -270,11 +266,11 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
-       if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
+       if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
            ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;
 
-       /* old_aead_encrypt shared descriptor */
+       /* aead_encrypt shared descriptor */
        desc = ctx->sh_desc_enc;
 
        init_sh_desc(desc, HDR_SHARE_SERIAL);
@@ -291,20 +287,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
                           KEY_DEST_MDHA_SPLIT | KEY_ENC);
        set_jump_tgt_here(desc, key_jump_cmd);
 
-       /* cryptlen = seqoutlen - authsize */
-       append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
-       /*
-        * NULL encryption; IV is zero
-        * assoclen = (assoclen + cryptlen) - cryptlen
-        */
-       append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
-
-       /* read assoc before reading payload */
-       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
-                            KEY_VLF);
+       /* assoclen + cryptlen = seqinlen */
+       append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
 
-       /* Prepare to read and write cryptlen bytes */
+       /* Prepare to read and write cryptlen + assoclen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
 
@@ -363,7 +349,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
 
        desc = ctx->sh_desc_dec;
 
-       /* old_aead_decrypt shared descriptor */
+       /* aead_decrypt shared descriptor */
        init_sh_desc(desc, HDR_SHARE_SERIAL);
 
        /* Skip if already shared */
@@ -382,18 +368,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
 
-       /* assoclen + cryptlen = seqinlen - ivsize - authsize */
-       append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
-                               ctx->authsize + ivsize);
-       /* assoclen = (assoclen + cryptlen) - cryptlen */
+       /* assoclen + cryptlen = seqoutlen */
        append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-       append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
 
-       /* read assoc before reading payload */
-       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
-                            KEY_VLF);
-
-       /* Prepare to read and write cryptlen bytes */
+       /* Prepare to read and write cryptlen + assoclen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
 
@@ -450,10 +428,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
 
 static int aead_set_sh_desc(struct crypto_aead *aead)
 {
+       struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+                                                struct caam_aead_alg, aead);
        unsigned int ivsize = crypto_aead_ivsize(aead);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
-       struct crypto_tfm *ctfm = crypto_aead_tfm(aead);
-       const char *alg_name = crypto_tfm_alg_name(ctfm);
        struct device *jrdev = ctx->jrdev;
        bool keys_fit_inline;
        u32 geniv, moveiv;
@@ -461,11 +439,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
        u32 *desc;
        const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
                               OP_ALG_AAI_CTR_MOD128);
-       const bool is_rfc3686 = (ctr_mode &&
-                                (strstr(alg_name, "rfc3686") != NULL));
-
-       if (!ctx->authsize)
-               return 0;
+       const bool is_rfc3686 = alg->caam.rfc3686;
 
        /* NULL encryption / decryption */
        if (!ctx->enckeylen)
@@ -486,18 +460,21 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
        if (is_rfc3686)
                ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
 
+       if (alg->caam.geniv)
+               goto skip_enc;
+
        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
-       if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
+       if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
            ctx->split_key_pad_len + ctx->enckeylen +
            (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
            CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;
 
-       /* old_aead_encrypt shared descriptor */
+       /* aead_encrypt shared descriptor */
        desc = ctx->sh_desc_enc;
 
        /* Note: Context registers are saved. */
@@ -507,19 +484,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
 
-       /* cryptlen = seqoutlen - authsize */
-       append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
-       /* assoclen + cryptlen = seqinlen - ivsize */
-       append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, ivsize);
+       /* Read and write assoclen bytes */
+       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
 
-       /* assoclen = (assoclen + cryptlen) - cryptlen */
-       append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
+       /* Skip assoc data */
+       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
 
        /* read assoc before reading payload */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
-                            KEY_VLF);
-       aead_append_ld_iv(desc, ivsize, ctx1_iv_off);
+                                     FIFOLDST_VLF);
 
        /* Load Counter into CONTEXT1 reg */
        if (is_rfc3686)
@@ -534,8 +508,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
 
        /* Read and write cryptlen bytes */
-       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+       append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+       append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
        aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
 
        /* Write ICV */
@@ -555,18 +529,19 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
                       desc_bytes(desc), 1);
 #endif
 
+skip_enc:
        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
-       if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
+       if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
            ctx->split_key_pad_len + ctx->enckeylen +
            (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
            CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;
 
-       /* old_aead_decrypt shared descriptor */
+       /* aead_decrypt shared descriptor */
        desc = ctx->sh_desc_dec;
 
        /* Note: Context registers are saved. */
@@ -576,19 +551,17 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
 
-       /* assoclen + cryptlen = seqinlen - ivsize - authsize */
-       append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
-                               ctx->authsize + ivsize);
-       /* assoclen = (assoclen + cryptlen) - cryptlen */
-       append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-       append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+       /* Read and write assoclen bytes */
+       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+       /* Skip assoc data */
+       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
 
        /* read assoc before reading payload */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
                             KEY_VLF);
 
-       aead_append_ld_iv(desc, ivsize, ctx1_iv_off);
-
        /* Load Counter into CONTEXT1 reg */
        if (is_rfc3686)
                append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
@@ -605,8 +578,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
                append_dec_op1(desc, ctx->class1_alg_type);
 
        /* Read and write cryptlen bytes */
-       append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
-       append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+       append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+       append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
        aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
 
        /* Load ICV */
@@ -626,12 +599,15 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
                       desc_bytes(desc), 1);
 #endif
 
+       if (!alg->caam.geniv)
+               goto skip_givenc;
+
        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
-       if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
+       if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
            ctx->split_key_pad_len + ctx->enckeylen +
            (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
            CAAM_DESC_BYTES_MAX)
@@ -643,6 +619,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
        /* Note: Context registers are saved. */
        init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
 
+       if (is_rfc3686)
+               goto copy_iv;
+
        /* Generate IV */
        geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
                NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
@@ -656,6 +635,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
                    (ivsize << MOVE_LEN_SHIFT));
        append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
 
+copy_iv:
        /* Copy IV to class 1 context */
        append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
                    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
@@ -668,8 +648,12 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
        /* ivsize + cryptlen = seqoutlen - authsize */
        append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
 
-       /* assoclen = seqinlen - (ivsize + cryptlen) */
-       append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+       /* Read and write assoclen bytes */
+       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+       /* Skip assoc data */
+       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
 
        /* read assoc before reading payload */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
@@ -710,9 +694,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
        append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);
 
-       ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
-                                                desc_bytes(desc),
-                                                DMA_TO_DEVICE);
+       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+                                             desc_bytes(desc),
+                                             DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
@@ -723,6 +707,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
                       desc_bytes(desc), 1);
 #endif
 
+skip_givenc:
        return 0;
 }
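Note: with the givencrypt flavour folded into the same function, aead_set_sh_desc() now builds up to three shared descriptors, steered by the template flags. The goto structure, condensed to an outline:

/*
 * aead_set_sh_desc() control flow after this patch (outline only):
 *
 *	if (alg->caam.geniv)
 *		goto skip_enc;		plain encrypt descriptor not needed
 *	... build aead_encrypt shared descriptor ...
 * skip_enc:
 *	... build aead_decrypt shared descriptor ...
 *	if (!alg->caam.geniv)
 *		goto skip_givenc;
 *	if (is_rfc3686)
 *		goto copy_iv;		counter-mode IV is not HW-generated
 *	... generate the IV via NFIFO padding ...
 * copy_iv:
 *	... copy the IV to class 1 context, finish givencrypt descriptor ...
 * skip_givenc:
 *	return 0;
 */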
 
@@ -976,22 +961,28 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
 
-       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+       append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
 
-       /* Skip assoc data */
-       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
        /* Read assoc data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
 
-       /* cryptlen = seqoutlen - assoclen */
-       append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+       /* Skip IV */
+       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
 
        /* Will read cryptlen bytes */
        append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
 
+       /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+       /* Skip assoc data */
+       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+       /* cryptlen = seqoutlen - assoclen */
+       append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
+
        /* Write encrypted data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
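Note: the reordering here is deliberate, not cosmetic. VARSEQINLEN now starts as assoclen - 8 because the 8-byte explicit IV arrives inside the source buffer but must not be hashed as AAD, and the zero-length Class 1 MSG load is the A-005473 workaround named in the comment: it separates the input-side IV skip from the output-side VLF skip store so the two SEQ FIFO skips never execute simultaneously. Condensed command order of the resulting encrypt descriptor:

/*
 *	VARSEQINLEN  = assoclen - 8	AAD proper, without the IV
 *	VARSEQOUTLEN = assoclen
 *	FIFOLD  class1 AAD (VLF)	hash the AAD
 *	FIFOLD  skip 8			drop the explicit IV on input
 *	VARSEQINLEN  = SEQINLEN		remaining input = cryptlen
 *	FIFOLD  class1 MSG, len 0	dummy op (erratum A-005473)
 *	FIFOST  skip (VLF)		drop assoc data on the output side
 *	VARSEQOUTLEN = VARSEQINLEN	will write cryptlen bytes
 *	FIFOST  MESSAGE_DATA (VLF)	emit the ciphertext
 */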
 
@@ -1044,21 +1035,27 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
 
-       append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+       append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
 
-       /* Skip assoc data */
-       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
        /* Read assoc data */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
                             FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
 
-       /* Will write cryptlen bytes */
-       append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+       /* Skip IV */
+       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
 
        /* Will read cryptlen bytes */
-       append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+       append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
+
+       /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+       append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+       /* Skip assoc data */
+       append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+       /* Will write cryptlen bytes */
+       append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
 
        /* Store payload data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
@@ -1793,22 +1790,6 @@ static void aead_unmap(struct device *dev,
                   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
-static void old_aead_unmap(struct device *dev,
-                          struct aead_edesc *edesc,
-                          struct aead_request *req)
-{
-       struct crypto_aead *aead = crypto_aead_reqtfm(req);
-       int ivsize = crypto_aead_ivsize(aead);
-
-       dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
-                            DMA_TO_DEVICE, edesc->assoc_chained);
-
-       caam_unmap(dev, req->src, req->dst,
-                  edesc->src_nents, edesc->src_chained, edesc->dst_nents,
-                  edesc->dst_chained, edesc->iv_dma, ivsize,
-                  edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
-}
-
 static void ablkcipher_unmap(struct device *dev,
                             struct ablkcipher_edesc *edesc,
                             struct ablkcipher_request *req)
@@ -1844,45 +1825,6 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
        aead_request_complete(req, err);
 }
 
-static void old_aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
-                                 void *context)
-{
-       struct aead_request *req = context;
-       struct aead_edesc *edesc;
-#ifdef DEBUG
-       struct crypto_aead *aead = crypto_aead_reqtfm(req);
-       struct caam_ctx *ctx = crypto_aead_ctx(aead);
-       int ivsize = crypto_aead_ivsize(aead);
-
-       dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
-
-       edesc = (struct aead_edesc *)((char *)desc -
-                offsetof(struct aead_edesc, hw_desc));
-
-       if (err)
-               caam_jr_strstatus(jrdev, err);
-
-       old_aead_unmap(jrdev, edesc, req);
-
-#ifdef DEBUG
-       print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
-                      DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
-                      req->assoclen , 1);
-       print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
-                      DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
-                      edesc->src_nents ? 100 : ivsize, 1);
-       print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
-                      DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-                      edesc->src_nents ? 100 : req->cryptlen +
-                      ctx->authsize + 4, 1);
-#endif
-
-       kfree(edesc);
-
-       aead_request_complete(req, err);
-}
-
 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
                                   void *context)
 {
@@ -1911,62 +1853,6 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
        aead_request_complete(req, err);
 }
 
-static void old_aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
-                                 void *context)
-{
-       struct aead_request *req = context;
-       struct aead_edesc *edesc;
-#ifdef DEBUG
-       struct crypto_aead *aead = crypto_aead_reqtfm(req);
-       struct caam_ctx *ctx = crypto_aead_ctx(aead);
-       int ivsize = crypto_aead_ivsize(aead);
-
-       dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
-
-       edesc = (struct aead_edesc *)((char *)desc -
-                offsetof(struct aead_edesc, hw_desc));
-
-#ifdef DEBUG
-       print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
-                      DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
-                      ivsize, 1);
-       print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
-                      DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
-                      req->cryptlen - ctx->authsize, 1);
-#endif
-
-       if (err)
-               caam_jr_strstatus(jrdev, err);
-
-       old_aead_unmap(jrdev, edesc, req);
-
-       /*
-        * verify hw auth check passed else return -EBADMSG
-        */
-       if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
-               err = -EBADMSG;
-
-#ifdef DEBUG
-       print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
-                      DUMP_PREFIX_ADDRESS, 16, 4,
-                      ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
-                      sizeof(struct iphdr) + req->assoclen +
-                      ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
-                      ctx->authsize + 36, 1);
-       if (!err && edesc->sec4_sg_bytes) {
-               struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
-               print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
-                              DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
-                       sg->length + ctx->authsize + 16, 1);
-       }
-#endif
-
-       kfree(edesc);
-
-       aead_request_complete(req, err);
-}
-
 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
                                   void *context)
 {
@@ -2032,91 +1918,6 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
        ablkcipher_request_complete(req, err);
 }
 
-/*
- * Fill in aead job descriptor
- */
-static void old_init_aead_job(u32 *sh_desc, dma_addr_t ptr,
-                             struct aead_edesc *edesc,
-                             struct aead_request *req,
-                             bool all_contig, bool encrypt)
-{
-       struct crypto_aead *aead = crypto_aead_reqtfm(req);
-       struct caam_ctx *ctx = crypto_aead_ctx(aead);
-       int ivsize = crypto_aead_ivsize(aead);
-       int authsize = ctx->authsize;
-       u32 *desc = edesc->hw_desc;
-       u32 out_options = 0, in_options;
-       dma_addr_t dst_dma, src_dma;
-       int len, sec4_sg_index = 0;
-       bool is_gcm = false;
-
-#ifdef DEBUG
-       debug("assoclen %d cryptlen %d authsize %d\n",
-             req->assoclen, req->cryptlen, authsize);
-       print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
-                      DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
-                      req->assoclen , 1);
-       print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
-                      DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
-                      edesc->src_nents ? 100 : ivsize, 1);
-       print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
-                      DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-                       edesc->src_nents ? 100 : req->cryptlen, 1);
-       print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
-                      DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
-                      desc_bytes(sh_desc), 1);
-#endif
-
-       if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
-             OP_ALG_ALGSEL_AES) &&
-           ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
-               is_gcm = true;
-
-       len = desc_len(sh_desc);
-       init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
-
-       if (all_contig) {
-               if (is_gcm)
-                       src_dma = edesc->iv_dma;
-               else
-                       src_dma = sg_dma_address(req->assoc);
-               in_options = 0;
-       } else {
-               src_dma = edesc->sec4_sg_dma;
-               sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
-                                (edesc->src_nents ? : 1);
-               in_options = LDST_SGF;
-       }
-
-       append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
-                         in_options);
-
-       if (likely(req->src == req->dst)) {
-               if (all_contig) {
-                       dst_dma = sg_dma_address(req->src);
-               } else {
-                       dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
-                                 ((edesc->assoc_nents ? : 1) + 1);
-                       out_options = LDST_SGF;
-               }
-       } else {
-               if (!edesc->dst_nents) {
-                       dst_dma = sg_dma_address(req->dst);
-               } else {
-                       dst_dma = edesc->sec4_sg_dma +
-                                 sec4_sg_index *
-                                 sizeof(struct sec4_sg_entry);
-                       out_options = LDST_SGF;
-               }
-       }
-       if (encrypt)
-               append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
-                                  out_options);
-       else
-               append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
-                                  out_options);
-}
-
 /*
  * Fill in aead job descriptor
  */
@@ -2208,80 +2009,43 @@ static void init_gcm_job(struct aead_request *req,
        /* End of blank commands */
 }
 
-/*
- * Fill in aead givencrypt job descriptor
- */
-static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
-                             struct aead_edesc *edesc,
-                             struct aead_request *req,
-                             int contig)
+static void init_authenc_job(struct aead_request *req,
+                            struct aead_edesc *edesc,
+                            bool all_contig, bool encrypt)
 {
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
+       struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+                                                struct caam_aead_alg, aead);
+       unsigned int ivsize = crypto_aead_ivsize(aead);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
-       int ivsize = crypto_aead_ivsize(aead);
-       int authsize = ctx->authsize;
+       const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+                              OP_ALG_AAI_CTR_MOD128);
+       const bool is_rfc3686 = alg->caam.rfc3686;
        u32 *desc = edesc->hw_desc;
-       u32 out_options = 0, in_options;
-       dma_addr_t dst_dma, src_dma;
-       int len, sec4_sg_index = 0;
-       bool is_gcm = false;
+       u32 ivoffset = 0;
 
-#ifdef DEBUG
-       debug("assoclen %d cryptlen %d authsize %d\n",
-             req->assoclen, req->cryptlen, authsize);
-       print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
-                      DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
-                      req->assoclen , 1);
-       print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
-                      DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
-       print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
-                      DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-                       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
-       print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
-                      DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
-                      desc_bytes(sh_desc), 1);
-#endif
+       /*
+        * AES-CTR needs to load IV in CONTEXT1 reg
+        * at an offset of 128bits (16bytes)
+        * CONTEXT1[255:128] = IV
+        */
+       if (ctr_mode)
+               ivoffset = 16;
 
-       if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
-             OP_ALG_ALGSEL_AES) &&
-           ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
-               is_gcm = true;
+       /*
+        * RFC3686 specific:
+        *      CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+        */
+       if (is_rfc3686)
+               ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;
 
-       len = desc_len(sh_desc);
-       init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+       init_aead_job(req, edesc, all_contig, encrypt);
 
-       if (contig & GIV_SRC_CONTIG) {
-               if (is_gcm)
-                       src_dma = edesc->iv_dma;
-               else
-                       src_dma = sg_dma_address(req->assoc);
-               in_options = 0;
-       } else {
-               src_dma = edesc->sec4_sg_dma;
-               sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
-               in_options = LDST_SGF;
-       }
-       append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
-                         in_options);
-
-       if (contig & GIV_DST_CONTIG) {
-               dst_dma = edesc->iv_dma;
-       } else {
-               if (likely(req->src == req->dst)) {
-                       dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
-                                 (edesc->assoc_nents +
-                                  (is_gcm ? 1 + edesc->src_nents : 0));
-                       out_options = LDST_SGF;
-               } else {
-                       dst_dma = edesc->sec4_sg_dma +
-                                 sec4_sg_index *
-                                 sizeof(struct sec4_sg_entry);
-                       out_options = LDST_SGF;
-               }
-       }
-
-       append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
-                          out_options);
+       if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
+               append_load_as_imm(desc, req->iv, ivsize,
+                                  LDST_CLASS_1_CCB |
+                                  LDST_SRCDST_BYTE_CONTEXT |
+                                  (ivoffset << LDST_OFFSET_SHIFT));
 }
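Note: init_authenc_job() replaces the old init_aead_giv_job(); instead of chasing contiguity cases it loads the IV as immediate data into CONTEXT1 at the offset the shared descriptors expect. The layout, condensed from the comments above:

/*
 * CONTEXT1 layout assumed by init_authenc_job():
 *
 *	CBC-class algorithms:	IV at offset 0		(ivoffset = 0)
 *	plain AES-CTR:		CONTEXT1[255:128] = IV	(ivoffset = 16)
 *	RFC 3686:		CONTEXT1[255:128] = {NONCE, IV, COUNTER},
 *				IV at 16 + CTR_RFC3686_NONCE_SIZE = 20
 *
 * For geniv encryption the descriptor generates the IV itself, hence
 * the !(alg->caam.geniv && encrypt) guard on the immediate load; the
 * rfc3686 case always loads, since the nonce/counter IV is never
 * hardware-generated.
 */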
 
 /*
@@ -2389,150 +2153,6 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
        append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
 }
 
-/*
- * allocate and map the aead extended descriptor
- */
-static struct aead_edesc *old_aead_edesc_alloc(struct aead_request *req,
-                                              int desc_bytes,
-                                              bool *all_contig_ptr,
-                                              bool encrypt)
-{
-       struct crypto_aead *aead = crypto_aead_reqtfm(req);
-       struct caam_ctx *ctx = crypto_aead_ctx(aead);
-       struct device *jrdev = ctx->jrdev;
-       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
-       int assoc_nents, src_nents, dst_nents = 0;
-       struct aead_edesc *edesc;
-       dma_addr_t iv_dma = 0;
-       int sgc;
-       bool all_contig = true;
-       bool assoc_chained = false, src_chained = false, dst_chained = false;
-       int ivsize = crypto_aead_ivsize(aead);
-       int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
-       unsigned int authsize = ctx->authsize;
-       bool is_gcm = false;
-
-       assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
-
-       if (unlikely(req->dst != req->src)) {
-               src_nents = sg_count(req->src, req->cryptlen, &src_chained);
-               dst_nents = sg_count(req->dst,
-                                    req->cryptlen +
-                                       (encrypt ? authsize : (-authsize)),
-                                    &dst_chained);
-       } else {
-               src_nents = sg_count(req->src,
-                                    req->cryptlen +
-                                       (encrypt ? authsize : 0),
-                                    &src_chained);
-       }
-
-       sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
-                                DMA_TO_DEVICE, assoc_chained);
-       if (likely(req->src == req->dst)) {
-               sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-                                        DMA_BIDIRECTIONAL, src_chained);
-       } else {
-               sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-                                        DMA_TO_DEVICE, src_chained);
-               sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
-                                        DMA_FROM_DEVICE, dst_chained);
-       }
-
-       iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, iv_dma)) {
-               dev_err(jrdev, "unable to map IV\n");
-               return ERR_PTR(-ENOMEM);
-       }
-
-       if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
-             OP_ALG_ALGSEL_AES) &&
-           ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
-               is_gcm = true;
-
-       /*
-        * Check if data are contiguous.
-        * GCM expected input sequence: IV, AAD, text
-        * All other - expected input sequence: AAD, IV, text
-        */
-       if (is_gcm)
-               all_contig = (!assoc_nents &&
-                             iv_dma + ivsize == sg_dma_address(req->assoc) &&
-                             !src_nents && sg_dma_address(req->assoc) +
-                             req->assoclen == sg_dma_address(req->src));
-       else
-               all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
-                             req->assoclen == iv_dma && !src_nents &&
-                             iv_dma + ivsize == sg_dma_address(req->src));
-       if (!all_contig) {
-               assoc_nents = assoc_nents ? : 1;
-               src_nents = src_nents ? : 1;
-               sec4_sg_len = assoc_nents + 1 + src_nents;
-       }
-
-       sec4_sg_len += dst_nents;
-
-       sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
-
-       /* allocate space for base edesc and hw desc commands, link tables */
-       edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
-                       sec4_sg_bytes, GFP_DMA | flags);
-       if (!edesc) {
-               dev_err(jrdev, "could not allocate extended descriptor\n");
-               return ERR_PTR(-ENOMEM);
-       }
-
-       edesc->assoc_nents = assoc_nents;
-       edesc->assoc_chained = assoc_chained;
-       edesc->src_nents = src_nents;
-       edesc->src_chained = src_chained;
-       edesc->dst_nents = dst_nents;
-       edesc->dst_chained = dst_chained;
-       edesc->iv_dma = iv_dma;
-       edesc->sec4_sg_bytes = sec4_sg_bytes;
-       edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
-                        desc_bytes;
-       *all_contig_ptr = all_contig;
-
-       sec4_sg_index = 0;
-       if (!all_contig) {
-               if (!is_gcm) {
-                       sg_to_sec4_sg_len(req->assoc, req->assoclen,
-                                         edesc->sec4_sg + sec4_sg_index);
-                       sec4_sg_index += assoc_nents;
-               }
-
-               dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
-                                  iv_dma, ivsize, 0);
-               sec4_sg_index += 1;
-
-               if (is_gcm) {
-                       sg_to_sec4_sg_len(req->assoc, req->assoclen,
-                                         edesc->sec4_sg + sec4_sg_index);
-                       sec4_sg_index += assoc_nents;
-               }
-
-               sg_to_sec4_sg_last(req->src,
-                                  src_nents,
-                                  edesc->sec4_sg +
-                                  sec4_sg_index, 0);
-               sec4_sg_index += src_nents;
-       }
-       if (dst_nents) {
-               sg_to_sec4_sg_last(req->dst, dst_nents,
-                                  edesc->sec4_sg + sec4_sg_index, 0);
-       }
-       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-                                           sec4_sg_bytes, DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
-               dev_err(jrdev, "unable to map S/G table\n");
-               return ERR_PTR(-ENOMEM);
-       }
-
-       return edesc;
-}
-
 /*
  * allocate and map the aead extended descriptor
  */
@@ -2579,8 +2199,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
        sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
 
        /* allocate space for base edesc and hw desc commands, link tables */
-       edesc = kzalloc(sizeof(struct aead_edesc) + desc_bytes +
-                       sec4_sg_bytes, GFP_DMA | flags);
+       edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+                       GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
                return ERR_PTR(-ENOMEM);
@@ -2685,7 +2305,15 @@ static int gcm_encrypt(struct aead_request *req)
        return ret;
 }
 
-static int old_aead_encrypt(struct aead_request *req)
+static int ipsec_gcm_encrypt(struct aead_request *req)
+{
+       if (req->assoclen < 8)
+               return -EINVAL;
+
+       return gcm_encrypt(req);
+}
+
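
ipsec_gcm_encrypt() (and its decrypt twin below) only rejects requests whose associated data cannot hold the 8-byte explicit IV: under the new AEAD calling convention, rfc4106/rfc4543 count that IV inside req->assoclen (in IPsec ESP usage the value is 16 or 20), while plain gcm(aes) takes its full 12-byte nonce in req->iv instead. A minimal sketch of a 4.3-era in-kernel caller under this convention; the demo_* names are hypothetical and error unwinding is trimmed:

#include <crypto/aead.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct demo_wait {
	struct completion done;
	int err;
};

static void demo_aead_done(struct crypto_async_request *areq, int err)
{
	struct demo_wait *w = areq->data;

	if (err == -EINPROGRESS)
		return;			/* backlogged, completion still pending */
	w->err = err;
	complete(&w->done);
}

/*
 * buf layout: 16 bytes of associated data (SPI + sequence number + the
 * 8-byte explicit IV) followed by ptlen bytes of plaintext, with 16
 * spare bytes at the end for the ICV.
 */
static int demo_rfc4106_encrypt(u8 *buf, unsigned int ptlen,
				const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	struct demo_wait w;
	int err;

	tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	init_completion(&w.done);

	/* an rfc4106 key carries a 4-byte nonce salt after the AES key */
	err = crypto_aead_setkey(tfm, key, keylen) ?:
	      crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	sg_init_one(&sg, buf, 16 + ptlen + 16);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  demo_aead_done, &w);
	aead_request_set_ad(req, 16);	/* assoclen includes the explicit IV */
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);

	err = crypto_aead_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&w.done);
		err = w.err;
	}
	aead_request_free(req);
out:
	crypto_free_aead(tfm);
	return err;
}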
+static int aead_encrypt(struct aead_request *req)
 {
        struct aead_edesc *edesc;
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -2696,14 +2324,13 @@ static int old_aead_encrypt(struct aead_request *req)
        int ret = 0;
 
        /* allocate extended descriptor */
-       edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN *
-                                    CAAM_CMD_SZ, &all_contig, true);
+       edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
+                                &all_contig, true);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);
 
        /* Create and submit job descriptor */
-       old_init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
-                         all_contig, true);
+       init_authenc_job(req, edesc, all_contig, true);
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
@@ -2711,11 +2338,11 @@ static int old_aead_encrypt(struct aead_request *req)
 #endif
 
        desc = edesc->hw_desc;
-       ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req);
+       ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
        if (!ret) {
                ret = -EINPROGRESS;
        } else {
-               old_aead_unmap(jrdev, edesc, req);
+               aead_unmap(jrdev, edesc, req);
                kfree(edesc);
        }
 
@@ -2757,7 +2384,15 @@ static int gcm_decrypt(struct aead_request *req)
        return ret;
 }
 
-static int old_aead_decrypt(struct aead_request *req)
+static int ipsec_gcm_decrypt(struct aead_request *req)
+{
+       if (req->assoclen < 8)
+               return -EINVAL;
+
+       return gcm_decrypt(req);
+}
+
+static int aead_decrypt(struct aead_request *req)
 {
        struct aead_edesc *edesc;
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -2768,20 +2403,19 @@ static int old_aead_decrypt(struct aead_request *req)
        int ret = 0;
 
        /* allocate extended descriptor */
-       edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN *
-                                    CAAM_CMD_SZ, &all_contig, false);
+       edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
+                                &all_contig, false);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);
 
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-                      req->cryptlen, 1);
+                      req->assoclen + req->cryptlen, 1);
 #endif
 
        /* Create and submit job descriptor */
-       old_init_aead_job(ctx->sh_desc_dec,
-                         ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
+       init_authenc_job(req, edesc, all_contig, false);
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
@@ -2789,49 +2423,58 @@ static int old_aead_decrypt(struct aead_request *req)
 #endif
 
        desc = edesc->hw_desc;
-       ret = caam_jr_enqueue(jrdev, desc, old_aead_decrypt_done, req);
+       ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
        if (!ret) {
                ret = -EINPROGRESS;
        } else {
-               old_aead_unmap(jrdev, edesc, req);
+               aead_unmap(jrdev, edesc, req);
                kfree(edesc);
        }
 
        return ret;
 }
 
+static int aead_givdecrypt(struct aead_request *req)
+{
+       struct crypto_aead *aead = crypto_aead_reqtfm(req);
+       unsigned int ivsize = crypto_aead_ivsize(aead);
+
+       if (req->cryptlen < ivsize)
+               return -EINVAL;
+
+       req->cryptlen -= ivsize;
+       req->assoclen += ivsize;
+
+       return aead_decrypt(req);
+}
+
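
aead_givdecrypt() undoes the geniv convention used by the echainiv templates below: on the wire the generated IV sits between the associated data and the ciphertext, so for decryption it is folded into the associated data and authenticated rather than decrypted. A worked example with hypothetical lengths for echainiv(authenc(hmac(sha1),cbc(aes))) (ivsize 16, ICV 20):

    on entry:     assoclen = 20, cryptlen = 16 (IV) + 64 (CT) + 20 (ICV) = 100
    after fixup:  assoclen = 36, cryptlen = 84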
 /*
- * allocate and map the aead extended descriptor for aead givencrypt
+ * allocate and map the ablkcipher extended descriptor for ablkcipher
  */
-static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
-                                              *greq, int desc_bytes,
-                                              u32 *contig_ptr)
+static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+                                                      *req, int desc_bytes,
+                                                      bool *iv_contig_out)
 {
-       struct aead_request *req = &greq->areq;
-       struct crypto_aead *aead = crypto_aead_reqtfm(req);
-       struct caam_ctx *ctx = crypto_aead_ctx(aead);
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
-       int assoc_nents, src_nents, dst_nents = 0;
-       struct aead_edesc *edesc;
+                                         CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+                      GFP_KERNEL : GFP_ATOMIC;
+       int src_nents, dst_nents = 0, sec4_sg_bytes;
+       struct ablkcipher_edesc *edesc;
        dma_addr_t iv_dma = 0;
+       bool iv_contig = false;
        int sgc;
-       u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
-       int ivsize = crypto_aead_ivsize(aead);
-       bool assoc_chained = false, src_chained = false, dst_chained = false;
-       int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
-       bool is_gcm = false;
+       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+       bool src_chained = false, dst_chained = false;
+       int sec4_sg_index;
 
-       assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
-       src_nents = sg_count(req->src, req->cryptlen, &src_chained);
+       src_nents = sg_count(req->src, req->nbytes, &src_chained);
 
-       if (unlikely(req->dst != req->src))
-               dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
-                                    &dst_chained);
+       if (req->dst != req->src)
+               dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
 
-       sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
-                                DMA_TO_DEVICE, assoc_chained);
        if (likely(req->src == req->dst)) {
                sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
                                         DMA_BIDIRECTIONAL, src_chained);
@@ -2842,121 +2485,52 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
                                         DMA_FROM_DEVICE, dst_chained);
        }
 
-       iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+       iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, iv_dma)) {
                dev_err(jrdev, "unable to map IV\n");
                return ERR_PTR(-ENOMEM);
        }
 
-       if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
-             OP_ALG_ALGSEL_AES) &&
-           ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
-               is_gcm = true;
-
        /*
-        * Check if data are contiguous.
-        * GCM expected input sequence: IV, AAD, text
-        * All other - expected input sequence: AAD, IV, text
+        * Check if iv can be contiguous with source and destination.
+        * If so, include it. If not, create scatterlist.
         */
-
-       if (is_gcm) {
-               if (assoc_nents || iv_dma + ivsize !=
-                   sg_dma_address(req->assoc) || src_nents ||
-                   sg_dma_address(req->assoc) + req->assoclen !=
-                   sg_dma_address(req->src))
-                       contig &= ~GIV_SRC_CONTIG;
-       } else {
-               if (assoc_nents ||
-                   sg_dma_address(req->assoc) + req->assoclen != iv_dma ||
-                   src_nents || iv_dma + ivsize != sg_dma_address(req->src))
-                       contig &= ~GIV_SRC_CONTIG;
-       }
-
-       if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
-               contig &= ~GIV_DST_CONTIG;
-
-       if (!(contig & GIV_SRC_CONTIG)) {
-               assoc_nents = assoc_nents ? : 1;
+       if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
+               iv_contig = true;
+       else
                src_nents = src_nents ? : 1;
-               sec4_sg_len += assoc_nents + 1 + src_nents;
-               if (req->src == req->dst &&
-                   (src_nents || iv_dma + ivsize != sg_dma_address(req->src)))
-                       contig &= ~GIV_DST_CONTIG;
-       }
-
-       /*
-        * Add new sg entries for GCM output sequence.
-        * Expected output sequence: IV, encrypted text.
-        */
-       if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG))
-               sec4_sg_len += 1 + src_nents;
-
-       if (unlikely(req->src != req->dst)) {
-               dst_nents = dst_nents ? : 1;
-               sec4_sg_len += 1 + dst_nents;
-       }
-
-       sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+       sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
+                       sizeof(struct sec4_sg_entry);
 
        /* allocate space for base edesc and hw desc commands, link tables */
-       edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
-                       sec4_sg_bytes, GFP_DMA | flags);
+       edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+                       GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
                return ERR_PTR(-ENOMEM);
        }
 
-       edesc->assoc_nents = assoc_nents;
-       edesc->assoc_chained = assoc_chained;
        edesc->src_nents = src_nents;
        edesc->src_chained = src_chained;
        edesc->dst_nents = dst_nents;
        edesc->dst_chained = dst_chained;
-       edesc->iv_dma = iv_dma;
        edesc->sec4_sg_bytes = sec4_sg_bytes;
-       edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
+       edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
                         desc_bytes;
-       *contig_ptr = contig;
 
        sec4_sg_index = 0;
-       if (!(contig & GIV_SRC_CONTIG)) {
-               if (!is_gcm) {
-                       sg_to_sec4_sg_len(req->assoc, req->assoclen,
-                                         edesc->sec4_sg + sec4_sg_index);
-                       sec4_sg_index += assoc_nents;
-               }
-
-               dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
-                                  iv_dma, ivsize, 0);
-               sec4_sg_index += 1;
-
-               if (is_gcm) {
-                       sg_to_sec4_sg_len(req->assoc, req->assoclen,
-                                         edesc->sec4_sg + sec4_sg_index);
-                       sec4_sg_index += assoc_nents;
-               }
-
-               sg_to_sec4_sg_last(req->src, src_nents,
-                                  edesc->sec4_sg +
-                                  sec4_sg_index, 0);
-               sec4_sg_index += src_nents;
-       }
-
-       if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
-               dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
-                                  iv_dma, ivsize, 0);
-               sec4_sg_index += 1;
+       if (!iv_contig) {
+               dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
                sg_to_sec4_sg_last(req->src, src_nents,
-                                  edesc->sec4_sg + sec4_sg_index, 0);
+                                  edesc->sec4_sg + 1, 0);
+               sec4_sg_index += 1 + src_nents;
        }
 
-       if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
-               dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
-                                  iv_dma, ivsize, 0);
-               sec4_sg_index += 1;
+       if (dst_nents) {
                sg_to_sec4_sg_last(req->dst, dst_nents,
-                                  edesc->sec4_sg + sec4_sg_index, 0);
+                       edesc->sec4_sg + sec4_sg_index, 0);
        }
+
        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
@@ -2964,201 +2538,58 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
                return ERR_PTR(-ENOMEM);
        }
 
+       edesc->iv_dma = iv_dma;
+
+#ifdef DEBUG
+       print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
+                      DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+                      sec4_sg_bytes, 1);
+#endif
+
+       *iv_contig_out = iv_contig;
        return edesc;
 }
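
When the IV cannot be made contiguous with the source, ablkcipher_edesc_alloc() above builds one sec4_sg link table and maps it with a single dma_map_single() call. Its layout, read off the code (illustrative, not a hardware specification):

    sec4_sg[0]                  IV entry (dma_to_sec4_sg_one)          only when !iv_contig
    sec4_sg[1 .. src_nents]     source entries, last one flagged       only when !iv_contig
    sec4_sg[sec4_sg_index ..]   destination entries, last one flagged  only when req->dst != req->src

With a contiguous IV and an in-place operation the table is empty and the job descriptor points at the buffers directly.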
 
-static int old_aead_givencrypt(struct aead_givcrypt_request *areq)
+static int ablkcipher_encrypt(struct ablkcipher_request *req)
 {
-       struct aead_request *req = &areq->areq;
-       struct aead_edesc *edesc;
-       struct crypto_aead *aead = crypto_aead_reqtfm(req);
-       struct caam_ctx *ctx = crypto_aead_ctx(aead);
+       struct ablkcipher_edesc *edesc;
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
        struct device *jrdev = ctx->jrdev;
-       u32 contig;
+       bool iv_contig;
        u32 *desc;
        int ret = 0;
 
        /* allocate extended descriptor */
-       edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
-                                    CAAM_CMD_SZ, &contig);
-
+       edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+                                      CAAM_CMD_SZ, &iv_contig);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);
 
-#ifdef DEBUG
-       print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
-                      DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-                      req->cryptlen, 1);
-#endif
-
        /* Create and submit job descriptor */
-       init_aead_giv_job(ctx->sh_desc_givenc,
-                         ctx->sh_desc_givenc_dma, edesc, req, contig);
+       init_ablkcipher_job(ctx->sh_desc_enc,
+               ctx->sh_desc_enc_dma, edesc, req, iv_contig);
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
                       desc_bytes(edesc->hw_desc), 1);
 #endif
-
        desc = edesc->hw_desc;
-       ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req);
+       ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+
        if (!ret) {
                ret = -EINPROGRESS;
        } else {
-               old_aead_unmap(jrdev, edesc, req);
+               ablkcipher_unmap(jrdev, edesc, req);
                kfree(edesc);
        }
 
        return ret;
 }
 
-static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
-{
-       return old_aead_encrypt(&areq->areq);
-}
-
-/*
- * allocate and map the ablkcipher extended descriptor for ablkcipher
- */
-static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
-                                                      *req, int desc_bytes,
-                                                      bool *iv_contig_out)
+static int ablkcipher_decrypt(struct ablkcipher_request *req)
 {
-       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-       struct device *jrdev = ctx->jrdev;
-       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-                                         CRYPTO_TFM_REQ_MAY_SLEEP)) ?
-                      GFP_KERNEL : GFP_ATOMIC;
-       int src_nents, dst_nents = 0, sec4_sg_bytes;
-       struct ablkcipher_edesc *edesc;
-       dma_addr_t iv_dma = 0;
-       bool iv_contig = false;
-       int sgc;
-       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-       bool src_chained = false, dst_chained = false;
-       int sec4_sg_index;
-
-       src_nents = sg_count(req->src, req->nbytes, &src_chained);
-
-       if (req->dst != req->src)
-               dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
-
-       if (likely(req->src == req->dst)) {
-               sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-                                        DMA_BIDIRECTIONAL, src_chained);
-       } else {
-               sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-                                        DMA_TO_DEVICE, src_chained);
-               sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
-                                        DMA_FROM_DEVICE, dst_chained);
-       }
-
-       iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, iv_dma)) {
-               dev_err(jrdev, "unable to map IV\n");
-               return ERR_PTR(-ENOMEM);
-       }
-
-       /*
-        * Check if iv can be contiguous with source and destination.
-        * If so, include it. If not, create scatterlist.
-        */
-       if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
-               iv_contig = true;
-       else
-               src_nents = src_nents ? : 1;
-       sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
-                       sizeof(struct sec4_sg_entry);
-
-       /* allocate space for base edesc and hw desc commands, link tables */
-       edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
-                       sec4_sg_bytes, GFP_DMA | flags);
-       if (!edesc) {
-               dev_err(jrdev, "could not allocate extended descriptor\n");
-               return ERR_PTR(-ENOMEM);
-       }
-
-       edesc->src_nents = src_nents;
-       edesc->src_chained = src_chained;
-       edesc->dst_nents = dst_nents;
-       edesc->dst_chained = dst_chained;
-       edesc->sec4_sg_bytes = sec4_sg_bytes;
-       edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
-                        desc_bytes;
-
-       sec4_sg_index = 0;
-       if (!iv_contig) {
-               dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
-               sg_to_sec4_sg_last(req->src, src_nents,
-                                  edesc->sec4_sg + 1, 0);
-               sec4_sg_index += 1 + src_nents;
-       }
-
-       if (dst_nents) {
-               sg_to_sec4_sg_last(req->dst, dst_nents,
-                       edesc->sec4_sg + sec4_sg_index, 0);
-       }
-
-       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-                                           sec4_sg_bytes, DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
-               dev_err(jrdev, "unable to map S/G table\n");
-               return ERR_PTR(-ENOMEM);
-       }
-
-       edesc->iv_dma = iv_dma;
-
-#ifdef DEBUG
-       print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
-                      DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
-                      sec4_sg_bytes, 1);
-#endif
-
-       *iv_contig_out = iv_contig;
-       return edesc;
-}
-
-static int ablkcipher_encrypt(struct ablkcipher_request *req)
-{
-       struct ablkcipher_edesc *edesc;
-       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-       struct device *jrdev = ctx->jrdev;
-       bool iv_contig;
-       u32 *desc;
-       int ret = 0;
-
-       /* allocate extended descriptor */
-       edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
-                                      CAAM_CMD_SZ, &iv_contig);
-       if (IS_ERR(edesc))
-               return PTR_ERR(edesc);
-
-       /* Create and submit job descriptor*/
-       init_ablkcipher_job(ctx->sh_desc_enc,
-               ctx->sh_desc_enc_dma, edesc, req, iv_contig);
-#ifdef DEBUG
-       print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
-                      DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
-                      desc_bytes(edesc->hw_desc), 1);
-#endif
-       desc = edesc->hw_desc;
-       ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
-
-       if (!ret) {
-               ret = -EINPROGRESS;
-       } else {
-               ablkcipher_unmap(jrdev, edesc, req);
-               kfree(edesc);
-       }
-
-       return ret;
-}
-
-static int ablkcipher_decrypt(struct ablkcipher_request *req)
-{
-       struct ablkcipher_edesc *edesc;
+       struct ablkcipher_edesc *edesc;
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
        struct device *jrdev = ctx->jrdev;
@@ -3251,8 +2682,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
                        sizeof(struct sec4_sg_entry);
 
        /* allocate space for base edesc and hw desc commands, link tables */
-       edesc = kmalloc(sizeof(*edesc) + desc_bytes +
-                       sec4_sg_bytes, GFP_DMA | flags);
+       edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+                       GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
                return ERR_PTR(-ENOMEM);
@@ -3347,7 +2778,6 @@ struct caam_alg_template {
        u32 type;
        union {
                struct ablkcipher_alg ablkcipher;
-               struct old_aead_alg aead;
        } template_u;
        u32 class1_alg_type;
        u32 class2_alg_type;
@@ -3355,753 +2785,1426 @@ struct caam_alg_template {
 };
 
 static struct caam_alg_template driver_algs[] = {
-       /* single-pass ipsec_esp descriptor */
-       {
-               .name = "authenc(hmac(md5),ecb(cipher_null))",
-               .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
-               .blocksize = NULL_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
-                       .setkey = aead_setkey,
-                       .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = aead_null_givencrypt,
-                       .geniv = "<built-in>",
-                       .ivsize = NULL_IV_SIZE,
-                       .maxauthsize = MD5_DIGEST_SIZE,
-                       },
-               .class1_alg_type = 0,
-               .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
-       },
+       /* ablkcipher descriptor */
        {
-               .name = "authenc(hmac(sha1),ecb(cipher_null))",
-               .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
-               .blocksize = NULL_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
-                       .setkey = aead_setkey,
-                       .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = aead_null_givencrypt,
+               .name = "cbc(aes)",
+               .driver_name = "cbc-aes-caam",
+               .blocksize = AES_BLOCK_SIZE,
+               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+               .template_ablkcipher = {
+                       .setkey = ablkcipher_setkey,
+                       .encrypt = ablkcipher_encrypt,
+                       .decrypt = ablkcipher_decrypt,
+                       .givencrypt = ablkcipher_givencrypt,
                        .geniv = "<built-in>",
-                       .ivsize = NULL_IV_SIZE,
-                       .maxauthsize = SHA1_DIGEST_SIZE,
+                       .min_keysize = AES_MIN_KEY_SIZE,
+                       .max_keysize = AES_MAX_KEY_SIZE,
+                       .ivsize = AES_BLOCK_SIZE,
                        },
-               .class1_alg_type = 0,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
        },
        {
-               .name = "authenc(hmac(sha224),ecb(cipher_null))",
-               .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
-               .blocksize = NULL_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
-                       .setkey = aead_setkey,
-                       .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = aead_null_givencrypt,
+               .name = "cbc(des3_ede)",
+               .driver_name = "cbc-3des-caam",
+               .blocksize = DES3_EDE_BLOCK_SIZE,
+               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+               .template_ablkcipher = {
+                       .setkey = ablkcipher_setkey,
+                       .encrypt = ablkcipher_encrypt,
+                       .decrypt = ablkcipher_decrypt,
+                       .givencrypt = ablkcipher_givencrypt,
                        .geniv = "<built-in>",
-                       .ivsize = NULL_IV_SIZE,
-                       .maxauthsize = SHA224_DIGEST_SIZE,
+                       .min_keysize = DES3_EDE_KEY_SIZE,
+                       .max_keysize = DES3_EDE_KEY_SIZE,
+                       .ivsize = DES3_EDE_BLOCK_SIZE,
                        },
-               .class1_alg_type = 0,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-                                  OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+               .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
        },
        {
-               .name = "authenc(hmac(sha256),ecb(cipher_null))",
-               .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
-               .blocksize = NULL_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
-                       .setkey = aead_setkey,
-                       .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = aead_null_givencrypt,
+               .name = "cbc(des)",
+               .driver_name = "cbc-des-caam",
+               .blocksize = DES_BLOCK_SIZE,
+               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+               .template_ablkcipher = {
+                       .setkey = ablkcipher_setkey,
+                       .encrypt = ablkcipher_encrypt,
+                       .decrypt = ablkcipher_decrypt,
+                       .givencrypt = ablkcipher_givencrypt,
                        .geniv = "<built-in>",
-                       .ivsize = NULL_IV_SIZE,
-                       .maxauthsize = SHA256_DIGEST_SIZE,
+                       .min_keysize = DES_KEY_SIZE,
+                       .max_keysize = DES_KEY_SIZE,
+                       .ivsize = DES_BLOCK_SIZE,
                        },
-               .class1_alg_type = 0,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-                                  OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+               .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
        },
        {
-               .name = "authenc(hmac(sha384),ecb(cipher_null))",
-               .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
-               .blocksize = NULL_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
-                       .setkey = aead_setkey,
-                       .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = aead_null_givencrypt,
-                       .geniv = "<built-in>",
-                       .ivsize = NULL_IV_SIZE,
-                       .maxauthsize = SHA384_DIGEST_SIZE,
+               .name = "ctr(aes)",
+               .driver_name = "ctr-aes-caam",
+               .blocksize = 1,
+               .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+               .template_ablkcipher = {
+                       .setkey = ablkcipher_setkey,
+                       .encrypt = ablkcipher_encrypt,
+                       .decrypt = ablkcipher_decrypt,
+                       .geniv = "chainiv",
+                       .min_keysize = AES_MIN_KEY_SIZE,
+                       .max_keysize = AES_MAX_KEY_SIZE,
+                       .ivsize = AES_BLOCK_SIZE,
                        },
-               .class1_alg_type = 0,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-                                  OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
        },
        {
-               .name = "authenc(hmac(sha512),ecb(cipher_null))",
-               .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
-               .blocksize = NULL_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
-                       .setkey = aead_setkey,
-                       .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = aead_null_givencrypt,
+               .name = "rfc3686(ctr(aes))",
+               .driver_name = "rfc3686-ctr-aes-caam",
+               .blocksize = 1,
+               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+               .template_ablkcipher = {
+                       .setkey = ablkcipher_setkey,
+                       .encrypt = ablkcipher_encrypt,
+                       .decrypt = ablkcipher_decrypt,
+                       .givencrypt = ablkcipher_givencrypt,
                        .geniv = "<built-in>",
-                       .ivsize = NULL_IV_SIZE,
-                       .maxauthsize = SHA512_DIGEST_SIZE,
+                       .min_keysize = AES_MIN_KEY_SIZE +
+                                      CTR_RFC3686_NONCE_SIZE,
+                       .max_keysize = AES_MAX_KEY_SIZE +
+                                      CTR_RFC3686_NONCE_SIZE,
+                       .ivsize = CTR_RFC3686_IV_SIZE,
                        },
-               .class1_alg_type = 0,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-                                  OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
-       },
+               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+       }
+};
+
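
The four GIVCIPHER templates above keep the legacy built-in IV generator, while ctr(aes) stays a plain ABLKCIPHER behind the chainiv wrapper. A hedged userspace sketch, assuming a kernel with AF_ALG enabled: once registered, cbc-aes-caam is typically selected for "cbc(aes)" ahead of the software implementation because of its higher cra_priority. The example is illustrative and omits error checks:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "cbc(aes)",
	};
	char cbuf[CMSG_SPACE(sizeof(__u32)) +
		  CMSG_SPACE(sizeof(struct af_alg_iv) + 16)] = { 0 };
	unsigned char key[16] = "0123456789abcdef";
	unsigned char buf[16] = "single AES blk!";	/* one block, in place */
	struct msghdr msg = { 0 };
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct cmsghdr *cmsg;
	struct af_alg_iv *ivm;
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	/* first cmsg: operation direction */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	/* second cmsg: the 16-byte CBC IV */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct af_alg_iv) + 16);
	ivm = (struct af_alg_iv *)CMSG_DATA(cmsg);
	ivm->ivlen = 16;
	memcpy(ivm->iv, "sixteen byte iv!", 16);

	sendmsg(opfd, &msg, 0);
	read(opfd, buf, sizeof(buf));	/* ciphertext comes back in buf */

	close(opfd);
	close(tfmfd);
	return 0;
}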
+static struct caam_aead_alg driver_aeads[] = {
        {
-               .name = "authenc(hmac(md5),cbc(aes))",
-               .driver_name = "authenc-hmac-md5-cbc-aes-caam",
-               .blocksize = AES_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
-                       .setkey = aead_setkey,
-                       .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
-                       .ivsize = AES_BLOCK_SIZE,
-                       .maxauthsize = MD5_DIGEST_SIZE,
+               .aead = {
+                       .base = {
+                               .cra_name = "rfc4106(gcm(aes))",
+                               .cra_driver_name = "rfc4106-gcm-aes-caam",
+                               .cra_blocksize = 1,
                        },
-               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-               .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+                       .setkey = rfc4106_setkey,
+                       .setauthsize = rfc4106_setauthsize,
+                       .encrypt = ipsec_gcm_encrypt,
+                       .decrypt = ipsec_gcm_decrypt,
+                       .ivsize = 8,
+                       .maxauthsize = AES_BLOCK_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+               },
        },
        {
-               .name = "authenc(hmac(sha1),cbc(aes))",
-               .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
-               .blocksize = AES_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
+               .aead = {
+                       .base = {
+                               .cra_name = "rfc4543(gcm(aes))",
+                               .cra_driver_name = "rfc4543-gcm-aes-caam",
+                               .cra_blocksize = 1,
+                       },
+                       .setkey = rfc4543_setkey,
+                       .setauthsize = rfc4543_setauthsize,
+                       .encrypt = ipsec_gcm_encrypt,
+                       .decrypt = ipsec_gcm_decrypt,
+                       .ivsize = 8,
+                       .maxauthsize = AES_BLOCK_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+               },
+       },
+       /* Galois Counter Mode */
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "gcm(aes)",
+                               .cra_driver_name = "gcm-aes-caam",
+                               .cra_blocksize = 1,
+                       },
+                       .setkey = gcm_setkey,
+                       .setauthsize = gcm_setauthsize,
+                       .encrypt = gcm_encrypt,
+                       .decrypt = gcm_decrypt,
+                       .ivsize = 12,
+                       .maxauthsize = AES_BLOCK_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+               },
+       },
+       /* single-pass ipsec_esp descriptor */
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(md5),"
+                                           "ecb(cipher_null))",
+                               .cra_driver_name = "authenc-hmac-md5-"
+                                                  "ecb-cipher_null-caam",
+                               .cra_blocksize = NULL_BLOCK_SIZE,
+                       },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
+                       .ivsize = NULL_IV_SIZE,
+                       .maxauthsize = MD5_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha1),"
+                                           "ecb(cipher_null))",
+                               .cra_driver_name = "authenc-hmac-sha1-"
+                                                  "ecb-cipher_null-caam",
+                               .cra_blocksize = NULL_BLOCK_SIZE,
+                       },
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
+                       .ivsize = NULL_IV_SIZE,
+                       .maxauthsize = SHA1_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha224),"
+                                           "ecb(cipher_null))",
+                               .cra_driver_name = "authenc-hmac-sha224-"
+                                                  "ecb-cipher_null-caam",
+                               .cra_blocksize = NULL_BLOCK_SIZE,
+                       },
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
+                       .ivsize = NULL_IV_SIZE,
+                       .maxauthsize = SHA224_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha256),"
+                                           "ecb(cipher_null))",
+                               .cra_driver_name = "authenc-hmac-sha256-"
+                                                  "ecb-cipher_null-caam",
+                               .cra_blocksize = NULL_BLOCK_SIZE,
+                       },
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
+                       .ivsize = NULL_IV_SIZE,
+                       .maxauthsize = SHA256_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha384),"
+                                           "ecb(cipher_null))",
+                               .cra_driver_name = "authenc-hmac-sha384-"
+                                                  "ecb-cipher_null-caam",
+                               .cra_blocksize = NULL_BLOCK_SIZE,
+                       },
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
+                       .ivsize = NULL_IV_SIZE,
+                       .maxauthsize = SHA384_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha512),"
+                                           "ecb(cipher_null))",
+                               .cra_driver_name = "authenc-hmac-sha512-"
+                                                  "ecb-cipher_null-caam",
+                               .cra_blocksize = NULL_BLOCK_SIZE,
+                       },
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
+                       .ivsize = NULL_IV_SIZE,
+                       .maxauthsize = SHA512_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(md5),cbc(aes))",
+                               .cra_driver_name = "authenc-hmac-md5-"
+                                                  "cbc-aes-caam",
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                       },
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = MD5_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "echainiv(authenc(hmac(md5),"
+                                           "cbc(aes)))",
+                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
+                                                  "cbc-aes-caam",
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                       },
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = MD5_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+                       .geniv = true,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha1),cbc(aes))",
+                               .cra_driver_name = "authenc-hmac-sha1-"
+                                                  "cbc-aes-caam",
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                       },
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "echainiv(authenc(hmac(sha1),"
+                                           "cbc(aes)))",
+                               .cra_driver_name = "echainiv-authenc-"
+                                                  "hmac-sha1-cbc-aes-caam",
+                               .cra_blocksize = AES_BLOCK_SIZE,
                        },
-               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA1_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+                       .geniv = true,
+               },
        },
        {
-               .name = "authenc(hmac(sha224),cbc(aes))",
-               .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
-               .blocksize = AES_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha224),cbc(aes))",
+                               .cra_driver_name = "authenc-hmac-sha224-"
+                                                  "cbc-aes-caam",
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                       },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "echainiv(authenc(hmac(sha224),"
+                                           "cbc(aes)))",
+                               .cra_driver_name = "echainiv-authenc-"
+                                                  "hmac-sha224-cbc-aes-caam",
+                               .cra_blocksize = AES_BLOCK_SIZE,
                        },
-               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-                                  OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA224_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+                       .geniv = true,
+               },
        },
        {
-               .name = "authenc(hmac(sha256),cbc(aes))",
-               .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
-               .blocksize = AES_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha256),cbc(aes))",
+                               .cra_driver_name = "authenc-hmac-sha256-"
+                                                  "cbc-aes-caam",
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                       },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "echainiv(authenc(hmac(sha256),"
+                                           "cbc(aes)))",
+                               .cra_driver_name = "echainiv-authenc-"
+                                                  "hmac-sha256-cbc-aes-caam",
+                               .cra_blocksize = AES_BLOCK_SIZE,
                        },
-               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-                                  OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA256_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+                       .geniv = true,
+               },
        },
        {
-               .name = "authenc(hmac(sha384),cbc(aes))",
-               .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
-               .blocksize = AES_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha384),cbc(aes))",
+                               .cra_driver_name = "authenc-hmac-sha384-"
+                                                  "cbc-aes-caam",
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                       },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "echainiv(authenc(hmac(sha384),"
+                                           "cbc(aes)))",
+                               .cra_driver_name = "echainiv-authenc-"
+                                                  "hmac-sha384-cbc-aes-caam",
+                               .cra_blocksize = AES_BLOCK_SIZE,
                        },
-               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-                                  OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA384_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+                       .geniv = true,
+               },
        },
-
        {
-               .name = "authenc(hmac(sha512),cbc(aes))",
-               .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
-               .blocksize = AES_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha512),cbc(aes))",
+                               .cra_driver_name = "authenc-hmac-sha512-"
+                                                  "cbc-aes-caam",
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                       },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "echainiv(authenc(hmac(sha512),"
+                                           "cbc(aes)))",
+                               .cra_driver_name = "echainiv-authenc-"
+                                                  "hmac-sha512-cbc-aes-caam",
+                               .cra_blocksize = AES_BLOCK_SIZE,
                        },
-               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-                                  OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA512_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+                       .geniv = true,
+               },
        },
        {
-               .name = "authenc(hmac(md5),cbc(des3_ede))",
-               .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
-               .blocksize = DES3_EDE_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+                               .cra_driver_name = "authenc-hmac-md5-"
+                                                  "cbc-des3_ede-caam",
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                       },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+               }
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "echainiv(authenc(hmac(md5),"
+                                           "cbc(des3_ede)))",
+                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
+                                                  "cbc-des3_ede-caam",
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                        },
-               .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-               .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
+                       .ivsize = DES3_EDE_BLOCK_SIZE,
+                       .maxauthsize = MD5_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+                       .geniv = true,
+               }
        },
        {
-               .name = "authenc(hmac(sha1),cbc(des3_ede))",
-               .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
-               .blocksize = DES3_EDE_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha1),"
+                                           "cbc(des3_ede))",
+                               .cra_driver_name = "authenc-hmac-sha1-"
+                                                  "cbc-des3_ede-caam",
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                       },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "echainiv(authenc(hmac(sha1),"
+                                           "cbc(des3_ede)))",
+                               .cra_driver_name = "echainiv-authenc-"
+                                                  "hmac-sha1-"
+                                                  "cbc-des3_ede-caam",
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                        },
-               .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
+                       .ivsize = DES3_EDE_BLOCK_SIZE,
+                       .maxauthsize = SHA1_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+                       .geniv = true,
+               },
        },
        {
-               .name = "authenc(hmac(sha224),cbc(des3_ede))",
-               .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
-               .blocksize = DES3_EDE_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha224),"
+                                           "cbc(des3_ede))",
+                               .cra_driver_name = "authenc-hmac-sha224-"
+                                                  "cbc-des3_ede-caam",
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                       },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "echainiv(authenc(hmac(sha224),"
+                                           "cbc(des3_ede)))",
+                               .cra_driver_name = "echainiv-authenc-"
+                                                  "hmac-sha224-"
+                                                  "cbc-des3_ede-caam",
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                        },
-               .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-                                  OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
+                       .ivsize = DES3_EDE_BLOCK_SIZE,
+                       .maxauthsize = SHA224_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+                       .geniv = true,
+               },
        },
        {
-               .name = "authenc(hmac(sha256),cbc(des3_ede))",
-               .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
-               .blocksize = DES3_EDE_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha256),"
+                                           "cbc(des3_ede))",
+                               .cra_driver_name = "authenc-hmac-sha256-"
+                                                  "cbc-des3_ede-caam",
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                       },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "echainiv(authenc(hmac(sha256),"
+                                           "cbc(des3_ede)))",
+                               .cra_driver_name = "echainiv-authenc-"
+                                                  "hmac-sha256-"
+                                                  "cbc-des3_ede-caam",
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                        },
-               .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-                                  OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
+                       .ivsize = DES3_EDE_BLOCK_SIZE,
+                       .maxauthsize = SHA256_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+                       .geniv = true,
+               },
        },
        {
-               .name = "authenc(hmac(sha384),cbc(des3_ede))",
-               .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
-               .blocksize = DES3_EDE_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha384),"
+                                           "cbc(des3_ede))",
+                               .cra_driver_name = "authenc-hmac-sha384-"
+                                                  "cbc-des3_ede-caam",
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                       },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "echainiv(authenc(hmac(sha384),"
+                                           "cbc(des3_ede)))",
+                               .cra_driver_name = "echainiv-authenc-"
+                                                  "hmac-sha384-"
+                                                  "cbc-des3_ede-caam",
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                        },
-               .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-                                  OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
+                       .ivsize = DES3_EDE_BLOCK_SIZE,
+                       .maxauthsize = SHA384_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+                       .geniv = true,
+               },
        },
        {
-               .name = "authenc(hmac(sha512),cbc(des3_ede))",
-               .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
-               .blocksize = DES3_EDE_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha512),"
+                                           "cbc(des3_ede))",
+                               .cra_driver_name = "authenc-hmac-sha512-"
+                                                  "cbc-des3_ede-caam",
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                       },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "echainiv(authenc(hmac(sha512),"
+                                           "cbc(des3_ede)))",
+                               .cra_driver_name = "echainiv-authenc-"
+                                                  "hmac-sha512-"
+                                                  "cbc-des3_ede-caam",
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                        },
-               .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-                                  OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
+                       .ivsize = DES3_EDE_BLOCK_SIZE,
+                       .maxauthsize = SHA512_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+                       .geniv = true,
+               },
        },
        {
-               .name = "authenc(hmac(md5),cbc(des))",
-               .driver_name = "authenc-hmac-md5-cbc-des-caam",
-               .blocksize = DES_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(md5),cbc(des))",
+                               .cra_driver_name = "authenc-hmac-md5-"
+                                                  "cbc-des-caam",
+                               .cra_blocksize = DES_BLOCK_SIZE,
+                       },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "echainiv(authenc(hmac(md5),"
+                                           "cbc(des)))",
+                               .cra_driver_name = "echainiv-authenc-hmac-md5-"
+                                                  "cbc-des-caam",
+                               .cra_blocksize = DES_BLOCK_SIZE,
                        },
-               .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-               .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
+                       .ivsize = DES_BLOCK_SIZE,
+                       .maxauthsize = MD5_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+                       .geniv = true,
+               },
        },
        {
-               .name = "authenc(hmac(sha1),cbc(des))",
-               .driver_name = "authenc-hmac-sha1-cbc-des-caam",
-               .blocksize = DES_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha1),cbc(des))",
+                               .cra_driver_name = "authenc-hmac-sha1-"
+                                                  "cbc-des-caam",
+                               .cra_blocksize = DES_BLOCK_SIZE,
+                       },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "echainiv(authenc(hmac(sha1),"
+                                           "cbc(des)))",
+                               .cra_driver_name = "echainiv-authenc-"
+                                                  "hmac-sha1-cbc-des-caam",
+                               .cra_blocksize = DES_BLOCK_SIZE,
                        },
-               .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
+                       .ivsize = DES_BLOCK_SIZE,
+                       .maxauthsize = SHA1_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+                       .geniv = true,
+               },
        },
        {
-               .name = "authenc(hmac(sha224),cbc(des))",
-               .driver_name = "authenc-hmac-sha224-cbc-des-caam",
-               .blocksize = DES_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha224),cbc(des))",
+                               .cra_driver_name = "authenc-hmac-sha224-"
+                                                  "cbc-des-caam",
+                               .cra_blocksize = DES_BLOCK_SIZE,
+                       },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "echainiv(authenc(hmac(sha224),"
+                                           "cbc(des)))",
+                               .cra_driver_name = "echainiv-authenc-"
+                                                  "hmac-sha224-cbc-des-caam",
+                               .cra_blocksize = DES_BLOCK_SIZE,
                        },
-               .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-                                  OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
+                       .ivsize = DES_BLOCK_SIZE,
+                       .maxauthsize = SHA224_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+                       .geniv = true,
+               },
        },
        {
-               .name = "authenc(hmac(sha256),cbc(des))",
-               .driver_name = "authenc-hmac-sha256-cbc-des-caam",
-               .blocksize = DES_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha256),cbc(des))",
+                               .cra_driver_name = "authenc-hmac-sha256-"
+                                                  "cbc-des-caam",
+                               .cra_blocksize = DES_BLOCK_SIZE,
+                       },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "echainiv(authenc(hmac(sha256),"
+                                           "cbc(des)))",
+                               .cra_driver_name = "echainiv-authenc-"
+                                                  "hmac-sha256-cbc-des-caam",
+                               .cra_blocksize = DES_BLOCK_SIZE,
                        },
-               .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-                                  OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
+                       .ivsize = DES_BLOCK_SIZE,
+                       .maxauthsize = SHA256_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+                       .geniv = true,
+               },
        },
        {
-               .name = "authenc(hmac(sha384),cbc(des))",
-               .driver_name = "authenc-hmac-sha384-cbc-des-caam",
-               .blocksize = DES_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha384),cbc(des))",
+                               .cra_driver_name = "authenc-hmac-sha384-"
+                                                  "cbc-des-caam",
+                               .cra_blocksize = DES_BLOCK_SIZE,
+                       },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "echainiv(authenc(hmac(sha384),"
+                                           "cbc(des)))",
+                               .cra_driver_name = "echainiv-authenc-"
+                                                  "hmac-sha384-cbc-des-caam",
+                               .cra_blocksize = DES_BLOCK_SIZE,
                        },
-               .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-                                  OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
+                       .ivsize = DES_BLOCK_SIZE,
+                       .maxauthsize = SHA384_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+                       .geniv = true,
+               },
+       },
+       {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha512),cbc(des))",
+                               .cra_driver_name = "authenc-hmac-sha512-"
+                                                  "cbc-des-caam",
+                               .cra_blocksize = DES_BLOCK_SIZE,
+                       },
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
+                       .ivsize = DES_BLOCK_SIZE,
+                       .maxauthsize = SHA512_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+               },
        },
        {
-               .name = "authenc(hmac(sha512),cbc(des))",
-               .driver_name = "authenc-hmac-sha512-cbc-des-caam",
-               .blocksize = DES_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
+               .aead = {
+                       .base = {
+                               .cra_name = "echainiv(authenc(hmac(sha512),"
+                                           "cbc(des)))",
+                               .cra_driver_name = "echainiv-authenc-"
+                                                  "hmac-sha512-cbc-des-caam",
+                               .cra_blocksize = DES_BLOCK_SIZE,
+                       },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
-                       },
-               .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-                                  OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+                       .geniv = true,
+               },
        },
        {
-               .name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
-               .driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam",
-               .blocksize = 1,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(md5),"
+                                           "rfc3686(ctr(aes)))",
+                               .cra_driver_name = "authenc-hmac-md5-"
+                                                  "rfc3686-ctr-aes-caam",
+                               .cra_blocksize = 1,
+                       },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
-                       },
-               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
-               .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES |
+                                          OP_ALG_AAI_CTR_MOD128,
+                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+                       .rfc3686 = true,
+               },
        },
        {
-               .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
-               .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam",
-               .blocksize = 1,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
+               .aead = {
+                       .base = {
+                               .cra_name = "seqiv(authenc("
+                                           "hmac(md5),rfc3686(ctr(aes))))",
+                               .cra_driver_name = "seqiv-authenc-hmac-md5-"
+                                                  "rfc3686-ctr-aes-caam",
+                               .cra_blocksize = 1,
+                       },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
-                       .maxauthsize = SHA1_DIGEST_SIZE,
-                       },
-               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+                       .maxauthsize = MD5_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES |
+                                          OP_ALG_AAI_CTR_MOD128,
+                       .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+                       .rfc3686 = true,
+                       .geniv = true,
+               },
        },
        {
-               .name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
-               .driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam",
-               .blocksize = 1,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha1),"
+                                           "rfc3686(ctr(aes)))",
+                               .cra_driver_name = "authenc-hmac-sha1-"
+                                                  "rfc3686-ctr-aes-caam",
+                               .cra_blocksize = 1,
+                       },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
-                       .maxauthsize = SHA224_DIGEST_SIZE,
-                       },
-               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-                                  OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+                       .maxauthsize = SHA1_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES |
+                                          OP_ALG_AAI_CTR_MOD128,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+                       .rfc3686 = true,
+               },
        },
        {
-               .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
-               .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam",
-               .blocksize = 1,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
+               .aead = {
+                       .base = {
+                               .cra_name = "seqiv(authenc("
+                                           "hmac(sha1),rfc3686(ctr(aes))))",
+                               .cra_driver_name = "seqiv-authenc-hmac-sha1-"
+                                                  "rfc3686-ctr-aes-caam",
+                               .cra_blocksize = 1,
+                       },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
-                       .maxauthsize = SHA256_DIGEST_SIZE,
-                       },
-               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-                                  OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+                       .maxauthsize = SHA1_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES |
+                                          OP_ALG_AAI_CTR_MOD128,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+                       .rfc3686 = true,
+                       .geniv = true,
+               },
        },
        {
-               .name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
-               .driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-caam",
-               .blocksize = 1,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha224),"
+                                           "rfc3686(ctr(aes)))",
+                               .cra_driver_name = "authenc-hmac-sha224-"
+                                                  "rfc3686-ctr-aes-caam",
+                               .cra_blocksize = 1,
+                       },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
-                       .maxauthsize = SHA384_DIGEST_SIZE,
-                       },
-               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-                                  OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+                       .maxauthsize = SHA224_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES |
+                                          OP_ALG_AAI_CTR_MOD128,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+                       .rfc3686 = true,
+               },
        },
        {
-               .name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
-               .driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-caam",
-               .blocksize = 1,
-               .type = CRYPTO_ALG_TYPE_AEAD,
-               .template_aead = {
+               .aead = {
+                       .base = {
+                               .cra_name = "seqiv(authenc("
+                                           "hmac(sha224),rfc3686(ctr(aes))))",
+                               .cra_driver_name = "seqiv-authenc-hmac-sha224-"
+                                                  "rfc3686-ctr-aes-caam",
+                               .cra_blocksize = 1,
+                       },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
-                       .encrypt = old_aead_encrypt,
-                       .decrypt = old_aead_decrypt,
-                       .givencrypt = old_aead_givencrypt,
-                       .geniv = "<built-in>",
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
-                       .maxauthsize = SHA512_DIGEST_SIZE,
-                       },
-               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
-               .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-                                  OP_ALG_AAI_HMAC_PRECOMP,
-               .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
-       },
-       /* ablkcipher descriptor */
-       {
-               .name = "cbc(aes)",
-               .driver_name = "cbc-aes-caam",
-               .blocksize = AES_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
-               .template_ablkcipher = {
-                       .setkey = ablkcipher_setkey,
-                       .encrypt = ablkcipher_encrypt,
-                       .decrypt = ablkcipher_decrypt,
-                       .givencrypt = ablkcipher_givencrypt,
-                       .geniv = "<built-in>",
-                       .min_keysize = AES_MIN_KEY_SIZE,
-                       .max_keysize = AES_MAX_KEY_SIZE,
-                       .ivsize = AES_BLOCK_SIZE,
-                       },
-               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+                       .maxauthsize = SHA224_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES |
+                                          OP_ALG_AAI_CTR_MOD128,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+                       .rfc3686 = true,
+                       .geniv = true,
+               },
        },
        {
-               .name = "cbc(des3_ede)",
-               .driver_name = "cbc-3des-caam",
-               .blocksize = DES3_EDE_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
-               .template_ablkcipher = {
-                       .setkey = ablkcipher_setkey,
-                       .encrypt = ablkcipher_encrypt,
-                       .decrypt = ablkcipher_decrypt,
-                       .givencrypt = ablkcipher_givencrypt,
-                       .geniv = "<built-in>",
-                       .min_keysize = DES3_EDE_KEY_SIZE,
-                       .max_keysize = DES3_EDE_KEY_SIZE,
-                       .ivsize = DES3_EDE_BLOCK_SIZE,
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha256),"
+                                           "rfc3686(ctr(aes)))",
+                               .cra_driver_name = "authenc-hmac-sha256-"
+                                                  "rfc3686-ctr-aes-caam",
+                               .cra_blocksize = 1,
                        },
-               .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
+                       .ivsize = CTR_RFC3686_IV_SIZE,
+                       .maxauthsize = SHA256_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES |
+                                          OP_ALG_AAI_CTR_MOD128,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+                       .rfc3686 = true,
+               },
        },
        {
-               .name = "cbc(des)",
-               .driver_name = "cbc-des-caam",
-               .blocksize = DES_BLOCK_SIZE,
-               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
-               .template_ablkcipher = {
-                       .setkey = ablkcipher_setkey,
-                       .encrypt = ablkcipher_encrypt,
-                       .decrypt = ablkcipher_decrypt,
-                       .givencrypt = ablkcipher_givencrypt,
-                       .geniv = "<built-in>",
-                       .min_keysize = DES_KEY_SIZE,
-                       .max_keysize = DES_KEY_SIZE,
-                       .ivsize = DES_BLOCK_SIZE,
+               .aead = {
+                       .base = {
+                               .cra_name = "seqiv(authenc(hmac(sha256),"
+                                           "rfc3686(ctr(aes))))",
+                               .cra_driver_name = "seqiv-authenc-hmac-sha256-"
+                                                  "rfc3686-ctr-aes-caam",
+                               .cra_blocksize = 1,
                        },
-               .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
+                       .ivsize = CTR_RFC3686_IV_SIZE,
+                       .maxauthsize = SHA256_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES |
+                                          OP_ALG_AAI_CTR_MOD128,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+                       .rfc3686 = true,
+                       .geniv = true,
+               },
        },
        {
-               .name = "ctr(aes)",
-               .driver_name = "ctr-aes-caam",
-               .blocksize = 1,
-               .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
-               .template_ablkcipher = {
-                       .setkey = ablkcipher_setkey,
-                       .encrypt = ablkcipher_encrypt,
-                       .decrypt = ablkcipher_decrypt,
-                       .geniv = "chainiv",
-                       .min_keysize = AES_MIN_KEY_SIZE,
-                       .max_keysize = AES_MAX_KEY_SIZE,
-                       .ivsize = AES_BLOCK_SIZE,
+               .aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha384),"
+                                           "rfc3686(ctr(aes)))",
+                               .cra_driver_name = "authenc-hmac-sha384-"
+                                                  "rfc3686-ctr-aes-caam",
+                               .cra_blocksize = 1,
                        },
-               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
-       },
-       {
-               .name = "rfc3686(ctr(aes))",
-               .driver_name = "rfc3686-ctr-aes-caam",
-               .blocksize = 1,
-               .type = CRYPTO_ALG_TYPE_GIVCIPHER,
-               .template_ablkcipher = {
-                       .setkey = ablkcipher_setkey,
-                       .encrypt = ablkcipher_encrypt,
-                       .decrypt = ablkcipher_decrypt,
-                       .givencrypt = ablkcipher_givencrypt,
-                       .geniv = "<built-in>",
-                       .min_keysize = AES_MIN_KEY_SIZE +
-                                      CTR_RFC3686_NONCE_SIZE,
-                       .max_keysize = AES_MAX_KEY_SIZE +
-                                      CTR_RFC3686_NONCE_SIZE,
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
-                       },
-               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
-       }
-};
-
-struct caam_alg_entry {
-       int class1_alg_type;
-       int class2_alg_type;
-       int alg_op;
-};
-
-struct caam_aead_alg {
-       struct aead_alg aead;
-       struct caam_alg_entry caam;
-       bool registered;
-};
-
-static struct caam_aead_alg driver_aeads[] = {
+                       .maxauthsize = SHA384_DIGEST_SIZE,
+               },
+               .caam = {
+                       .class1_alg_type = OP_ALG_ALGSEL_AES |
+                                          OP_ALG_AAI_CTR_MOD128,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+                       .rfc3686 = true,
+               },
+       },
        {
                .aead = {
                        .base = {
-                               .cra_name = "rfc4106(gcm(aes))",
-                               .cra_driver_name = "rfc4106-gcm-aes-caam",
+                               .cra_name = "seqiv(authenc(hmac(sha384),"
+                                           "rfc3686(ctr(aes))))",
+                               .cra_driver_name = "seqiv-authenc-hmac-sha384-"
+                                                  "rfc3686-ctr-aes-caam",
                                .cra_blocksize = 1,
                        },
-                       .setkey = rfc4106_setkey,
-                       .setauthsize = rfc4106_setauthsize,
-                       .encrypt = gcm_encrypt,
-                       .decrypt = gcm_decrypt,
-                       .ivsize = 8,
-                       .maxauthsize = AES_BLOCK_SIZE,
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
+                       .ivsize = CTR_RFC3686_IV_SIZE,
+                       .maxauthsize = SHA384_DIGEST_SIZE,
                },
                .caam = {
-                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+                       .class1_alg_type = OP_ALG_ALGSEL_AES |
+                                          OP_ALG_AAI_CTR_MOD128,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+                       .rfc3686 = true,
+                       .geniv = true,
                },
        },
        {
                .aead = {
                        .base = {
-                               .cra_name = "rfc4543(gcm(aes))",
-                               .cra_driver_name = "rfc4543-gcm-aes-caam",
+                               .cra_name = "authenc(hmac(sha512),"
+                                           "rfc3686(ctr(aes)))",
+                               .cra_driver_name = "authenc-hmac-sha512-"
+                                                  "rfc3686-ctr-aes-caam",
                                .cra_blocksize = 1,
                        },
-                       .setkey = rfc4543_setkey,
-                       .setauthsize = rfc4543_setauthsize,
-                       .encrypt = gcm_encrypt,
-                       .decrypt = gcm_decrypt,
-                       .ivsize = 8,
-                       .maxauthsize = AES_BLOCK_SIZE,
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_decrypt,
+                       .ivsize = CTR_RFC3686_IV_SIZE,
+                       .maxauthsize = SHA512_DIGEST_SIZE,
                },
                .caam = {
-                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+                       .class1_alg_type = OP_ALG_ALGSEL_AES |
+                                          OP_ALG_AAI_CTR_MOD128,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+                       .rfc3686 = true,
                },
        },
-       /* Galois Counter Mode */
        {
                .aead = {
                        .base = {
-                               .cra_name = "gcm(aes)",
-                               .cra_driver_name = "gcm-aes-caam",
+                               .cra_name = "seqiv(authenc(hmac(sha512),"
+                                           "rfc3686(ctr(aes))))",
+                               .cra_driver_name = "seqiv-authenc-hmac-sha512-"
+                                                  "rfc3686-ctr-aes-caam",
                                .cra_blocksize = 1,
                        },
-                       .setkey = gcm_setkey,
-                       .setauthsize = gcm_setauthsize,
-                       .encrypt = gcm_encrypt,
-                       .decrypt = gcm_decrypt,
-                       .ivsize = 12,
-                       .maxauthsize = AES_BLOCK_SIZE,
+                       .setkey = aead_setkey,
+                       .setauthsize = aead_setauthsize,
+                       .encrypt = aead_encrypt,
+                       .decrypt = aead_givdecrypt,
+                       .ivsize = CTR_RFC3686_IV_SIZE,
+                       .maxauthsize = SHA512_DIGEST_SIZE,
                },
                .caam = {
-                       .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+                       .class1_alg_type = OP_ALG_ALGSEL_AES |
+                                          OP_ALG_AAI_CTR_MOD128,
+                       .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+                                          OP_ALG_AAI_HMAC_PRECOMP,
+                       .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+                       .rfc3686 = true,
+                       .geniv = true,
                },
        },
 };
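
Each driver_aeads entry above pairs a generic struct aead_alg with the CAAM-specific descriptor parameters in struct caam_alg_entry; registration then just hands the embedded aead_alg to the crypto core. A minimal sketch of that step, assuming the driver-internal types shown earlier in this diff (not part of the patch):

	/* Sketch: register one entry and remember it for unregistration. */
	static int caam_register_one(struct caam_aead_alg *t_alg)
	{
		int err = crypto_register_aead(&t_alg->aead);

		if (err)
			return err;

		t_alg->registered = true;
		return 0;
	}
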
@@ -4211,7 +4314,7 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
        struct caam_crypto_alg *t_alg;
        struct crypto_alg *alg;
 
-       t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
+       t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
        if (!t_alg) {
                pr_err("failed to allocate t_alg\n");
                return ERR_PTR(-ENOMEM);
@@ -4240,10 +4343,6 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
                alg->cra_type = &crypto_ablkcipher_type;
                alg->cra_ablkcipher = template->template_ablkcipher;
                break;
-       case CRYPTO_ALG_TYPE_AEAD:
-               alg->cra_type = &crypto_aead_type;
-               alg->cra_aead = template->template_aead;
-               break;
        }
 
        t_alg->caam.class1_alg_type = template->class1_alg_type;
@@ -4271,8 +4370,10 @@ static int __init caam_algapi_init(void)
        struct device_node *dev_node;
        struct platform_device *pdev;
        struct device *ctrldev;
-       void *priv;
+       struct caam_drv_private *priv;
        int i = 0, err = 0;
+       u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
+       unsigned int md_limit = SHA512_DIGEST_SIZE;
        bool registered = false;
 
        dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
@@ -4302,16 +4403,39 @@ static int __init caam_algapi_init(void)
 
        INIT_LIST_HEAD(&alg_list);
 
-       /* register crypto algorithms the device supports */
+       /*
+        * Register crypto algorithms the device supports.
+        * First, detect presence and attributes of DES, AES, and MD blocks.
+        */
+       cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+       cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+       des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
+       aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
+       md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+
+       /* If MD is present, limit digest size based on LP256 */
+       if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
+               md_limit = SHA256_DIGEST_SIZE;
+
        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
-               /* TODO: check if h/w supports alg */
                struct caam_crypto_alg *t_alg;
+               struct caam_alg_template *alg = driver_algs + i;
+               u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+
+               /* Skip DES algorithms if not supported by device */
+               if (!des_inst &&
+                   ((alg_sel == OP_ALG_ALGSEL_3DES) ||
+                    (alg_sel == OP_ALG_ALGSEL_DES)))
+                       continue;
+
+               /* Skip AES algorithms if not supported by device */
+               if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
+                       continue;
 
-               t_alg = caam_alg_alloc(&driver_algs[i]);
+               t_alg = caam_alg_alloc(alg);
                if (IS_ERR(t_alg)) {
                        err = PTR_ERR(t_alg);
-                       pr_warn("%s alg allocation failed\n",
-                               driver_algs[i].driver_name);
+                       pr_warn("%s alg allocation failed\n", alg->driver_name);
                        continue;
                }
 
@@ -4329,6 +4453,37 @@ static int __init caam_algapi_init(void)
 
        for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
                struct caam_aead_alg *t_alg = driver_aeads + i;
+               u32 c1_alg_sel = t_alg->caam.class1_alg_type &
+                                OP_ALG_ALGSEL_MASK;
+               u32 c2_alg_sel = t_alg->caam.class2_alg_type &
+                                OP_ALG_ALGSEL_MASK;
+               u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+
+               /* Skip DES algorithms if not supported by device */
+               if (!des_inst &&
+                   ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
+                    (c1_alg_sel == OP_ALG_ALGSEL_DES)))
+                       continue;
+
+               /* Skip AES algorithms if not supported by device */
+               if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
+                       continue;
+
+               /*
+                * Check support for AES algorithms not available
+                * on LP devices.
+                */
+               if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
+                       if (alg_aai == OP_ALG_AAI_GCM)
+                               continue;
+
+               /*
+                * Skip algorithms requiring message digests
+                * if MD or MD size is not supported by device.
+                */
+               if (c2_alg_sel &&
+                   (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
+                       continue;
 
                caam_aead_alg_init(t_alg);
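
Both registration loops above apply the same gate: read the CHA instantiation and version registers once, then skip any template whose cipher or digest engine is absent, too small, or (for low-power AES) lacking GCM. Condensed into one predicate, a sketch assuming the driver-internal types from this diff:

	/* Sketch only: would the device accept this AEAD template? */
	static bool caam_aead_supported(const struct caam_aead_alg *t_alg,
					u32 cha_vid, u32 des_inst,
					u32 aes_inst, u32 md_inst,
					unsigned int md_limit)
	{
		u32 c1 = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
		u32 c2 = t_alg->caam.class2_alg_type & OP_ALG_ALGSEL_MASK;
		u32 aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		if (!des_inst && (c1 == OP_ALG_ALGSEL_DES ||
				  c1 == OP_ALG_ALGSEL_3DES))
			return false;
		if (!aes_inst && c1 == OP_ALG_ALGSEL_AES)
			return false;
		/* low-power AES blocks do not implement GCM */
		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP &&
		    aai == OP_ALG_AAI_GCM)
			return false;
		/* digest users need an MD block big enough for the tag */
		return !c2 || (md_inst && t_alg->aead.maxauthsize <= md_limit);
	}
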
 
index f9c78751989ec865491570ed13bf19dbc6b1a799..94433b9fc200dc5f5322d29819ebfe74f3fd90f1 100644 (file)
@@ -127,7 +127,7 @@ struct caam_hash_state {
        int buflen_0;
        u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
        int buflen_1;
-       u8 caam_ctx[MAX_CTX_LEN];
+       u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
        int (*update)(struct ahash_request *req);
        int (*final)(struct ahash_request *req);
        int (*finup)(struct ahash_request *req);
@@ -807,7 +807,7 @@ static int ahash_update_ctx(struct ahash_request *req)
                 * allocate space for base edesc and hw desc commands,
                 * link tables
                 */
-               edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+               edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
                                sec4_sg_bytes, GFP_DMA | flags);
                if (!edesc) {
                        dev_err(jrdev,
@@ -829,7 +829,7 @@ static int ahash_update_ctx(struct ahash_request *req)
                state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
                                                        edesc->sec4_sg + 1,
                                                        buf, state->buf_dma,
-                                                       *buflen, last_buflen);
+                                                       *next_buflen, *buflen);
 
                if (src_nents) {
                        src_map_to_sec4_sg(jrdev, req->src, src_nents,
@@ -919,8 +919,8 @@ static int ahash_final_ctx(struct ahash_request *req)
        sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
 
        /* allocate space for base edesc and hw desc commands, link tables */
-       edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
-                       sec4_sg_bytes, GFP_DMA | flags);
+       edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+                       GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
                return -ENOMEM;
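
Two idioms recur in the caamhash hunks: sizing the allocation from the pointer, sizeof(*edesc), so the size can never drift from the variable's type, and moving from kmalloc to kzalloc so the descriptor area starts zeroed. The first idiom in isolation, as a user-space sketch with a hypothetical struct:

	#include <stdlib.h>

	struct edesc { int words[16]; };

	static struct edesc *alloc_edesc(size_t extra)
	{
		/*
		 * sizeof(*e) follows the variable; sizeof(struct edesc)
		 * would silently go stale if 'e' were ever retyped.
		 * calloc stands in for kzalloc here.
		 */
		struct edesc *e = calloc(1, sizeof(*e) + extra);

		return e;	/* caller checks for NULL and frees */
	}
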
@@ -1006,8 +1006,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
                         sizeof(struct sec4_sg_entry);
 
        /* allocate space for base edesc and hw desc commands, link tables */
-       edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
-                       sec4_sg_bytes, GFP_DMA | flags);
+       edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+                       GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
                return -ENOMEM;
@@ -1092,8 +1092,8 @@ static int ahash_digest(struct ahash_request *req)
        sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
 
        /* allocate space for base edesc and hw desc commands, link tables */
-       edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
-                       DESC_JOB_IO_LEN, GFP_DMA | flags);
+       edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN,
+                       GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
                return -ENOMEM;
@@ -1166,8 +1166,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
        int sh_len;
 
        /* allocate space for base edesc and hw desc commands, link tables */
-       edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
-                       GFP_DMA | flags);
+       edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
                return -ENOMEM;
@@ -1246,7 +1245,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
                 * allocate space for base edesc and hw desc commands,
                 * link tables
                 */
-               edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+               edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
                                sec4_sg_bytes, GFP_DMA | flags);
                if (!edesc) {
                        dev_err(jrdev,
@@ -1354,8 +1353,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
                         sizeof(struct sec4_sg_entry);
 
        /* allocate space for base edesc and hw desc commands, link tables */
-       edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
-                       sec4_sg_bytes, GFP_DMA | flags);
+       edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+                       GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
                return -ENOMEM;
@@ -1449,7 +1448,7 @@ static int ahash_update_first(struct ahash_request *req)
                 * allocate space for base edesc and hw desc commands,
                 * link tables
                 */
-               edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+               edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
                                sec4_sg_bytes, GFP_DMA | flags);
                if (!edesc) {
                        dev_err(jrdev,
@@ -1843,7 +1842,7 @@ caam_hash_alloc(struct caam_hash_template *template,
        struct ahash_alg *halg;
        struct crypto_alg *alg;
 
-       t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
+       t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
        if (!t_alg) {
                pr_err("failed to allocate t_alg\n");
                return ERR_PTR(-ENOMEM);
@@ -1885,8 +1884,10 @@ static int __init caam_algapi_hash_init(void)
        struct device_node *dev_node;
        struct platform_device *pdev;
        struct device *ctrldev;
-       void *priv;
        int i = 0, err = 0;
+       struct caam_drv_private *priv;
+       unsigned int md_limit = SHA512_DIGEST_SIZE;
+       u32 cha_inst, cha_vid;
 
        dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
        if (!dev_node) {
@@ -1912,19 +1913,40 @@ static int __init caam_algapi_hash_init(void)
        if (!priv)
                return -ENODEV;
 
+       /*
+        * Register crypto algorithms the device supports.  First, identify
+        * presence and attributes of MD block.
+        */
+       cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+       cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+
+       /*
+        * Skip registration of any hashing algorithms if MD block
+        * is not present.
+        */
+       if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
+               return -ENODEV;
+
+       /* Limit digest size based on LP256 */
+       if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
+               md_limit = SHA256_DIGEST_SIZE;
+
        INIT_LIST_HEAD(&hash_list);
 
        /* register crypto algorithms the device supports */
        for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
-               /* TODO: check if h/w supports alg */
                struct caam_hash_alg *t_alg;
+               struct caam_hash_template *alg = driver_hash + i;
+
+               /* If MD size is not supported by device, skip registration */
+               if (alg->template_ahash.halg.digestsize > md_limit)
+                       continue;
 
                /* register hmac version */
-               t_alg = caam_hash_alloc(&driver_hash[i], true);
+               t_alg = caam_hash_alloc(alg, true);
                if (IS_ERR(t_alg)) {
                        err = PTR_ERR(t_alg);
-                       pr_warn("%s alg allocation failed\n",
-                               driver_hash[i].driver_name);
+                       pr_warn("%s alg allocation failed\n", alg->driver_name);
                        continue;
                }
 
@@ -1937,11 +1959,10 @@ static int __init caam_algapi_hash_init(void)
                        list_add_tail(&t_alg->entry, &hash_list);
 
                /* register unkeyed version */
-               t_alg = caam_hash_alloc(&driver_hash[i], false);
+               t_alg = caam_hash_alloc(alg, false);
                if (IS_ERR(t_alg)) {
                        err = PTR_ERR(t_alg);
-                       pr_warn("%s alg allocation failed\n",
-                               driver_hash[i].driver_name);
+                       pr_warn("%s alg allocation failed\n", alg->driver_name);
                        continue;
                }
 
index 5095337205b830c148696a37d53a8902643b317f..9b92af2c72412fd05390da752956c9e52d84014d 100644 (file)
@@ -108,6 +108,10 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
 
        atomic_set(&bd->empty, BUF_NOT_EMPTY);
        complete(&bd->filled);
+
+       /* Buffer refilled, invalidate cache */
+       dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE);
+
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
                       DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
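
The dma_sync_single_for_cpu() call added above is the streaming-DMA ownership handoff: once the device has refilled the buffer, stale CPU cache lines must be invalidated before anyone reads it. The general pattern, as a hedged sketch (the dma_* calls are the real API; the surrounding names are hypothetical):

	#include <linux/dma-mapping.h>

	static void consume_refilled_buf(struct device *dev, dma_addr_t addr,
					 void *buf, size_t len)
	{
		/* device -> CPU: invalidate before reading */
		dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);

		use_buffer(buf, len);	/* hypothetical consumer */

		/* CPU -> device: hand the buffer back for the next fill */
		dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
	}
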
@@ -311,7 +315,7 @@ static int __init caam_rng_init(void)
        struct device_node *dev_node;
        struct platform_device *pdev;
        struct device *ctrldev;
-       void *priv;
+       struct caam_drv_private *priv;
        int err;
 
        dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
@@ -338,20 +342,32 @@ static int __init caam_rng_init(void)
        if (!priv)
                return -ENODEV;
 
+       /* Check for an instantiated RNG before registration */
+       if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK))
+               return -ENODEV;
+
        dev = caam_jr_alloc();
        if (IS_ERR(dev)) {
                pr_err("Job Ring Device allocation for transform failed\n");
                return PTR_ERR(dev);
        }
-       rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA);
-       if (!rng_ctx)
-               return -ENOMEM;
+       rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
+       if (!rng_ctx) {
+               err = -ENOMEM;
+               goto free_caam_alloc;
+       }
        err = caam_init_rng(rng_ctx, dev);
        if (err)
-               return err;
+               goto free_rng_ctx;
 
        dev_info(dev, "registering rng-caam\n");
        return hwrng_register(&caam_rng);
+
+free_rng_ctx:
+       kfree(rng_ctx);
+free_caam_alloc:
+       caam_jr_free(dev);
+       return err;
 }
 
 module_init(caam_rng_init);
index f57f395db33f73e8fb5630bd7d2c779a4ca2ff7f..b6955ecdfb3f67d39116c9362a16222070de51cd 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/types.h>
 #include <linux/debugfs.h>
 #include <linux/circ_buf.h>
+#include <linux/clk.h>
 #include <net/xfrm.h>
 
 #include <crypto/algapi.h>
index efacab7539ef6a8afdacbfb8d9965a0ec4f9ef75..8abb4bc548cc06a7a8ae97779701d308255ff050 100644 (file)
 #include "desc_constr.h"
 #include "error.h"
 
+/*
+ * i.MX targets tend to have clock control subsystems that can
+ * enable/disable clocking to our device.
+ */
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+static inline struct clk *caam_drv_identify_clk(struct device *dev,
+                                               char *clk_name)
+{
+       return devm_clk_get(dev, clk_name);
+}
+#else
+static inline struct clk *caam_drv_identify_clk(struct device *dev,
+                                               char *clk_name)
+{
+       return NULL;
+}
+#endif
+
 /*
  * Descriptor to instantiate RNG State Handle 0 in normal mode and
  * load the JDKEK, TDKEK and TDSK registers
@@ -121,7 +139,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
                flags |= DECO_JQCR_FOUR;
 
        /* Instruct the DECO to execute it */
-       wr_reg32(&deco->jr_ctl_hi, flags);
+       setbits32(&deco->jr_ctl_hi, flags);
 
        timeout = 10000000;
        do {
@@ -175,7 +193,7 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
 {
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
        struct caam_ctrl __iomem *ctrl;
-       u32 *desc, status, rdsta_val;
+       u32 *desc, status = 0, rdsta_val;
        int ret = 0, sh_idx;
 
        ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
@@ -207,7 +225,8 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
                 * CAAM eras), then try again.
                 */
                rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
-               if (status || !(rdsta_val & (1 << sh_idx)))
+               if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
+                   !(rdsta_val & (1 << sh_idx)))
                        ret = -EAGAIN;
                if (ret)
                        break;
@@ -279,7 +298,7 @@ static int caam_remove(struct platform_device *pdev)
        struct device *ctrldev;
        struct caam_drv_private *ctrlpriv;
        struct caam_ctrl __iomem *ctrl;
-       int ring, ret = 0;
+       int ring;
 
        ctrldev = &pdev->dev;
        ctrlpriv = dev_get_drvdata(ctrldev);
@@ -303,7 +322,13 @@ static int caam_remove(struct platform_device *pdev)
        /* Unmap controller region */
        iounmap(ctrl);
 
-       return ret;
+       /* shut clocks off before finalizing shutdown */
+       clk_disable_unprepare(ctrlpriv->caam_ipg);
+       clk_disable_unprepare(ctrlpriv->caam_mem);
+       clk_disable_unprepare(ctrlpriv->caam_aclk);
+       clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+
+       return 0;
 }
 
 /*
@@ -370,14 +395,14 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
 int caam_get_era(void)
 {
        struct device_node *caam_node;
-       for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
-               const uint32_t *prop = (uint32_t *)of_get_property(caam_node,
-                               "fsl,sec-era",
-                               NULL);
-               return prop ? *prop : -ENOTSUPP;
-       }
+       int ret;
+       u32 prop;
 
-       return -ENOTSUPP;
+       caam_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+       ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
+       of_node_put(caam_node);
+
+       return IS_ERR_VALUE(ret) ? -ENOTSUPP : prop;
 }
 EXPORT_SYMBOL(caam_get_era);
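
The rewritten caam_get_era() above replaces open-coded of_get_property() parsing with of_property_read_u32(), which handles the missing-property case and byte order, and it adds the of_node_put() that the old for_each loop leaked when returning early. The same shape in isolation, a sketch with a hypothetical compatible string and property:

	#include <linux/of.h>

	static int read_example_rev(void)
	{
		struct device_node *np;
		u32 rev;
		int ret;

		np = of_find_compatible_node(NULL, NULL, "vendor,example");
		if (!np)
			return -ENODEV;

		ret = of_property_read_u32(np, "vendor,rev", &rev);
		of_node_put(np);	/* balance the lookup's reference */

		return ret ? ret : rev;
	}
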
 
@@ -390,6 +415,7 @@ static int caam_probe(struct platform_device *pdev)
        struct device_node *nprop, *np;
        struct caam_ctrl __iomem *ctrl;
        struct caam_drv_private *ctrlpriv;
+       struct clk *clk;
 #ifdef CONFIG_DEBUG_FS
        struct caam_perfmon *perfmon;
 #endif
@@ -398,8 +424,7 @@ static int caam_probe(struct platform_device *pdev)
        int pg_size;
        int BLOCK_OFFSET = 0;
 
-       ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private),
-                               GFP_KERNEL);
+       ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
        if (!ctrlpriv)
                return -ENOMEM;
 
@@ -408,12 +433,76 @@ static int caam_probe(struct platform_device *pdev)
        ctrlpriv->pdev = pdev;
        nprop = pdev->dev.of_node;
 
+       /* Enable clocking */
+       clk = caam_drv_identify_clk(&pdev->dev, "ipg");
+       if (IS_ERR(clk)) {
+               ret = PTR_ERR(clk);
+               dev_err(&pdev->dev,
+                       "can't identify CAAM ipg clk: %d\n", ret);
+               return ret;
+       }
+       ctrlpriv->caam_ipg = clk;
+
+       clk = caam_drv_identify_clk(&pdev->dev, "mem");
+       if (IS_ERR(clk)) {
+               ret = PTR_ERR(clk);
+               dev_err(&pdev->dev,
+                       "can't identify CAAM mem clk: %d\n", ret);
+               return ret;
+       }
+       ctrlpriv->caam_mem = clk;
+
+       clk = caam_drv_identify_clk(&pdev->dev, "aclk");
+       if (IS_ERR(clk)) {
+               ret = PTR_ERR(clk);
+               dev_err(&pdev->dev,
+                       "can't identify CAAM aclk clk: %d\n", ret);
+               return ret;
+       }
+       ctrlpriv->caam_aclk = clk;
+
+       clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
+       if (IS_ERR(clk)) {
+               ret = PTR_ERR(clk);
+               dev_err(&pdev->dev,
+                       "can't identify CAAM emi_slow clk: %d\n", ret);
+               return ret;
+       }
+       ctrlpriv->caam_emi_slow = clk;
+
+       ret = clk_prepare_enable(ctrlpriv->caam_ipg);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
+               return ret;
+       }
+
+       ret = clk_prepare_enable(ctrlpriv->caam_mem);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
+                       ret);
+               goto disable_caam_ipg;
+       }
+
+       ret = clk_prepare_enable(ctrlpriv->caam_aclk);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret);
+               goto disable_caam_mem;
+       }
+
+       ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
+                       ret);
+               goto disable_caam_aclk;
+       }
+
        /* Get configuration properties from device tree */
        /* First, get register page */
        ctrl = of_iomap(nprop, 0);
        if (ctrl == NULL) {
                dev_err(dev, "caam: of_iomap() failed\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto disable_caam_emi_slow;
        }
        /* Finding the page size for using the CTPR_MS register */
        comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
@@ -444,8 +533,9 @@ static int caam_probe(struct platform_device *pdev)
         * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
         * long pointers in master configuration register
         */
-       setbits32(&ctrl->mcr, MCFGR_WDENABLE |
-                 (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
+       clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
+                     MCFGR_WDENABLE | (sizeof(dma_addr_t) == sizeof(u64) ?
+                                       MCFGR_LONG_PTR : 0));
 
        /*
         *  Read the Compile Time parameters and SCFGR to determine
@@ -492,12 +582,11 @@ static int caam_probe(struct platform_device *pdev)
                    of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
                        rspec++;
 
-       ctrlpriv->jrpdev = devm_kzalloc(&pdev->dev,
-                                       sizeof(struct platform_device *) * rspec,
-                                       GFP_KERNEL);
+       ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
+                                       sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
        if (ctrlpriv->jrpdev == NULL) {
-               iounmap(ctrl);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto iounmap_ctrl;
        }
 
        ring = 0;
@@ -537,8 +626,8 @@ static int caam_probe(struct platform_device *pdev)
        /* If no QI and no rings specified, quit and go home */
        if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
                dev_err(dev, "no queues configured, terminating\n");
-               caam_remove(pdev);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto caam_remove;
        }
 
        cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
@@ -595,8 +684,7 @@ static int caam_probe(struct platform_device *pdev)
                } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
                if (ret) {
                        dev_err(dev, "failed to instantiate RNG");
-                       caam_remove(pdev);
-                       return ret;
+                       goto caam_remove;
                }
                /*
                 * Set handles init'ed by this module as the complement of the
@@ -700,6 +788,20 @@ static int caam_probe(struct platform_device *pdev)
                                                 &ctrlpriv->ctl_tdsk_wrap);
 #endif
        return 0;
+
+caam_remove:
+       caam_remove(pdev);
+iounmap_ctrl:
+       iounmap(ctrl);
+disable_caam_emi_slow:
+       clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+disable_caam_aclk:
+       clk_disable_unprepare(ctrlpriv->caam_aclk);
+disable_caam_mem:
+       clk_disable_unprepare(ctrlpriv->caam_mem);
+disable_caam_ipg:
+       clk_disable_unprepare(ctrlpriv->caam_ipg);
+       return ret;
 }
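
The error labels above are the mirror image of the acquisition order in caam_probe(): each failure jumps to the label that undoes exactly what had succeeded so far. The ladder reduced to two clocks, a sketch using the real clk API with hypothetical clock names:

	#include <linux/clk.h>

	static int enable_two_clocks(struct device *dev)
	{
		struct clk *ipg, *mem;
		int ret;

		ipg = devm_clk_get(dev, "ipg");
		if (IS_ERR(ipg))
			return PTR_ERR(ipg);

		mem = devm_clk_get(dev, "mem");
		if (IS_ERR(mem))
			return PTR_ERR(mem);

		ret = clk_prepare_enable(ipg);
		if (ret)
			return ret;

		ret = clk_prepare_enable(mem);
		if (ret)
			goto disable_ipg;	/* undo only what succeeded */

		return 0;

	disable_ipg:
		clk_disable_unprepare(ipg);
		return ret;
	}
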
 
 static struct of_device_id caam_match[] = {
index d397ff9d56fd7ae5a603b76e1b282286c1889398..983d663ef6714c46fd7ee4d724e0e3d1adeb9002 100644 (file)
@@ -8,12 +8,29 @@
 #ifndef DESC_H
 #define DESC_H
 
+/*
+ * 16-byte hardware scatter/gather table
+ * An 8-byte table exists in the hardware spec, but has never been
+ * implemented to date. The 8/16 option is selected at RTL compile time,
+ * and this selection is visible in the Compile Time Parameters Register.
+ */
+
+#define SEC4_SG_LEN_EXT                0x80000000      /* Entry points to table */
+#define SEC4_SG_LEN_FIN                0x40000000      /* Last ent in table */
+#define SEC4_SG_BPID_MASK      0x000000ff
+#define SEC4_SG_BPID_SHIFT     16
+#define SEC4_SG_LEN_MASK       0x3fffffff      /* Excludes EXT and FINAL */
+#define SEC4_SG_OFFS_MASK      0x00001fff
+
 struct sec4_sg_entry {
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+       u32 rsvd1;
+       dma_addr_t ptr;
+#else
        u64 ptr;
-#define SEC4_SG_LEN_FIN 0x40000000
-#define SEC4_SG_LEN_EXT 0x80000000
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_IMX */
        u32 len;
-       u8 reserved;
+       u8 rsvd2;
        u8 buf_pool_id;
        u16 offset;
 };
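
With the flag and mask definitions hoisted out of the struct, the packing of the len word is explicit: bit 31 marks an extension entry, bit 30 the final entry, and the low 30 bits carry the byte count. Hypothetical helpers over those masks (not part of the patch) would look like:

	static inline u32 sec4_sg_len(const struct sec4_sg_entry *e)
	{
		return e->len & SEC4_SG_LEN_MASK;	/* strip EXT/FIN */
	}

	static inline void sec4_sg_mark_final(struct sec4_sg_entry *e)
	{
		e->len |= SEC4_SG_LEN_FIN;	/* last entry in table */
	}
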
index 9f79fd7bd4d7d1b7589a2facdc15e7667dbb8d48..98d07de24fc48c975faf1e13e65d248cc48edd90 100644 (file)
@@ -367,7 +367,7 @@ do { \
        if (upper) \
                append_u64(desc, data); \
        else \
-               append_u32(desc, data); \
+               append_u32(desc, lower_32_bits(data)); \
 } while (0)
 
 #define append_math_add_imm_u64(desc, dest, src0, src1, data) \
index 89b94cc9e7a29b57654ad5931e9999be3d80e664..e2bcacc1a921675cf30f70a40816e1306a8c3ef9 100644 (file)
@@ -91,6 +91,11 @@ struct caam_drv_private {
                                   Handles of the RNG4 block are initialized
                                   by this driver */
 
+       struct clk *caam_ipg;
+       struct clk *caam_mem;
+       struct clk *caam_aclk;
+       struct clk *caam_emi_slow;
+
        /*
         * debugfs entries for developer view into driver/device
         * variables at runtime.
index b8b5d47acd7a9c4ea58dc212f29ecffa84ebc915..f7e0d8d4c3da12cebdfb1df13822eb86da081307 100644 (file)
@@ -202,6 +202,13 @@ static void caam_jr_dequeue(unsigned long devarg)
                userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
                userstatus = jrp->outring[hw_idx].jrstatus;
 
+               /*
+                * Make sure all information from the job has been obtained
+                * before telling CAAM that the job has been removed from the
+                * output ring.
+                */
+               mb();
+
                /* set done */
                wr_reg32(&jrp->rregs->outring_rmvd, 1);
 
@@ -351,12 +358,23 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
 
        jrp->inpring[jrp->inp_ring_write_index] = desc_dma;
 
+       /*
+        * Guarantee that the descriptor's DMA address has been written to
+        * the next slot in the ring before the write index is updated, since
+        * other cores may update this index independently.
+        */
        smp_wmb();
 
        jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
                                    (JOBR_DEPTH - 1);
        jrp->head = (head + 1) & (JOBR_DEPTH - 1);
 
+       /*
+        * Ensure that all job information has been written before
+        * notifying CAAM that a new job was added to the input ring.
+        */
+       wmb();
+
        wr_reg32(&jrp->rregs->inpring_jobadd, 1);
 
        spin_unlock_bh(&jrp->inplock);
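
The two barriers added to caam_jr_enqueue() split the ordering problem in two: smp_wmb() orders the slot write against the index update as seen by other CPUs sharing the ring, while the heavier wmb() orders all prior stores against the MMIO doorbell as seen by the device. The shape of the producer side, a sketch that loosely follows the driver's field names:

	static void ring_publish(struct caam_drv_private_jr *jrp,
				 dma_addr_t desc_dma)
	{
		jrp->inpring[jrp->inp_ring_write_index] = desc_dma;

		smp_wmb();	/* slot visible before the index moves */
		jrp->inp_ring_write_index =
			(jrp->inp_ring_write_index + 1) & (JOBR_DEPTH - 1);

		wmb();		/* all stores visible before the doorbell */
		wr_reg32(&jrp->rregs->inpring_jobadd, 1);
	}
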
@@ -392,18 +410,17 @@ static int caam_jr_init(struct device *dev)
                goto out_free_irq;
 
        error = -ENOMEM;
-       jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
-                                         &inpbusaddr, GFP_KERNEL);
+       jrp->inpring = dma_alloc_coherent(dev, sizeof(*jrp->inpring) *
+                                         JOBR_DEPTH, &inpbusaddr, GFP_KERNEL);
        if (!jrp->inpring)
                goto out_free_irq;
 
-       jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) *
+       jrp->outring = dma_alloc_coherent(dev, sizeof(*jrp->outring) *
                                          JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
        if (!jrp->outring)
                goto out_free_inpring;
 
-       jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
-                              GFP_KERNEL);
+       jrp->entinfo = kcalloc(JOBR_DEPTH, sizeof(*jrp->entinfo), GFP_KERNEL);
        if (!jrp->entinfo)
                goto out_free_outring;
 
@@ -461,8 +478,7 @@ static int caam_jr_probe(struct platform_device *pdev)
        int error;
 
        jrdev = &pdev->dev;
-       jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr),
-                             GFP_KERNEL);
+       jrpriv = devm_kmalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
        if (!jrpriv)
                return -ENOMEM;
 
index 672c97489505340abd440dc694cbc16d97045760..a8a79975682f2c5096e1beccd1c71714530a7176 100644 (file)
  *
  */
 
+#ifdef CONFIG_ARM
+/* These are common macros for Power, put here for ARM */
+#define setbits32(_addr, _v) writel((readl(_addr) | (_v)), (_addr))
+#define clrbits32(_addr, _v) writel((readl(_addr) & ~(_v)), (_addr))
+
+#define out_arch(type, endian, a, v)   __raw_write##type(cpu_to_##endian(v), a)
+#define in_arch(type, endian, a)       endian##_to_cpu(__raw_read##type(a))
+
+#define out_le32(a, v) out_arch(l, le32, a, v)
+#define in_le32(a)     in_arch(l, le32, a)
+
+#define out_be32(a, v) out_arch(l, be32, a, v)
+#define in_be32(a)     in_arch(l, be32, a)
+
+#define clrsetbits(type, addr, clear, set) \
+       out_##type((addr), (in_##type(addr) & ~(clear)) | (set))
+
+#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)
+#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)
+#endif
+
 #ifdef __BIG_ENDIAN
 #define wr_reg32(reg, data) out_be32(reg, data)
 #define rd_reg32(reg) in_be32(reg)
+#define clrsetbits_32(addr, clear, set) clrsetbits_be32(addr, clear, set)
 #ifdef CONFIG_64BIT
 #define wr_reg64(reg, data) out_be64(reg, data)
 #define rd_reg64(reg) in_be64(reg)
@@ -76,6 +98,7 @@
 #ifdef __LITTLE_ENDIAN
 #define wr_reg32(reg, data) __raw_writel(data, reg)
 #define rd_reg32(reg) __raw_readl(reg)
+#define clrsetbits_32(addr, clear, set) clrsetbits_le32(addr, clear, set)
 #ifdef CONFIG_64BIT
 #define wr_reg64(reg, data) __raw_writeq(data, reg)
 #define rd_reg64(reg) __raw_readq(reg)
 
 /*
  * The only user of these wr/rd_reg64 functions is the Job Ring (JR).
- * The DMA address registers in the JR are a pair of 32-bit registers.
- * The layout is:
+ * The DMA address registers in the JR are handled differently depending on
+ * platform:
+ *
+ * 1. All BE CAAM platforms and i.MX platforms (LE CAAM):
  *
  *    base + 0x0000 : most-significant 32 bits
  *    base + 0x0004 : least-significant 32 bits
  *
  * The 32-bit version of this core therefore has to write to base + 0x0004
- * to set the 32-bit wide DMA address. This seems to be independent of the
- * endianness of the written/read data.
+ * to set the 32-bit wide DMA address.
+ *
+ * 2. All other LE CAAM platforms (LS1021A etc.)
+ *    base + 0x0000 : least-significant 32 bits
+ *    base + 0x0004 : most-significant 32 bits
  */
 
 #ifndef CONFIG_64BIT
+#if !defined(CONFIG_CRYPTO_DEV_FSL_CAAM_LE) || \
+       defined(CONFIG_CRYPTO_DEV_FSL_CAAM_IMX)
 #define REG64_MS32(reg) ((u32 __iomem *)(reg))
 #define REG64_LS32(reg) ((u32 __iomem *)(reg) + 1)
+#else
+#define REG64_MS32(reg) ((u32 __iomem *)(reg) + 1)
+#define REG64_LS32(reg) ((u32 __iomem *)(reg))
+#endif
 
 static inline void wr_reg64(u64 __iomem *reg, u64 data)
 {
@@ -133,18 +167,28 @@ struct jr_outentry {
 #define CHA_NUM_MS_DECONUM_SHIFT       24
 #define CHA_NUM_MS_DECONUM_MASK        (0xfull << CHA_NUM_MS_DECONUM_SHIFT)
 
-/* CHA Version IDs */
+/*
+ * CHA version IDs / instantiation bitfields
+ * Defined for use with the cha_id fields in perfmon, but the same shift/mask
+ * selectors can be used to pull out the number of instantiated blocks within
+ * cha_num fields in perfmon because the locations are the same.
+ */
 #define CHA_ID_LS_AES_SHIFT    0
-#define CHA_ID_LS_AES_MASK             (0xfull << CHA_ID_LS_AES_SHIFT)
+#define CHA_ID_LS_AES_MASK     (0xfull << CHA_ID_LS_AES_SHIFT)
+#define CHA_ID_LS_AES_LP       (0x3ull << CHA_ID_LS_AES_SHIFT)
+#define CHA_ID_LS_AES_HP       (0x4ull << CHA_ID_LS_AES_SHIFT)
 
 #define CHA_ID_LS_DES_SHIFT    4
-#define CHA_ID_LS_DES_MASK             (0xfull << CHA_ID_LS_DES_SHIFT)
+#define CHA_ID_LS_DES_MASK     (0xfull << CHA_ID_LS_DES_SHIFT)
 
 #define CHA_ID_LS_ARC4_SHIFT   8
 #define CHA_ID_LS_ARC4_MASK    (0xfull << CHA_ID_LS_ARC4_SHIFT)
 
 #define CHA_ID_LS_MD_SHIFT     12
 #define CHA_ID_LS_MD_MASK      (0xfull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_LP256     (0x0ull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_LP512     (0x1ull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_HP                (0x2ull << CHA_ID_LS_MD_SHIFT)
 
 #define CHA_ID_LS_RNG_SHIFT    16
 #define CHA_ID_LS_RNG_MASK     (0xfull << CHA_ID_LS_RNG_SHIFT)
@@ -395,10 +439,16 @@ struct caam_ctrl {
 /* AXI read cache control */
 #define MCFGR_ARCACHE_SHIFT    12
 #define MCFGR_ARCACHE_MASK     (0xf << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_BUFF     (0x1 << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_CACH     (0x2 << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_RALL     (0x4 << MCFGR_ARCACHE_SHIFT)
 
 /* AXI write cache control */
 #define MCFGR_AWCACHE_SHIFT    8
 #define MCFGR_AWCACHE_MASK     (0xf << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_BUFF     (0x1 << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_CACH     (0x2 << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_WALL     (0x8 << MCFGR_AWCACHE_SHIFT)
 
 /* AXI pipeline depth */
 #define MCFGR_AXIPIPE_SHIFT    4
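
clrsetbits_32(), used on MCFGR during probe, is a read-modify-write over the driver's endian-aware accessors: clear a field's mask, then OR in the new value. Expanded by hand as a sketch (rd_reg32/wr_reg32 are the accessors defined above):

	static inline void mcfgr_set_awcache(u32 __iomem *mcr, u32 awcache)
	{
		u32 v = rd_reg32(mcr);

		v &= ~MCFGR_AWCACHE_MASK;	/* drop the old field */
		v |= awcache & MCFGR_AWCACHE_MASK;
		wr_reg32(mcr, v);
	}
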
index b68b74cc7b778dcd89eb75d8528fb88bc121b4f3..18cd6d1f587049a63af53d28225cbbe1fb5a866d 100644 (file)
@@ -15,7 +15,6 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
 {
        sec4_sg_ptr->ptr = dma;
        sec4_sg_ptr->len = len;
-       sec4_sg_ptr->reserved = 0;
        sec4_sg_ptr->buf_pool_id = 0;
        sec4_sg_ptr->offset = offset;
 #ifdef DEBUG
@@ -106,9 +105,15 @@ static inline void dma_unmap_sg_chained(
 {
        if (unlikely(chained)) {
                int i;
+               struct scatterlist *tsg = sg;
+
+               /*
+                * Use a local copy of the sg pointer to avoid moving the
+                * head of the list pointed to by sg as we walk the list.
+                */
                for (i = 0; i < nents; i++) {
-                       dma_unmap_sg(dev, sg, 1, dir);
-                       sg = sg_next(sg);
+                       dma_unmap_sg(dev, tsg, 1, dir);
+                       tsg = sg_next(tsg);
                }
        } else if (nents) {
                dma_unmap_sg(dev, sg, nents, dir);
@@ -119,19 +124,23 @@ static inline int dma_map_sg_chained(
        struct device *dev, struct scatterlist *sg, unsigned int nents,
        enum dma_data_direction dir, bool chained)
 {
-       struct scatterlist *first = sg;
-
        if (unlikely(chained)) {
                int i;
+               struct scatterlist *tsg = sg;
+
+               /*
+                * Use a local copy of the sg pointer to avoid moving the
+                * head of the list pointed to by sg as we walk the list.
+                */
                for (i = 0; i < nents; i++) {
-                       if (!dma_map_sg(dev, sg, 1, dir)) {
-                               dma_unmap_sg_chained(dev, first, i, dir,
+                       if (!dma_map_sg(dev, tsg, 1, dir)) {
+                               dma_unmap_sg_chained(dev, sg, i, dir,
                                                     chained);
                                nents = 0;
                                break;
                        }
 
-                       sg = sg_next(sg);
+                       tsg = sg_next(tsg);
                }
        } else
                nents = dma_map_sg(dev, sg, nents, dir);
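
Both helpers above get the same fix: walk the chained scatterlist through a local cursor (tsg) so the caller-visible head pointer survives, which the failure branch of the map path needs when it unmaps from the start of the list. The point in miniature, as a sketch:

	#include <linux/scatterlist.h>

	static void walk_chain(struct scatterlist *sg, int nents)
	{
		struct scatterlist *tsg = sg;	/* local cursor */

		while (nents--)
			tsg = sg_next(tsg);
		/* 'sg' still points at the head, e.g. for error cleanup */
	}
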
index f2e6de361fd1805094b651d3a36c6141d0ad8fbb..bb241c3ab6b9cad5fec5ed22f4a9f96b5d7da551 100644 (file)
@@ -216,6 +216,7 @@ static const struct acpi_device_id ccp_acpi_match[] = {
        { "AMDI0C00", 0 },
        { },
 };
+MODULE_DEVICE_TABLE(acpi, ccp_acpi_match);
 #endif
 
 #ifdef CONFIG_OF
@@ -223,6 +224,7 @@ static const struct of_device_id ccp_of_match[] = {
        { .compatible = "amd,ccp-seattle-v1a" },
        { },
 };
+MODULE_DEVICE_TABLE(of, ccp_of_match);
 #endif
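
The two MODULE_DEVICE_TABLE() additions export the ACPI and OF match tables into the module image so depmod can generate module aliases; without them the tables only served in-kernel matching, and ccp built as a module would not autoload when a matching device appeared. The generic shape, with hypothetical names:

	#include <linux/module.h>
	#include <linux/of.h>

	static const struct of_device_id foo_of_match[] = {
		{ .compatible = "vendor,foo" },	/* hypothetical binding */
		{ },
	};
	MODULE_DEVICE_TABLE(of, foo_of_match);
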
 
 static struct platform_driver ccp_platform_driver = {
index ad47d0d6109845c810055fdb2ddaba8fa9d62039..68e8aa90fe01cbc074d70df5d5262b70834567d5 100644 (file)
@@ -334,7 +334,7 @@ static int img_hash_dma_init(struct img_hash_dev *hdev)
 
        hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx");
        if (!hdev->dma_lch) {
-               dev_err(hdev->dev, "Couldn't aquire a slave DMA channel.\n");
+               dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
                return -EBUSY;
        }
        dma_conf.direction = DMA_MEM_TO_DEV;
index 402631a19a112770af83f0f4228176703e1c0b44..8f279035328125de8f8c1216392e5674def45753 100644 (file)
@@ -156,7 +156,8 @@ struct ablk_ctx {
 };
 
 struct aead_ctx {
-       struct buffer_desc *buffer;
+       struct buffer_desc *src;
+       struct buffer_desc *dst;
        struct scatterlist ivlist;
        /* used when the hmac is not on one sg entry */
        u8 *hmac_virt;
@@ -198,6 +199,15 @@ struct ixp_alg {
        int registered;
 };
 
+struct ixp_aead_alg {
+       struct aead_alg crypto;
+       const struct ix_hash_algo *hash;
+       u32 cfg_enc;
+       u32 cfg_dec;
+
+       int registered;
+};
+
 static const struct ix_hash_algo hash_alg_md5 = {
        .cfgword        = 0xAA010004,
        .icv            = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
@@ -339,11 +349,11 @@ static void finish_scattered_hmac(struct crypt_ctl *crypt)
        struct aead_ctx *req_ctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int authsize = crypto_aead_authsize(tfm);
-       int decryptlen = req->cryptlen - authsize;
+       int decryptlen = req->assoclen + req->cryptlen - authsize;
 
        if (req_ctx->encrypt) {
                scatterwalk_map_and_copy(req_ctx->hmac_virt,
-                       req->src, decryptlen, authsize, 1);
+                       req->dst, decryptlen, authsize, 1);
        }
        dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
 }
@@ -364,7 +374,8 @@ static void one_packet(dma_addr_t phys)
                struct aead_request *req = crypt->data.aead_req;
                struct aead_ctx *req_ctx = aead_request_ctx(req);
 
-               free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
+               free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+               free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
                if (req_ctx->hmac_virt) {
                        finish_scattered_hmac(crypt);
                }
@@ -573,11 +584,10 @@ static int init_tfm_ablk(struct crypto_tfm *tfm)
        return init_tfm(tfm);
 }
 
-static int init_tfm_aead(struct crypto_tfm *tfm)
+static int init_tfm_aead(struct crypto_aead *tfm)
 {
-       crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
-                               sizeof(struct aead_ctx));
-       return init_tfm(tfm);
+       crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
+       return init_tfm(crypto_aead_tfm(tfm));
 }
 
 static void exit_tfm(struct crypto_tfm *tfm)
@@ -587,6 +597,11 @@ static void exit_tfm(struct crypto_tfm *tfm)
        free_sa_dir(&ctx->decrypt);
 }
 
+static void exit_tfm_aead(struct crypto_aead *tfm)
+{
+       exit_tfm(crypto_aead_tfm(tfm));
+}
+
 static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
                int init_len, u32 ctx_addr, const u8 *key, int key_len)
 {
@@ -969,24 +984,6 @@ static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
        return ret;
 }
 
-static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
-               unsigned int nbytes)
-{
-       int offset = 0;
-
-       if (!nbytes)
-               return 0;
-
-       for (;;) {
-               if (start < offset + sg->length)
-                       break;
-
-               offset += sg->length;
-               sg = sg_next(sg);
-       }
-       return (start + nbytes > offset + sg->length);
-}
-
 static int aead_perform(struct aead_request *req, int encrypt,
                int cryptoffset, int eff_cryptlen, u8 *iv)
 {
@@ -1002,6 +999,8 @@ static int aead_perform(struct aead_request *req, int encrypt,
        struct device *dev = &pdev->dev;
        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                                GFP_KERNEL : GFP_ATOMIC;
+       enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
+       unsigned int lastlen;
 
        if (qmgr_stat_full(SEND_QID))
                return -EAGAIN;
@@ -1030,35 +1029,55 @@ static int aead_perform(struct aead_request *req, int encrypt,
        crypt->crypt_len = eff_cryptlen;
 
        crypt->auth_offs = 0;
-       crypt->auth_len = req->assoclen + ivsize + cryptlen;
+       crypt->auth_len = req->assoclen + cryptlen;
        BUG_ON(ivsize && !req->iv);
        memcpy(crypt->iv, req->iv, ivsize);
 
+       req_ctx->dst = NULL;
+
        if (req->src != req->dst) {
-               BUG(); /* -ENOTSUP because of my laziness */
+               struct buffer_desc dst_hook;
+
+               crypt->mode |= NPE_OP_NOT_IN_PLACE;
+               src_direction = DMA_TO_DEVICE;
+
+               buf = chainup_buffers(dev, req->dst, crypt->auth_len,
+                                     &dst_hook, flags, DMA_FROM_DEVICE);
+               req_ctx->dst = dst_hook.next;
+               crypt->dst_buf = dst_hook.phys_next;
+
+               if (!buf)
+                       goto free_buf_dst;
+
+               if (encrypt) {
+                       lastlen = buf->buf_len;
+                       if (lastlen >= authsize)
+                               crypt->icv_rev_aes = buf->phys_addr +
+                                                    buf->buf_len - authsize;
+               }
        }
 
-       /* ASSOC data */
-       buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
-               flags, DMA_TO_DEVICE);
-       req_ctx->buffer = src_hook.next;
+       buf = chainup_buffers(dev, req->src, crypt->auth_len,
+                             &src_hook, flags, src_direction);
+       req_ctx->src = src_hook.next;
        crypt->src_buf = src_hook.phys_next;
        if (!buf)
-               goto out;
-       /* IV */
-       sg_init_table(&req_ctx->ivlist, 1);
-       sg_set_buf(&req_ctx->ivlist, iv, ivsize);
-       buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
-                       DMA_BIDIRECTIONAL);
-       if (!buf)
-               goto free_chain;
-       if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
+               goto free_buf_src;
+
+       if (!encrypt || !req_ctx->dst) {
+               lastlen = buf->buf_len;
+               if (lastlen >= authsize)
+                       crypt->icv_rev_aes = buf->phys_addr +
+                                            buf->buf_len - authsize;
+       }
+
+       if (unlikely(lastlen < authsize)) {
                /* The 12 hmac bytes are scattered,
                 * so we need to copy them into a safe buffer */
                req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
                                &crypt->icv_rev_aes);
                if (unlikely(!req_ctx->hmac_virt))
-                       goto free_chain;
+                       goto free_buf_src;
                if (!encrypt) {
                        scatterwalk_map_and_copy(req_ctx->hmac_virt,
                                req->src, cryptlen, authsize, 0);
@@ -1067,27 +1086,16 @@ static int aead_perform(struct aead_request *req, int encrypt,
        } else {
                req_ctx->hmac_virt = NULL;
        }
-       /* Crypt */
-       buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
-                       DMA_BIDIRECTIONAL);
-       if (!buf)
-               goto free_hmac_virt;
-       if (!req_ctx->hmac_virt) {
-               crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
-       }
 
        crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
        qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
        BUG_ON(qmgr_stat_overflow(SEND_QID));
        return -EINPROGRESS;
-free_hmac_virt:
-       if (req_ctx->hmac_virt) {
-               dma_pool_free(buffer_pool, req_ctx->hmac_virt,
-                               crypt->icv_rev_aes);
-       }
-free_chain:
-       free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
-out:
+
+free_buf_src:
+       free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+free_buf_dst:
+       free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
        crypt->ctl_flags = CTL_FLAG_UNUSED;
        return -ENOMEM;
 }
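
The rewritten aead_perform() keys its ICV handling off lastlen, the size of the final buffer in the relevant chain: a tag that fits entirely inside that buffer can be produced or checked in place via icv_rev_aes, while a tag straddling buffers is staged through a contiguous bounce allocation from buffer_pool. The decision in isolation, as a trivial sketch:

	/* Sketch: may the ICV live in the last chained buffer? */
	static bool icv_fits_in_place(unsigned int lastlen,
				      unsigned int authsize)
	{
		return lastlen >= authsize;
	}
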
@@ -1173,40 +1181,12 @@ badkey:
 
 static int aead_encrypt(struct aead_request *req)
 {
-       unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
-       return aead_perform(req, 1, req->assoclen + ivsize,
-                       req->cryptlen, req->iv);
+       return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
 }
 
 static int aead_decrypt(struct aead_request *req)
 {
-       unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
-       return aead_perform(req, 0, req->assoclen + ivsize,
-                       req->cryptlen, req->iv);
-}
-
-static int aead_givencrypt(struct aead_givcrypt_request *req)
-{
-       struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
-       struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
-       unsigned len, ivsize = crypto_aead_ivsize(tfm);
-       __be64 seq;
-
-       /* copied from eseqiv.c */
-       if (!ctx->salted) {
-               get_random_bytes(ctx->salt, ivsize);
-               ctx->salted = 1;
-       }
-       memcpy(req->areq.iv, ctx->salt, ivsize);
-       len = ivsize;
-       if (ivsize > sizeof(u64)) {
-               memset(req->giv, 0, ivsize - sizeof(u64));
-               len = sizeof(u64);
-       }
-       seq = cpu_to_be64(req->seq);
-       memcpy(req->giv + ivsize - len, &seq, len);
-       return aead_perform(&req->areq, 1, req->areq.assoclen,
-                       req->areq.cryptlen +ivsize, req->giv);
+       return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
 }
 
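aead_givencrypt() can be deleted because IV generation now lives in the core (the seqiv/echainiv templates) instead of each driver. For reference, the removed code derived the IV material from a per-tfm random salt and the request sequence number; condensed into one helper, the removed logic was roughly:

    #include <linux/types.h>
    #include <linux/string.h>
    #include <asm/byteorder.h>

    /* salt[] is ivsize bytes of per-tfm random data; seq is the request
     * sequence number.  iv receives the salt; giv receives zero padding
     * plus the big-endian sequence number in its low bytes.
     */
    static void build_giv(u8 *giv, u8 *iv, const u8 *salt,
                          unsigned int ivsize, u64 seq)
    {
            unsigned int len = ivsize;
            __be64 beseq = cpu_to_be64(seq);

            memcpy(iv, salt, ivsize);
            if (ivsize > sizeof(u64)) {
                    memset(giv, 0, ivsize - sizeof(u64));
                    len = sizeof(u64);
            }
            memcpy(giv + ivsize - len, &beseq, len);
    }
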
 static struct ixp_alg ixp4xx_algos[] = {
@@ -1319,80 +1299,77 @@ static struct ixp_alg ixp4xx_algos[] = {
        },
        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
        .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
-}, {
+} };
+
+static struct ixp_aead_alg ixp4xx_aeads[] = {
+{
        .crypto = {
-               .cra_name       = "authenc(hmac(md5),cbc(des))",
-               .cra_blocksize  = DES_BLOCK_SIZE,
-               .cra_u          = { .aead = {
-                       .ivsize         = DES_BLOCK_SIZE,
-                       .maxauthsize    = MD5_DIGEST_SIZE,
-                       }
-               }
+               .base = {
+                       .cra_name       = "authenc(hmac(md5),cbc(des))",
+                       .cra_blocksize  = DES_BLOCK_SIZE,
+               },
+               .ivsize         = DES_BLOCK_SIZE,
+               .maxauthsize    = MD5_DIGEST_SIZE,
        },
        .hash = &hash_alg_md5,
        .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
 }, {
        .crypto = {
-               .cra_name       = "authenc(hmac(md5),cbc(des3_ede))",
-               .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
-               .cra_u          = { .aead = {
-                       .ivsize         = DES3_EDE_BLOCK_SIZE,
-                       .maxauthsize    = MD5_DIGEST_SIZE,
-                       }
-               }
+               .base = {
+                       .cra_name       = "authenc(hmac(md5),cbc(des3_ede))",
+                       .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
+               },
+               .ivsize         = DES3_EDE_BLOCK_SIZE,
+               .maxauthsize    = MD5_DIGEST_SIZE,
        },
        .hash = &hash_alg_md5,
        .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
 }, {
        .crypto = {
-               .cra_name       = "authenc(hmac(sha1),cbc(des))",
-               .cra_blocksize  = DES_BLOCK_SIZE,
-               .cra_u          = { .aead = {
+               .base = {
+                       .cra_name       = "authenc(hmac(sha1),cbc(des))",
+                       .cra_blocksize  = DES_BLOCK_SIZE,
+               },
                        .ivsize         = DES_BLOCK_SIZE,
                        .maxauthsize    = SHA1_DIGEST_SIZE,
-                       }
-               }
        },
        .hash = &hash_alg_sha1,
        .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
 }, {
        .crypto = {
-               .cra_name       = "authenc(hmac(sha1),cbc(des3_ede))",
-               .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
-               .cra_u          = { .aead = {
-                       .ivsize         = DES3_EDE_BLOCK_SIZE,
-                       .maxauthsize    = SHA1_DIGEST_SIZE,
-                       }
-               }
+               .base = {
+                       .cra_name       = "authenc(hmac(sha1),cbc(des3_ede))",
+                       .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
+               },
+               .ivsize         = DES3_EDE_BLOCK_SIZE,
+               .maxauthsize    = SHA1_DIGEST_SIZE,
        },
        .hash = &hash_alg_sha1,
        .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
 }, {
        .crypto = {
-               .cra_name       = "authenc(hmac(md5),cbc(aes))",
-               .cra_blocksize  = AES_BLOCK_SIZE,
-               .cra_u          = { .aead = {
-                       .ivsize         = AES_BLOCK_SIZE,
-                       .maxauthsize    = MD5_DIGEST_SIZE,
-                       }
-               }
+               .base = {
+                       .cra_name       = "authenc(hmac(md5),cbc(aes))",
+                       .cra_blocksize  = AES_BLOCK_SIZE,
+               },
+               .ivsize         = AES_BLOCK_SIZE,
+               .maxauthsize    = MD5_DIGEST_SIZE,
        },
        .hash = &hash_alg_md5,
        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
        .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
 }, {
        .crypto = {
-               .cra_name       = "authenc(hmac(sha1),cbc(aes))",
-               .cra_blocksize  = AES_BLOCK_SIZE,
-               .cra_u          = { .aead = {
-                       .ivsize         = AES_BLOCK_SIZE,
-                       .maxauthsize    = SHA1_DIGEST_SIZE,
-                       }
-               }
+               .base = {
+                       .cra_name       = "authenc(hmac(sha1),cbc(aes))",
+                       .cra_blocksize  = AES_BLOCK_SIZE,
+               },
+               .ivsize         = AES_BLOCK_SIZE,
+               .maxauthsize    = SHA1_DIGEST_SIZE,
        },
        .hash = &hash_alg_sha1,
        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
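
The conversion above moves each algorithm out of the old cra_u.aead union into a real struct aead_alg, whose embedded .base carries the generic fields, and registration switches to crypto_register_aead(). Combining one table entry with what the registration loop further below fills in, the effective definition looks like this (a sketch for illustration; the driver composes cra_driver_name at init time with IXP_POSTFIX):

    #include <crypto/internal/aead.h>
    #include <crypto/aes.h>
    #include <crypto/sha.h>

    static struct aead_alg example = {
            .base = {
                    .cra_name       = "authenc(hmac(sha1),cbc(aes))",
                    .cra_blocksize  = AES_BLOCK_SIZE,
                    .cra_ctxsize    = sizeof(struct ixp_ctx),
                    .cra_module     = THIS_MODULE,
                    .cra_alignmask  = 3,
                    .cra_priority   = 300,
                    .cra_flags      = CRYPTO_ALG_KERN_DRIVER_ONLY |
                                      CRYPTO_ALG_ASYNC,
            },
            .ivsize         = AES_BLOCK_SIZE,
            .maxauthsize    = SHA1_DIGEST_SIZE,
            .setkey         = aead_setkey,
            .setauthsize    = aead_setauthsize,
            .encrypt        = aead_encrypt,
            .decrypt        = aead_decrypt,
            .init           = init_tfm_aead,
            .exit           = exit_tfm_aead,
    };
    /* registered via crypto_register_aead(&example) */
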
@@ -1436,32 +1413,20 @@ static int __init ixp_module_init(void)
                if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
                        continue;
                }
-               if (!ixp4xx_algos[i].hash) {
-                       /* block ciphers */
-                       cra->cra_type = &crypto_ablkcipher_type;
-                       cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
-                                        CRYPTO_ALG_KERN_DRIVER_ONLY |
-                                        CRYPTO_ALG_ASYNC;
-                       if (!cra->cra_ablkcipher.setkey)
-                               cra->cra_ablkcipher.setkey = ablk_setkey;
-                       if (!cra->cra_ablkcipher.encrypt)
-                               cra->cra_ablkcipher.encrypt = ablk_encrypt;
-                       if (!cra->cra_ablkcipher.decrypt)
-                               cra->cra_ablkcipher.decrypt = ablk_decrypt;
-                       cra->cra_init = init_tfm_ablk;
-               } else {
-                       /* authenc */
-                       cra->cra_type = &crypto_aead_type;
-                       cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
-                                        CRYPTO_ALG_KERN_DRIVER_ONLY |
-                                        CRYPTO_ALG_ASYNC;
-                       cra->cra_aead.setkey = aead_setkey;
-                       cra->cra_aead.setauthsize = aead_setauthsize;
-                       cra->cra_aead.encrypt = aead_encrypt;
-                       cra->cra_aead.decrypt = aead_decrypt;
-                       cra->cra_aead.givencrypt = aead_givencrypt;
-                       cra->cra_init = init_tfm_aead;
-               }
+
+               /* block ciphers */
+               cra->cra_type = &crypto_ablkcipher_type;
+               cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+                                CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                CRYPTO_ALG_ASYNC;
+               if (!cra->cra_ablkcipher.setkey)
+                       cra->cra_ablkcipher.setkey = ablk_setkey;
+               if (!cra->cra_ablkcipher.encrypt)
+                       cra->cra_ablkcipher.encrypt = ablk_encrypt;
+               if (!cra->cra_ablkcipher.decrypt)
+                       cra->cra_ablkcipher.decrypt = ablk_decrypt;
+               cra->cra_init = init_tfm_ablk;
+
                cra->cra_ctxsize = sizeof(struct ixp_ctx);
                cra->cra_module = THIS_MODULE;
                cra->cra_alignmask = 3;
@@ -1473,6 +1438,38 @@ static int __init ixp_module_init(void)
                else
                        ixp4xx_algos[i].registered = 1;
        }
+
+       for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
+               struct aead_alg *cra = &ixp4xx_aeads[i].crypto;
+
+               if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+                            "%s"IXP_POSTFIX, cra->base.cra_name) >=
+                   CRYPTO_MAX_ALG_NAME)
+                       continue;
+               if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
+                       continue;
+
+               /* authenc */
+               cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                     CRYPTO_ALG_ASYNC;
+               cra->setkey = aead_setkey;
+               cra->setauthsize = aead_setauthsize;
+               cra->encrypt = aead_encrypt;
+               cra->decrypt = aead_decrypt;
+               cra->init = init_tfm_aead;
+               cra->exit = exit_tfm_aead;
+
+               cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
+               cra->base.cra_module = THIS_MODULE;
+               cra->base.cra_alignmask = 3;
+               cra->base.cra_priority = 300;
+
+               if (crypto_register_aead(cra))
+                       printk(KERN_ERR "Failed to register '%s'\n",
+                               cra->base.cra_driver_name);
+               else
+                       ixp4xx_aeads[i].registered = 1;
+       }
        return 0;
 }
 
@@ -1481,6 +1478,11 @@ static void __exit ixp_module_exit(void)
        int num = ARRAY_SIZE(ixp4xx_algos);
        int i;
 
+       for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
+               if (ixp4xx_aeads[i].registered)
+                       crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
+       }
+
        for (i=0; i< num; i++) {
                if (ixp4xx_algos[i].registered)
                        crypto_unregister_alg(&ixp4xx_algos[i].crypto);
index 1c6f98dd88f4958003d6da53ce43933e9e016d45..0643e3366e3309de88a03e687a2d5353f5715a22 100644 (file)
@@ -533,7 +533,6 @@ static struct platform_driver marvell_cesa = {
        .probe          = mv_cesa_probe,
        .remove         = mv_cesa_remove,
        .driver         = {
-               .owner  = THIS_MODULE,
                .name   = "marvell-cesa",
                .of_match_table = mv_cesa_of_match_table,
        },
index e421c96c763a6781ac1b40f4f30eb5966dad0e91..ad7552a6998c081a61cbc0fbf5f9d2e28c629bcd 100644 (file)
@@ -14,11 +14,14 @@ config CRYPTO_DEV_NX_ENCRYPT
 config CRYPTO_DEV_NX_COMPRESS
        tristate "Compression acceleration support"
        default y
+       select CRYPTO_ALGAPI
+       select 842_DECOMPRESS
        help
          Support for PowerPC Nest (NX) compression acceleration. This
          module supports acceleration for compressing memory with the 842
-         algorithm.  One of the platform drivers must be selected also.
-         If you choose 'M' here, this module will be called nx_compress.
+         algorithm using the cryptographic API.  One of the platform
+         drivers must be selected also.  If you choose 'M' here, this
+         module will be called nx_compress.
 
 if CRYPTO_DEV_NX_COMPRESS
 
@@ -42,14 +45,4 @@ config CRYPTO_DEV_NX_COMPRESS_POWERNV
          algorithm.  This supports NX hardware on the PowerNV platform.
          If you choose 'M' here, this module will be called nx_compress_powernv.
 
-config CRYPTO_DEV_NX_COMPRESS_CRYPTO
-       tristate "Compression acceleration cryptographic interface"
-       select CRYPTO_ALGAPI
-       select 842_DECOMPRESS
-       default y
-       help
-         Support for PowerPC Nest (NX) accelerators using the cryptographic
-         API.  If you choose 'M' here, this module will be called
-         nx_compress_crypto.
-
 endif
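
CRYPTO_DEV_NX_COMPRESS now selects 842_DECOMPRESS directly because the merged driver falls back to the software 842 library when hardware decompression fails; the fallback itself is a single call into lib/842. A sketch, with dlen as an in/out length:

    #include <linux/sw842.h>

    /* On entry *dlen is the space available in dst; on success it is
     * updated to the number of decompressed bytes written.
     */
    static int sw_fallback(const u8 *src, unsigned int slen,
                           u8 *dst, unsigned int *dlen)
    {
            return sw842_decompress(src, slen, dst, dlen);
    }
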
index e1684f5adb11e0f19a398496e47038dd924eada5..b727821c8ed4eb9277113859e5530692ce86ed19 100644 (file)
@@ -10,12 +10,8 @@ nx-crypto-objs := nx.o \
                  nx-sha256.o \
                  nx-sha512.o
 
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS) += nx-compress.o nx-compress-platform.o
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_CRYPTO) += nx-compress-crypto.o
+obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compress.o
+obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o
 nx-compress-objs := nx-842.o
-nx-compress-platform-objs := nx-842-platform.o
 nx-compress-pseries-objs := nx-842-pseries.o
 nx-compress-powernv-objs := nx-842-powernv.o
-nx-compress-crypto-objs := nx-842-crypto.o
diff --git a/drivers/crypto/nx/nx-842-crypto.c b/drivers/crypto/nx/nx-842-crypto.c
deleted file mode 100644 (file)
index d53a1dc..0000000
+++ /dev/null
@@ -1,580 +0,0 @@
-/*
- * Cryptographic API for the NX-842 hardware compression.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * Copyright (C) IBM Corporation, 2011-2015
- *
- * Original Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
- *                   Seth Jennings <sjenning@linux.vnet.ibm.com>
- *
- * Rewrite: Dan Streetman <ddstreet@ieee.org>
- *
- * This is an interface to the NX-842 compression hardware in PowerPC
- * processors.  Most of the complexity of this driver is due to the fact that
- * the NX-842 compression hardware requires the input and output data buffers
- * to be specifically aligned, to be a specific multiple in length, and within
- * specific minimum and maximum lengths.  Those restrictions, provided by the
- * nx-842 driver via nx842_constraints, mean this driver must use bounce
- * buffers and headers to correct misaligned in or out buffers, and to split
- * input buffers that are too large.
- *
- * This driver will fall back to software decompression if the hardware
- * decompression fails, so this driver's decompression should never fail as
- * long as the provided compressed buffer is valid.  Any compressed buffer
- * created by this driver will have a header (except ones where the input
- * perfectly matches the constraints); so users of this driver cannot simply
- * pass a compressed buffer created by this driver over to the 842 software
- * decompression library.  Instead, users must use this driver to decompress;
- * if the hardware fails or is unavailable, the compressed buffer will be
- * parsed and the header removed, and the raw 842 buffer(s) passed to the 842
- * software decompression library.
- *
- * This does not fall back to software compression, however, since the caller
- * of this function is specifically requesting hardware compression; if the
- * hardware compression fails, the caller can fall back to software
- * compression, and the raw 842 compressed buffer that the software compressor
- * creates can be passed to this driver for hardware decompression; any
- * buffer without our specific header magic is assumed to be a raw 842 buffer
- * and passed directly to the hardware.  Note that the software compression
- * library will produce a compressed buffer that is incompatible with the
- * hardware decompressor if the original input buffer length is not a multiple
- * of 8; if such a compressed buffer is passed to this driver for
- * decompression, the hardware will reject it and this driver will then pass
- * it over to the software library for decompression.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/crypto.h>
-#include <linux/vmalloc.h>
-#include <linux/sw842.h>
-#include <linux/ratelimit.h>
-
-#include "nx-842.h"
-
-/* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit
- * template (see lib/842/842.h), so this magic number will never appear at
- * the start of a raw 842 compressed buffer.  That is important, as any buffer
- * passed to us without this magic is assumed to be a raw 842 compressed
- * buffer, and passed directly to the hardware to decompress.
- */
-#define NX842_CRYPTO_MAGIC     (0xf842)
-#define NX842_CRYPTO_GROUP_MAX (0x20)
-#define NX842_CRYPTO_HEADER_SIZE(g)                            \
-       (sizeof(struct nx842_crypto_header) +                   \
-        sizeof(struct nx842_crypto_header_group) * (g))
-#define NX842_CRYPTO_HEADER_MAX_SIZE                           \
-       NX842_CRYPTO_HEADER_SIZE(NX842_CRYPTO_GROUP_MAX)
-
-/* bounce buffer size */
-#define BOUNCE_BUFFER_ORDER    (2)
-#define BOUNCE_BUFFER_SIZE                                     \
-       ((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER))
-
-/* try longer on comp because we can fall back to sw decomp if hw is busy */
-#define COMP_BUSY_TIMEOUT      (250) /* ms */
-#define DECOMP_BUSY_TIMEOUT    (50) /* ms */
-
-struct nx842_crypto_header_group {
-       __be16 padding;                 /* unused bytes at start of group */
-       __be32 compressed_length;       /* compressed bytes in group */
-       __be32 uncompressed_length;     /* bytes after decompression */
-} __packed;
-
-struct nx842_crypto_header {
-       __be16 magic;           /* NX842_CRYPTO_MAGIC */
-       __be16 ignore;          /* decompressed end bytes to ignore */
-       u8 groups;              /* total groups in this header */
-       struct nx842_crypto_header_group group[];
-} __packed;
-
-struct nx842_crypto_param {
-       u8 *in;
-       unsigned int iremain;
-       u8 *out;
-       unsigned int oremain;
-       unsigned int ototal;
-};
-
-static int update_param(struct nx842_crypto_param *p,
-                       unsigned int slen, unsigned int dlen)
-{
-       if (p->iremain < slen)
-               return -EOVERFLOW;
-       if (p->oremain < dlen)
-               return -ENOSPC;
-
-       p->in += slen;
-       p->iremain -= slen;
-       p->out += dlen;
-       p->oremain -= dlen;
-       p->ototal += dlen;
-
-       return 0;
-}
-
-struct nx842_crypto_ctx {
-       u8 *wmem;
-       u8 *sbounce, *dbounce;
-
-       struct nx842_crypto_header header;
-       struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX];
-};
-
-static int nx842_crypto_init(struct crypto_tfm *tfm)
-{
-       struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       ctx->wmem = kmalloc(nx842_workmem_size(), GFP_KERNEL);
-       ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
-       ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
-       if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) {
-               kfree(ctx->wmem);
-               free_page((unsigned long)ctx->sbounce);
-               free_page((unsigned long)ctx->dbounce);
-               return -ENOMEM;
-       }
-
-       return 0;
-}
-
-static void nx842_crypto_exit(struct crypto_tfm *tfm)
-{
-       struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       kfree(ctx->wmem);
-       free_page((unsigned long)ctx->sbounce);
-       free_page((unsigned long)ctx->dbounce);
-}
-
-static int read_constraints(struct nx842_constraints *c)
-{
-       int ret;
-
-       ret = nx842_constraints(c);
-       if (ret) {
-               pr_err_ratelimited("could not get nx842 constraints : %d\n",
-                                  ret);
-               return ret;
-       }
-
-       /* limit maximum, to always have enough bounce buffer to decompress */
-       if (c->maximum > BOUNCE_BUFFER_SIZE) {
-               c->maximum = BOUNCE_BUFFER_SIZE;
-               pr_info_once("limiting nx842 maximum to %x\n", c->maximum);
-       }
-
-       return 0;
-}
-
-static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf)
-{
-       int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
-
-       /* compress should have added space for header */
-       if (s > be16_to_cpu(hdr->group[0].padding)) {
-               pr_err("Internal error: no space for header\n");
-               return -EINVAL;
-       }
-
-       memcpy(buf, hdr, s);
-
-       print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1, buf, s, 0);
-
-       return 0;
-}
-
-static int compress(struct nx842_crypto_ctx *ctx,
-                   struct nx842_crypto_param *p,
-                   struct nx842_crypto_header_group *g,
-                   struct nx842_constraints *c,
-                   u16 *ignore,
-                   unsigned int hdrsize)
-{
-       unsigned int slen = p->iremain, dlen = p->oremain, tmplen;
-       unsigned int adj_slen = slen;
-       u8 *src = p->in, *dst = p->out;
-       int ret, dskip = 0;
-       ktime_t timeout;
-
-       if (p->iremain == 0)
-               return -EOVERFLOW;
-
-       if (p->oremain == 0 || hdrsize + c->minimum > dlen)
-               return -ENOSPC;
-
-       if (slen % c->multiple)
-               adj_slen = round_up(slen, c->multiple);
-       if (slen < c->minimum)
-               adj_slen = c->minimum;
-       if (slen > c->maximum)
-               adj_slen = slen = c->maximum;
-       if (adj_slen > slen || (u64)src % c->alignment) {
-               adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE);
-               slen = min(slen, BOUNCE_BUFFER_SIZE);
-               if (adj_slen > slen)
-                       memset(ctx->sbounce + slen, 0, adj_slen - slen);
-               memcpy(ctx->sbounce, src, slen);
-               src = ctx->sbounce;
-               slen = adj_slen;
-               pr_debug("using comp sbounce buffer, len %x\n", slen);
-       }
-
-       dst += hdrsize;
-       dlen -= hdrsize;
-
-       if ((u64)dst % c->alignment) {
-               dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst);
-               dst += dskip;
-               dlen -= dskip;
-       }
-       if (dlen % c->multiple)
-               dlen = round_down(dlen, c->multiple);
-       if (dlen < c->minimum) {
-nospc:
-               dst = ctx->dbounce;
-               dlen = min(p->oremain, BOUNCE_BUFFER_SIZE);
-               dlen = round_down(dlen, c->multiple);
-               dskip = 0;
-               pr_debug("using comp dbounce buffer, len %x\n", dlen);
-       }
-       if (dlen > c->maximum)
-               dlen = c->maximum;
-
-       tmplen = dlen;
-       timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT);
-       do {
-               dlen = tmplen; /* reset dlen, if we're retrying */
-               ret = nx842_compress(src, slen, dst, &dlen, ctx->wmem);
-               /* possibly we should reduce the slen here, instead of
-                * retrying with the dbounce buffer?
-                */
-               if (ret == -ENOSPC && dst != ctx->dbounce)
-                       goto nospc;
-       } while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
-       if (ret)
-               return ret;
-
-       dskip += hdrsize;
-
-       if (dst == ctx->dbounce)
-               memcpy(p->out + dskip, dst, dlen);
-
-       g->padding = cpu_to_be16(dskip);
-       g->compressed_length = cpu_to_be32(dlen);
-       g->uncompressed_length = cpu_to_be32(slen);
-
-       if (p->iremain < slen) {
-               *ignore = slen - p->iremain;
-               slen = p->iremain;
-       }
-
-       pr_debug("compress slen %x ignore %x dlen %x padding %x\n",
-                slen, *ignore, dlen, dskip);
-
-       return update_param(p, slen, dskip + dlen);
-}
-
-static int nx842_crypto_compress(struct crypto_tfm *tfm,
-                                const u8 *src, unsigned int slen,
-                                u8 *dst, unsigned int *dlen)
-{
-       struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct nx842_crypto_header *hdr = &ctx->header;
-       struct nx842_crypto_param p;
-       struct nx842_constraints c;
-       unsigned int groups, hdrsize, h;
-       int ret, n;
-       bool add_header;
-       u16 ignore = 0;
-
-       p.in = (u8 *)src;
-       p.iremain = slen;
-       p.out = dst;
-       p.oremain = *dlen;
-       p.ototal = 0;
-
-       *dlen = 0;
-
-       ret = read_constraints(&c);
-       if (ret)
-               return ret;
-
-       groups = min_t(unsigned int, NX842_CRYPTO_GROUP_MAX,
-                      DIV_ROUND_UP(p.iremain, c.maximum));
-       hdrsize = NX842_CRYPTO_HEADER_SIZE(groups);
-
-       /* skip adding header if the buffers meet all constraints */
-       add_header = (p.iremain % c.multiple    ||
-                     p.iremain < c.minimum     ||
-                     p.iremain > c.maximum     ||
-                     (u64)p.in % c.alignment   ||
-                     p.oremain % c.multiple    ||
-                     p.oremain < c.minimum     ||
-                     p.oremain > c.maximum     ||
-                     (u64)p.out % c.alignment);
-
-       hdr->magic = cpu_to_be16(NX842_CRYPTO_MAGIC);
-       hdr->groups = 0;
-       hdr->ignore = 0;
-
-       while (p.iremain > 0) {
-               n = hdr->groups++;
-               if (hdr->groups > NX842_CRYPTO_GROUP_MAX)
-                       return -ENOSPC;
-
-               /* header goes before first group */
-               h = !n && add_header ? hdrsize : 0;
-
-               if (ignore)
-                       pr_warn("internal error, ignore is set %x\n", ignore);
-
-               ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h);
-               if (ret)
-                       return ret;
-       }
-
-       if (!add_header && hdr->groups > 1) {
-               pr_err("Internal error: No header but multiple groups\n");
-               return -EINVAL;
-       }
-
-       /* ignore indicates the input stream needed to be padded */
-       hdr->ignore = cpu_to_be16(ignore);
-       if (ignore)
-               pr_debug("marked %d bytes as ignore\n", ignore);
-
-       if (add_header)
-               ret = nx842_crypto_add_header(hdr, dst);
-       if (ret)
-               return ret;
-
-       *dlen = p.ototal;
-
-       pr_debug("compress total slen %x dlen %x\n", slen, *dlen);
-
-       return 0;
-}
-
-static int decompress(struct nx842_crypto_ctx *ctx,
-                     struct nx842_crypto_param *p,
-                     struct nx842_crypto_header_group *g,
-                     struct nx842_constraints *c,
-                     u16 ignore,
-                     bool usehw)
-{
-       unsigned int slen = be32_to_cpu(g->compressed_length);
-       unsigned int required_len = be32_to_cpu(g->uncompressed_length);
-       unsigned int dlen = p->oremain, tmplen;
-       unsigned int adj_slen = slen;
-       u8 *src = p->in, *dst = p->out;
-       u16 padding = be16_to_cpu(g->padding);
-       int ret, spadding = 0, dpadding = 0;
-       ktime_t timeout;
-
-       if (!slen || !required_len)
-               return -EINVAL;
-
-       if (p->iremain <= 0 || padding + slen > p->iremain)
-               return -EOVERFLOW;
-
-       if (p->oremain <= 0 || required_len - ignore > p->oremain)
-               return -ENOSPC;
-
-       src += padding;
-
-       if (!usehw)
-               goto usesw;
-
-       if (slen % c->multiple)
-               adj_slen = round_up(slen, c->multiple);
-       if (slen < c->minimum)
-               adj_slen = c->minimum;
-       if (slen > c->maximum)
-               goto usesw;
-       if (slen < adj_slen || (u64)src % c->alignment) {
-               /* we can append padding bytes because the 842 format defines
-                * an "end" template (see lib/842/842_decompress.c) and will
-                * ignore any bytes following it.
-                */
-               if (slen < adj_slen)
-                       memset(ctx->sbounce + slen, 0, adj_slen - slen);
-               memcpy(ctx->sbounce, src, slen);
-               src = ctx->sbounce;
-               spadding = adj_slen - slen;
-               slen = adj_slen;
-               pr_debug("using decomp sbounce buffer, len %x\n", slen);
-       }
-
-       if (dlen % c->multiple)
-               dlen = round_down(dlen, c->multiple);
-       if (dlen < required_len || (u64)dst % c->alignment) {
-               dst = ctx->dbounce;
-               dlen = min(required_len, BOUNCE_BUFFER_SIZE);
-               pr_debug("using decomp dbounce buffer, len %x\n", dlen);
-       }
-       if (dlen < c->minimum)
-               goto usesw;
-       if (dlen > c->maximum)
-               dlen = c->maximum;
-
-       tmplen = dlen;
-       timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT);
-       do {
-               dlen = tmplen; /* reset dlen, if we're retrying */
-               ret = nx842_decompress(src, slen, dst, &dlen, ctx->wmem);
-       } while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
-       if (ret) {
-usesw:
-               /* reset everything, sw doesn't have constraints */
-               src = p->in + padding;
-               slen = be32_to_cpu(g->compressed_length);
-               spadding = 0;
-               dst = p->out;
-               dlen = p->oremain;
-               dpadding = 0;
-               if (dlen < required_len) { /* have ignore bytes */
-                       dst = ctx->dbounce;
-                       dlen = BOUNCE_BUFFER_SIZE;
-               }
-               pr_info_ratelimited("using software 842 decompression\n");
-               ret = sw842_decompress(src, slen, dst, &dlen);
-       }
-       if (ret)
-               return ret;
-
-       slen -= spadding;
-
-       dlen -= ignore;
-       if (ignore)
-               pr_debug("ignoring last %x bytes\n", ignore);
-
-       if (dst == ctx->dbounce)
-               memcpy(p->out, dst, dlen);
-
-       pr_debug("decompress slen %x padding %x dlen %x ignore %x\n",
-                slen, padding, dlen, ignore);
-
-       return update_param(p, slen + padding, dlen);
-}
-
-static int nx842_crypto_decompress(struct crypto_tfm *tfm,
-                                  const u8 *src, unsigned int slen,
-                                  u8 *dst, unsigned int *dlen)
-{
-       struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct nx842_crypto_header *hdr;
-       struct nx842_crypto_param p;
-       struct nx842_constraints c;
-       int n, ret, hdr_len;
-       u16 ignore = 0;
-       bool usehw = true;
-
-       p.in = (u8 *)src;
-       p.iremain = slen;
-       p.out = dst;
-       p.oremain = *dlen;
-       p.ototal = 0;
-
-       *dlen = 0;
-
-       if (read_constraints(&c))
-               usehw = false;
-
-       hdr = (struct nx842_crypto_header *)src;
-
-       /* If it doesn't start with our header magic number, assume it's a raw
-        * 842 compressed buffer and pass it directly to the hardware driver
-        */
-       if (be16_to_cpu(hdr->magic) != NX842_CRYPTO_MAGIC) {
-               struct nx842_crypto_header_group g = {
-                       .padding =              0,
-                       .compressed_length =    cpu_to_be32(p.iremain),
-                       .uncompressed_length =  cpu_to_be32(p.oremain),
-               };
-
-               ret = decompress(ctx, &p, &g, &c, 0, usehw);
-               if (ret)
-                       return ret;
-
-               *dlen = p.ototal;
-
-               return 0;
-       }
-
-       if (!hdr->groups) {
-               pr_err("header has no groups\n");
-               return -EINVAL;
-       }
-       if (hdr->groups > NX842_CRYPTO_GROUP_MAX) {
-               pr_err("header has too many groups %x, max %x\n",
-                      hdr->groups, NX842_CRYPTO_GROUP_MAX);
-               return -EINVAL;
-       }
-
-       hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
-       if (hdr_len > slen)
-               return -EOVERFLOW;
-
-       memcpy(&ctx->header, src, hdr_len);
-       hdr = &ctx->header;
-
-       for (n = 0; n < hdr->groups; n++) {
-               /* ignore applies to last group */
-               if (n + 1 == hdr->groups)
-                       ignore = be16_to_cpu(hdr->ignore);
-
-               ret = decompress(ctx, &p, &hdr->group[n], &c, ignore, usehw);
-               if (ret)
-                       return ret;
-       }
-
-       *dlen = p.ototal;
-
-       pr_debug("decompress total slen %x dlen %x\n", slen, *dlen);
-
-       return 0;
-}
-
-static struct crypto_alg alg = {
-       .cra_name               = "842",
-       .cra_driver_name        = "842-nx",
-       .cra_priority           = 300,
-       .cra_flags              = CRYPTO_ALG_TYPE_COMPRESS,
-       .cra_ctxsize            = sizeof(struct nx842_crypto_ctx),
-       .cra_module             = THIS_MODULE,
-       .cra_init               = nx842_crypto_init,
-       .cra_exit               = nx842_crypto_exit,
-       .cra_u                  = { .compress = {
-       .coa_compress           = nx842_crypto_compress,
-       .coa_decompress         = nx842_crypto_decompress } }
-};
-
-static int __init nx842_crypto_mod_init(void)
-{
-       return crypto_register_alg(&alg);
-}
-module_init(nx842_crypto_mod_init);
-
-static void __exit nx842_crypto_mod_exit(void)
-{
-       crypto_unregister_alg(&alg);
-}
-module_exit(nx842_crypto_mod_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("IBM PowerPC Nest (NX) 842 Hardware Compression Interface");
-MODULE_ALIAS_CRYPTO("842");
-MODULE_ALIAS_CRYPTO("842-nx");
-MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
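
Nothing here is lost: this file's contents move into drivers/crypto/nx/nx-842.c (see the additions further below), parameterized by a struct nx842_driver instead of the global platform pointer. The heart of the bounce-buffer decision is plain length arithmetic against the hardware constraints; condensed into one helper it looks roughly like this (a sketch, not the driver's exact code; over-maximum inputs are additionally clamped and split into groups):

    #include <linux/kernel.h>       /* round_up() */

    /* Returns true when buf/len violate the alignment, multiple, or
     * minimum constraints and must be bounced; *adj_len is the padded
     * length the bounce buffer needs.
     */
    static bool needs_bounce(const void *buf, unsigned int len,
                             unsigned int *adj_len,
                             const struct nx842_constraints *c)
    {
            unsigned int adj = len;

            if (len % c->multiple)
                    adj = round_up(len, c->multiple);
            if (len < c->minimum)
                    adj = c->minimum;
            *adj_len = adj;
            return adj != len || (unsigned long)buf % c->alignment;
    }
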
diff --git a/drivers/crypto/nx/nx-842-platform.c b/drivers/crypto/nx/nx-842-platform.c
deleted file mode 100644 (file)
index 664f13d..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-
-#include "nx-842.h"
-
-/* this is needed, separate from the main nx-842.c driver, because that main
- * driver loads the platform drivers during its init(), and it expects one
- * (or none) of the platform drivers to set this pointer to its driver.
- * That means this pointer can't be in the main nx-842 driver, because it
- * wouldn't be accessible until after the main driver loaded, which wouldn't
- * be possible as it's waiting for the platform driver to load.  So place it
- * here.
- */
-static struct nx842_driver *driver;
-static DEFINE_SPINLOCK(driver_lock);
-
-struct nx842_driver *nx842_platform_driver(void)
-{
-       return driver;
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver);
-
-bool nx842_platform_driver_set(struct nx842_driver *_driver)
-{
-       bool ret = false;
-
-       spin_lock(&driver_lock);
-
-       if (!driver) {
-               driver = _driver;
-               ret = true;
-       } else
-               WARN(1, "can't set platform driver, already set to %s\n",
-                    driver->name);
-
-       spin_unlock(&driver_lock);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_set);
-
-/* only call this from the platform driver exit function */
-void nx842_platform_driver_unset(struct nx842_driver *_driver)
-{
-       spin_lock(&driver_lock);
-
-       if (driver == _driver)
-               driver = NULL;
-       else if (driver)
-               WARN(1, "can't unset platform driver %s, currently set to %s\n",
-                    _driver->name, driver->name);
-       else
-               WARN(1, "can't unset platform driver, already unset\n");
-
-       spin_unlock(&driver_lock);
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_unset);
-
-bool nx842_platform_driver_get(void)
-{
-       bool ret = false;
-
-       spin_lock(&driver_lock);
-
-       if (driver)
-               ret = try_module_get(driver->owner);
-
-       spin_unlock(&driver_lock);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_get);
-
-void nx842_platform_driver_put(void)
-{
-       spin_lock(&driver_lock);
-
-       if (driver)
-               module_put(driver->owner);
-
-       spin_unlock(&driver_lock);
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_put);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
-MODULE_DESCRIPTION("842 H/W Compression platform driver");
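
This whole indirection layer can go because the common code no longer needs to discover a platform driver at run time: each platform module now hands its own struct nx842_driver to the shared code when a transform is initialized, as the powernv and pseries hunks below show. The binding is a one-line cra_init wrapper (sketch; example_driver is a hypothetical stand-in):

    /* Bind this module's low-level driver at tfm init time; no shared,
     * lock-protected driver pointer (or module get/put) is required.
     */
    static int example_crypto_init(struct crypto_tfm *tfm)
    {
            return nx842_crypto_init(tfm, &example_driver);
    }
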
index 33b3b0abf4ae7f555f12b5b02bd4e0759a6d5e2b..3750e13d872181c425feefafbc259c03024b75e0 100644 (file)
@@ -26,6 +26,8 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
 MODULE_DESCRIPTION("842 H/W Compression driver for IBM PowerNV processors");
+MODULE_ALIAS_CRYPTO("842");
+MODULE_ALIAS_CRYPTO("842-nx");
 
 #define WORKMEM_ALIGN  (CRB_ALIGN)
 #define CSB_WAIT_MAX   (5000) /* ms */
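
The new MODULE_ALIAS_CRYPTO("842") / ("842-nx") lines let the crypto API autoload this module when someone requests the "842" algorithm. A consumer only ever touches the generic compression interface; roughly (a sketch, error handling trimmed):

    #include <linux/crypto.h>
    #include <linux/err.h>

    static int compress_with_842(const u8 *in, unsigned int ilen,
                                 u8 *out, unsigned int *olen)
    {
            struct crypto_comp *tfm;
            int ret;

            /* the "842" request can trigger loading of 842-nx via the alias */
            tfm = crypto_alloc_comp("842", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            ret = crypto_comp_compress(tfm, in, ilen, out, olen);
            crypto_free_comp(tfm);
            return ret;
    }
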
@@ -344,7 +346,8 @@ static int wait_for_csb(struct nx842_workmem *wmem,
        }
 
        /* successful completion */
-       pr_debug_ratelimited("Processed %u bytes in %lu us\n", csb->count,
+       pr_debug_ratelimited("Processed %u bytes in %lu us\n",
+                            be32_to_cpu(csb->count),
                             (unsigned long)ktime_us_delta(now, start));
 
        return 0;
@@ -581,9 +584,29 @@ static struct nx842_driver nx842_powernv_driver = {
        .decompress =   nx842_powernv_decompress,
 };
 
+static int nx842_powernv_crypto_init(struct crypto_tfm *tfm)
+{
+       return nx842_crypto_init(tfm, &nx842_powernv_driver);
+}
+
+static struct crypto_alg nx842_powernv_alg = {
+       .cra_name               = "842",
+       .cra_driver_name        = "842-nx",
+       .cra_priority           = 300,
+       .cra_flags              = CRYPTO_ALG_TYPE_COMPRESS,
+       .cra_ctxsize            = sizeof(struct nx842_crypto_ctx),
+       .cra_module             = THIS_MODULE,
+       .cra_init               = nx842_powernv_crypto_init,
+       .cra_exit               = nx842_crypto_exit,
+       .cra_u                  = { .compress = {
+       .coa_compress           = nx842_crypto_compress,
+       .coa_decompress         = nx842_crypto_decompress } }
+};
+
 static __init int nx842_powernv_init(void)
 {
        struct device_node *dn;
+       int ret;
 
        /* verify workmem size/align restrictions */
        BUILD_BUG_ON(WORKMEM_ALIGN % CRB_ALIGN);
@@ -594,17 +617,14 @@ static __init int nx842_powernv_init(void)
        BUILD_BUG_ON(DDE_BUFFER_ALIGN % DDE_BUFFER_SIZE_MULT);
        BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT);
 
-       pr_info("loading\n");
-
        for_each_compatible_node(dn, NULL, "ibm,power-nx")
                nx842_powernv_probe(dn);
 
-       if (!nx842_ct) {
-               pr_err("no coprocessors found\n");
+       if (!nx842_ct)
                return -ENODEV;
-       }
 
-       if (!nx842_platform_driver_set(&nx842_powernv_driver)) {
+       ret = crypto_register_alg(&nx842_powernv_alg);
+       if (ret) {
                struct nx842_coproc *coproc, *n;
 
                list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
@@ -612,11 +632,9 @@ static __init int nx842_powernv_init(void)
                        kfree(coproc);
                }
 
-               return -EEXIST;
+               return ret;
        }
 
-       pr_info("loaded\n");
-
        return 0;
 }
 module_init(nx842_powernv_init);
@@ -625,13 +643,11 @@ static void __exit nx842_powernv_exit(void)
 {
        struct nx842_coproc *coproc, *n;
 
-       nx842_platform_driver_unset(&nx842_powernv_driver);
+       crypto_unregister_alg(&nx842_powernv_alg);
 
        list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
                list_del(&coproc->list);
                kfree(coproc);
        }
-
-       pr_info("unloaded\n");
 }
 module_exit(nx842_powernv_exit);
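
Note the failure handling in init: if crypto_register_alg() fails, the probed coprocessors are torn down and the real error is returned instead of a hard-coded -EEXIST. The teardown uses the list-safe iterator because entries are freed while walking (the same idiom as above):

    #include <linux/list.h>
    #include <linux/slab.h>

    /* The _safe variant keeps a lookahead pointer (n) so the current
     * entry may be deleted and freed during iteration.
     */
    static void free_coprocs(struct list_head *coprocs)
    {
            struct nx842_coproc *coproc, *n;

            list_for_each_entry_safe(coproc, n, coprocs, list) {
                    list_del(&coproc->list);
                    kfree(coproc);
            }
    }
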
index 3040a6091bf2797a8a959f375651eb84f54464e4..f4cbde03c6adda03c497f0ce08a358081f78d6d6 100644 (file)
@@ -29,6 +29,8 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>");
 MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
+MODULE_ALIAS_CRYPTO("842");
+MODULE_ALIAS_CRYPTO("842-nx");
 
 static struct nx842_constraints nx842_pseries_constraints = {
        .alignment =    DDE_BUFFER_ALIGN,
@@ -99,11 +101,6 @@ struct nx842_workmem {
 #define NX842_HW_PAGE_SIZE     (4096)
 #define NX842_HW_PAGE_MASK     (~(NX842_HW_PAGE_SIZE-1))
 
-enum nx842_status {
-       UNAVAILABLE,
-       AVAILABLE
-};
-
 struct ibm_nx842_counters {
        atomic64_t comp_complete;
        atomic64_t comp_failed;
@@ -121,7 +118,6 @@ static struct nx842_devdata {
        unsigned int max_sg_len;
        unsigned int max_sync_size;
        unsigned int max_sync_sg;
-       enum nx842_status status;
 } __rcu *devdata;
 static DEFINE_SPINLOCK(devdata_mutex);
 
@@ -230,9 +226,12 @@ static int nx842_validate_result(struct device *dev,
        switch (csb->completion_code) {
        case 0: /* Completed without error */
                break;
-       case 64: /* Target bytes > Source bytes during compression */
+       case 64: /* Compression ok, but output larger than input */
+               dev_dbg(dev, "%s: output size larger than input size\n",
+                                       __func__);
+               break;
        case 13: /* Output buffer too small */
-               dev_dbg(dev, "%s: Compression output larger than input\n",
+               dev_dbg(dev, "%s: Out of space in output buffer\n",
                                        __func__);
                return -ENOSPC;
        case 66: /* Input data contains an illegal template field */
@@ -537,41 +536,36 @@ static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
                devdata->max_sync_size = 0;
                devdata->max_sync_sg = 0;
                devdata->max_sg_len = 0;
-               devdata->status = UNAVAILABLE;
                return 0;
        } else
                return -ENOENT;
 }
 
 /**
- * nx842_OF_upd_status -- Update the device info from OF status prop
+ * nx842_OF_upd_status -- Check the device info from OF status prop
  *
  * The status property indicates if the accelerator is enabled.  If the
  * device is in the OF tree it indicates that the hardware is present.
  * The status field indicates if the device is enabled when the status
  * is 'okay'.  Otherwise the device driver will be disabled.
  *
- * @devdata - struct nx842_devdata to update
  * @prop - struct property pointer containing the status string for the update
  *
  * Returns:
  *  0 - Device is available
- *  -EINVAL - Device is not available
+ *  -ENODEV - Device is not available
+ *  -EINVAL - Device status is unknown
  */
-static int nx842_OF_upd_status(struct nx842_devdata *devdata,
-                                       struct property *prop) {
-       int ret = 0;
+static int nx842_OF_upd_status(struct property *prop)
+{
        const char *status = (const char *)prop->value;
 
-       if (!strncmp(status, "okay", (size_t)prop->length)) {
-               devdata->status = AVAILABLE;
-       } else {
-               dev_info(devdata->dev, "%s: status '%s' is not 'okay'\n",
-                               __func__, status);
-               devdata->status = UNAVAILABLE;
-       }
+       if (!strncmp(status, "okay", (size_t)prop->length))
+               return 0;
+       if (!strncmp(status, "disabled", (size_t)prop->length))
+               return -ENODEV;
+       dev_info(devdata->dev, "%s: unknown status '%s'\n", __func__, status);
 
-       return ret;
+       return -EINVAL;
 }
 
 /**
@@ -735,6 +729,10 @@ static int nx842_OF_upd(struct property *new_prop)
        int ret = 0;
        unsigned long flags;
 
+       new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
+       if (!new_devdata)
+               return -ENOMEM;
+
        spin_lock_irqsave(&devdata_mutex, flags);
        old_devdata = rcu_dereference_check(devdata,
                        lockdep_is_held(&devdata_mutex));
@@ -744,16 +742,10 @@ static int nx842_OF_upd(struct property *new_prop)
        if (!old_devdata || !of_node) {
                pr_err("%s: device is not available\n", __func__);
                spin_unlock_irqrestore(&devdata_mutex, flags);
+               kfree(new_devdata);
                return -ENODEV;
        }
 
-       new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
-       if (!new_devdata) {
-               dev_err(old_devdata->dev, "%s: Could not allocate memory for device data\n", __func__);
-               ret = -ENOMEM;
-               goto error_out;
-       }
-
        memcpy(new_devdata, old_devdata, sizeof(*old_devdata));
        new_devdata->counters = old_devdata->counters;
 
@@ -777,7 +769,7 @@ static int nx842_OF_upd(struct property *new_prop)
                goto out;
 
        /* Perform property updates */
-       ret = nx842_OF_upd_status(new_devdata, status);
+       ret = nx842_OF_upd_status(status);
        if (ret)
                goto error_out;
 
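The motivation for moving the kzalloc() to the top of nx842_OF_upd() (and likewise nx842_probe() below) is that a GFP_NOFS allocation may sleep, which is not allowed while holding devdata_mutex via spin_lock_irqsave(); allocating first also keeps the error path trivial. The shape of the fix, as a generic fragment (a sketch, not the exact code):

    /* Do any sleeping allocation before taking the spinlock; if the
     * locked section then bails out, just free the unpublished copy.
     */
    new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
    if (!new_devdata)
            return -ENOMEM;

    spin_lock_irqsave(&devdata_mutex, flags);
    old_devdata = rcu_dereference_check(devdata,
                    lockdep_is_held(&devdata_mutex));
    if (!old_devdata) {
            spin_unlock_irqrestore(&devdata_mutex, flags);
            kfree(new_devdata);     /* never published; plain kfree is fine */
            return -ENODEV;
    }
    /* ... update fields, then rcu_assign_pointer(devdata, new_devdata) ... */
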
@@ -970,13 +962,43 @@ static struct nx842_driver nx842_pseries_driver = {
        .decompress =   nx842_pseries_decompress,
 };
 
-static int __init nx842_probe(struct vio_dev *viodev,
-                                 const struct vio_device_id *id)
+static int nx842_pseries_crypto_init(struct crypto_tfm *tfm)
+{
+       return nx842_crypto_init(tfm, &nx842_pseries_driver);
+}
+
+static struct crypto_alg nx842_pseries_alg = {
+       .cra_name               = "842",
+       .cra_driver_name        = "842-nx",
+       .cra_priority           = 300,
+       .cra_flags              = CRYPTO_ALG_TYPE_COMPRESS,
+       .cra_ctxsize            = sizeof(struct nx842_crypto_ctx),
+       .cra_module             = THIS_MODULE,
+       .cra_init               = nx842_pseries_crypto_init,
+       .cra_exit               = nx842_crypto_exit,
+       .cra_u                  = { .compress = {
+       .coa_compress           = nx842_crypto_compress,
+       .coa_decompress         = nx842_crypto_decompress } }
+};
+
+static int nx842_probe(struct vio_dev *viodev,
+                      const struct vio_device_id *id)
 {
        struct nx842_devdata *old_devdata, *new_devdata = NULL;
        unsigned long flags;
        int ret = 0;
 
+       new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
+       if (!new_devdata)
+               return -ENOMEM;
+
+       new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
+                       GFP_NOFS);
+       if (!new_devdata->counters) {
+               kfree(new_devdata);
+               return -ENOMEM;
+       }
+
        spin_lock_irqsave(&devdata_mutex, flags);
        old_devdata = rcu_dereference_check(devdata,
                        lockdep_is_held(&devdata_mutex));
@@ -989,21 +1011,6 @@ static int __init nx842_probe(struct vio_dev *viodev,
 
        dev_set_drvdata(&viodev->dev, NULL);
 
-       new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
-       if (!new_devdata) {
-               dev_err(&viodev->dev, "%s: Could not allocate memory for device data\n", __func__);
-               ret = -ENOMEM;
-               goto error_unlock;
-       }
-
-       new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
-                       GFP_NOFS);
-       if (!new_devdata->counters) {
-               dev_err(&viodev->dev, "%s: Could not allocate memory for performance counters\n", __func__);
-               ret = -ENOMEM;
-               goto error_unlock;
-       }
-
        new_devdata->vdev = viodev;
        new_devdata->dev = &viodev->dev;
        nx842_OF_set_defaults(new_devdata);
@@ -1016,9 +1023,12 @@ static int __init nx842_probe(struct vio_dev *viodev,
        of_reconfig_notifier_register(&nx842_of_nb);
 
        ret = nx842_OF_upd(NULL);
-       if (ret && ret != -ENODEV) {
-               dev_err(&viodev->dev, "could not parse device tree. %d\n", ret);
-               ret = -1;
+       if (ret)
+               goto error;
+
+       ret = crypto_register_alg(&nx842_pseries_alg);
+       if (ret) {
+               dev_err(&viodev->dev, "could not register comp alg: %d\n", ret);
                goto error;
        }
 
@@ -1043,7 +1053,7 @@ error:
        return ret;
 }
 
-static int __exit nx842_remove(struct vio_dev *viodev)
+static int nx842_remove(struct vio_dev *viodev)
 {
        struct nx842_devdata *old_devdata;
        unsigned long flags;
@@ -1051,6 +1061,8 @@ static int __exit nx842_remove(struct vio_dev *viodev)
        pr_info("Removing IBM Power 842 compression device\n");
        sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group);
 
+       crypto_unregister_alg(&nx842_pseries_alg);
+
        spin_lock_irqsave(&devdata_mutex, flags);
        old_devdata = rcu_dereference_check(devdata,
                        lockdep_is_held(&devdata_mutex));
@@ -1074,18 +1086,16 @@ static struct vio_device_id nx842_vio_driver_ids[] = {
 static struct vio_driver nx842_vio_driver = {
        .name = KBUILD_MODNAME,
        .probe = nx842_probe,
-       .remove = __exit_p(nx842_remove),
+       .remove = nx842_remove,
        .get_desired_dma = nx842_get_desired_dma,
        .id_table = nx842_vio_driver_ids,
 };
 
-static int __init nx842_init(void)
+static int __init nx842_pseries_init(void)
 {
        struct nx842_devdata *new_devdata;
        int ret;
 
-       pr_info("Registering IBM Power 842 compression driver\n");
-
        if (!of_find_compatible_node(NULL, NULL, "ibm,compression"))
                return -ENODEV;
 
@@ -1095,7 +1105,6 @@ static int __init nx842_init(void)
                pr_err("Could not allocate memory for device data\n");
                return -ENOMEM;
        }
-       new_devdata->status = UNAVAILABLE;
        RCU_INIT_POINTER(devdata, new_devdata);
 
        ret = vio_register_driver(&nx842_vio_driver);
@@ -1106,24 +1115,18 @@ static int __init nx842_init(void)
                return ret;
        }
 
-       if (!nx842_platform_driver_set(&nx842_pseries_driver)) {
-               vio_unregister_driver(&nx842_vio_driver);
-               kfree(new_devdata);
-               return -EEXIST;
-       }
-
        return 0;
 }
 
-module_init(nx842_init);
+module_init(nx842_pseries_init);
 
-static void __exit nx842_exit(void)
+static void __exit nx842_pseries_exit(void)
 {
        struct nx842_devdata *old_devdata;
        unsigned long flags;
 
-       pr_info("Exiting IBM Power 842 compression driver\n");
-       nx842_platform_driver_unset(&nx842_pseries_driver);
+       crypto_unregister_alg(&nx842_pseries_alg);
+
        spin_lock_irqsave(&devdata_mutex, flags);
        old_devdata = rcu_dereference_check(devdata,
                        lockdep_is_held(&devdata_mutex));
@@ -1136,5 +1139,5 @@ static void __exit nx842_exit(void)
        vio_unregister_driver(&nx842_vio_driver);
 }
 
-module_exit(nx842_exit);
+module_exit(nx842_pseries_exit);
 
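Throughout this driver devdata is an RCU-protected pointer: updaters swap it under devdata_mutex while readers run locklessly, so the old copy may only be freed after a grace period. The exit paths above all reduce to this idiom (a sketch consistent with the code, not a verbatim excerpt):

    static void devdata_teardown(void)
    {
            struct nx842_devdata *old_devdata;
            unsigned long flags;

            spin_lock_irqsave(&devdata_mutex, flags);
            old_devdata = rcu_dereference_check(devdata,
                            lockdep_is_held(&devdata_mutex));
            RCU_INIT_POINTER(devdata, NULL);
            spin_unlock_irqrestore(&devdata_mutex, flags);

            synchronize_rcu();      /* wait for lockless readers to finish */
            kfree(old_devdata);
    }
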
index 6e5e0d60d0c8c9b887a19fe9e9f7a193ee298882..046c1c45411bbc7fe21b5207479644130183ac68 100644 (file)
@@ -1,10 +1,5 @@
 /*
- * Driver frontend for IBM Power 842 compression accelerator
- *
- * Copyright (C) 2015 Dan Streetman, IBM Corp
- *
- * Designer of the Power data compression engine:
- *   Bulent Abali <abali@us.ibm.com>
+ * Cryptographic API for the NX-842 hardware compression.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
+ *
+ * Copyright (C) IBM Corporation, 2011-2015
+ *
+ * Designer of the Power data compression engine:
+ *   Bulent Abali <abali@us.ibm.com>
+ *
+ * Original Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
+ *                   Seth Jennings <sjenning@linux.vnet.ibm.com>
+ *
+ * Rewrite: Dan Streetman <ddstreet@ieee.org>
+ *
+ * This is an interface to the NX-842 compression hardware in PowerPC
+ * processors.  Most of the complexity of this driver is due to the fact that
+ * the NX-842 compression hardware requires the input and output data buffers
+ * to be specifically aligned, to be a specific multiple in length, and to be
+ * within specific minimum and maximum lengths.  Those restrictions, provided by the
+ * nx-842 driver via nx842_constraints, mean this driver must use bounce
+ * buffers and headers to correct misaligned in or out buffers, and to split
+ * input buffers that are too large.
+ *
+ * This driver will fall back to software decompression if the hardware
+ * decompression fails, so this driver's decompression should never fail as
+ * long as the provided compressed buffer is valid.  Any compressed buffer
+ * created by this driver will have a header (except ones where the input
+ * perfectly matches the constraints); so users of this driver cannot simply
+ * pass a compressed buffer created by this driver over to the 842 software
+ * decompression library.  Instead, users must use this driver to decompress;
+ * if the hardware fails or is unavailable, the compressed buffer will be
+ * parsed and the header removed, and the raw 842 buffer(s) passed to the 842
+ * software decompression library.
+ *
+ * This does not fall back to software compression, however, since the caller
+ * of this function is specifically requesting hardware compression; if the
+ * hardware compression fails, the caller can fall back to software
+ * compression, and the raw 842 compressed buffer that the software compressor
+ * creates can be passed to this driver for hardware decompression; any
+ * buffer without our specific header magic is assumed to be a raw 842 buffer
+ * and passed directly to the hardware.  Note that the software compression
+ * library will produce a compressed buffer that is incompatible with the
+ * hardware decompressor if the original input buffer length is not a multiple
+ * of 8; if such a compressed buffer is passed to this driver for
+ * decompression, the hardware will reject it and this driver will then pass
+ * it over to the software library for decompression.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include "nx-842.h"
+#include <linux/vmalloc.h>
+#include <linux/sw842.h>
+#include <linux/spinlock.h>
 
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
-MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
+#include "nx-842.h"
 
-/**
- * nx842_constraints
- *
- * This provides the driver's constraints.  Different nx842 implementations
- * may have varying requirements.  The constraints are:
- *   @alignment:       All buffers should be aligned to this
- *   @multiple:                All buffer lengths should be a multiple of this
- *   @minimum:         Buffer lengths must not be less than this amount
- *   @maximum:         Buffer lengths must not be more than this amount
- *
- * The constraints apply to all buffers and lengths, both input and output,
- * for both compression and decompression, except for the minimum which
- * only applies to compression input and decompression output; the
- * compressed data can be less than the minimum constraint.  It can be
- * assumed that compressed data will always adhere to the multiple
- * constraint.
- *
- * The driver may succeed even if these constraints are violated;
- * however the driver can return failure or suffer reduced performance
- * if any constraint is not met.
+/* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit
+ * template (see lib/842/842.h), so this magic number will never appear at
+ * the start of a raw 842 compressed buffer.  That is important, as any buffer
+ * passed to us without this magic is assumed to be a raw 842 compressed
+ * buffer, and passed directly to the hardware to decompress.
  */
-int nx842_constraints(struct nx842_constraints *c)
+#define NX842_CRYPTO_MAGIC     (0xf842)
+#define NX842_CRYPTO_HEADER_SIZE(g)                            \
+       (sizeof(struct nx842_crypto_header) +                   \
+        sizeof(struct nx842_crypto_header_group) * (g))
+#define NX842_CRYPTO_HEADER_MAX_SIZE                           \
+       NX842_CRYPTO_HEADER_SIZE(NX842_CRYPTO_GROUP_MAX)
+
+/* bounce buffer size */
+#define BOUNCE_BUFFER_ORDER    (2)
+#define BOUNCE_BUFFER_SIZE                                     \
+       ((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER))
+
+/* try longer on comp because we can fall back to sw decomp if hw is busy */
+#define COMP_BUSY_TIMEOUT      (250) /* ms */
+#define DECOMP_BUSY_TIMEOUT    (50) /* ms */
+
+struct nx842_crypto_param {
+       u8 *in;
+       unsigned int iremain;
+       u8 *out;
+       unsigned int oremain;
+       unsigned int ototal;
+};
+
+static int update_param(struct nx842_crypto_param *p,
+                       unsigned int slen, unsigned int dlen)
 {
-       memcpy(c, nx842_platform_driver()->constraints, sizeof(*c));
+       if (p->iremain < slen)
+               return -EOVERFLOW;
+       if (p->oremain < dlen)
+               return -ENOSPC;
+
+       p->in += slen;
+       p->iremain -= slen;
+       p->out += dlen;
+       p->oremain -= dlen;
+       p->ototal += dlen;
+
        return 0;
 }
-EXPORT_SYMBOL_GPL(nx842_constraints);
 
-/**
- * nx842_workmem_size
- *
- * Get the amount of working memory the driver requires.
- */
-size_t nx842_workmem_size(void)
+int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver)
 {
-       return nx842_platform_driver()->workmem_size;
+       struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       spin_lock_init(&ctx->lock);
+       ctx->driver = driver;
+       ctx->wmem = kmalloc(driver->workmem_size, GFP_KERNEL);
+       ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
+       ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
+       if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) {
+               kfree(ctx->wmem);
+               free_pages((unsigned long)ctx->sbounce, BOUNCE_BUFFER_ORDER);
+               free_pages((unsigned long)ctx->dbounce, BOUNCE_BUFFER_ORDER);
+               return -ENOMEM;
+       }
+
+       return 0;
 }
-EXPORT_SYMBOL_GPL(nx842_workmem_size);
+EXPORT_SYMBOL_GPL(nx842_crypto_init);
 
-int nx842_compress(const unsigned char *in, unsigned int ilen,
-                  unsigned char *out, unsigned int *olen, void *wmem)
+void nx842_crypto_exit(struct crypto_tfm *tfm)
 {
-       return nx842_platform_driver()->compress(in, ilen, out, olen, wmem);
+       struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       kfree(ctx->wmem);
+       free_pages((unsigned long)ctx->sbounce, BOUNCE_BUFFER_ORDER);
+       free_pages((unsigned long)ctx->dbounce, BOUNCE_BUFFER_ORDER);
 }
-EXPORT_SYMBOL_GPL(nx842_compress);
+EXPORT_SYMBOL_GPL(nx842_crypto_exit);
 
-int nx842_decompress(const unsigned char *in, unsigned int ilen,
-                    unsigned char *out, unsigned int *olen, void *wmem)
+static void check_constraints(struct nx842_constraints *c)
 {
-       return nx842_platform_driver()->decompress(in, ilen, out, olen, wmem);
+       /* limit maximum, to always have enough bounce buffer to decompress */
+       if (c->maximum > BOUNCE_BUFFER_SIZE)
+               c->maximum = BOUNCE_BUFFER_SIZE;
 }
-EXPORT_SYMBOL_GPL(nx842_decompress);
 
-static __init int nx842_init(void)
+static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf)
 {
-       request_module("nx-compress-powernv");
-       request_module("nx-compress-pseries");
+       int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
 
-       /* we prevent loading if there's no platform driver, and we get the
-        * module that set it so it won't unload, so we don't need to check
-        * if it's set in any of the above functions
-        */
-       if (!nx842_platform_driver_get()) {
-               pr_err("no nx842 driver found.\n");
-               return -ENODEV;
+       /* compress should have added space for header */
+       if (s > be16_to_cpu(hdr->group[0].padding)) {
+               pr_err("Internal error: no space for header\n");
+               return -EINVAL;
        }
 
+       memcpy(buf, hdr, s);
+
+       print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1, buf, s, 0);
+
        return 0;
 }
-module_init(nx842_init);
 
-static void __exit nx842_exit(void)
+static int compress(struct nx842_crypto_ctx *ctx,
+                   struct nx842_crypto_param *p,
+                   struct nx842_crypto_header_group *g,
+                   struct nx842_constraints *c,
+                   u16 *ignore,
+                   unsigned int hdrsize)
+{
+       unsigned int slen = p->iremain, dlen = p->oremain, tmplen;
+       unsigned int adj_slen = slen;
+       u8 *src = p->in, *dst = p->out;
+       int ret, dskip = 0;
+       ktime_t timeout;
+
+       if (p->iremain == 0)
+               return -EOVERFLOW;
+
+       if (p->oremain == 0 || hdrsize + c->minimum > dlen)
+               return -ENOSPC;
+
+       if (slen % c->multiple)
+               adj_slen = round_up(slen, c->multiple);
+       if (slen < c->minimum)
+               adj_slen = c->minimum;
+       if (slen > c->maximum)
+               adj_slen = slen = c->maximum;
+       if (adj_slen > slen || (u64)src % c->alignment) {
+               adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE);
+               slen = min(slen, BOUNCE_BUFFER_SIZE);
+               if (adj_slen > slen)
+                       memset(ctx->sbounce + slen, 0, adj_slen - slen);
+               memcpy(ctx->sbounce, src, slen);
+               src = ctx->sbounce;
+               slen = adj_slen;
+               pr_debug("using comp sbounce buffer, len %x\n", slen);
+       }
+
+       dst += hdrsize;
+       dlen -= hdrsize;
+
+       if ((u64)dst % c->alignment) {
+               dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst);
+               dst += dskip;
+               dlen -= dskip;
+       }
+       if (dlen % c->multiple)
+               dlen = round_down(dlen, c->multiple);
+       if (dlen < c->minimum) {
+nospc:
+               dst = ctx->dbounce;
+               dlen = min(p->oremain, BOUNCE_BUFFER_SIZE);
+               dlen = round_down(dlen, c->multiple);
+               dskip = 0;
+               pr_debug("using comp dbounce buffer, len %x\n", dlen);
+       }
+       if (dlen > c->maximum)
+               dlen = c->maximum;
+
+       tmplen = dlen;
+       timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT);
+       do {
+               dlen = tmplen; /* reset dlen, if we're retrying */
+               ret = ctx->driver->compress(src, slen, dst, &dlen, ctx->wmem);
+               /* possibly we should reduce the slen here, instead of
+                * retrying with the dbounce buffer?
+                */
+               if (ret == -ENOSPC && dst != ctx->dbounce)
+                       goto nospc;
+       } while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
+       if (ret)
+               return ret;
+
+       dskip += hdrsize;
+
+       if (dst == ctx->dbounce)
+               memcpy(p->out + dskip, dst, dlen);
+
+       g->padding = cpu_to_be16(dskip);
+       g->compressed_length = cpu_to_be32(dlen);
+       g->uncompressed_length = cpu_to_be32(slen);
+
+       if (p->iremain < slen) {
+               *ignore = slen - p->iremain;
+               slen = p->iremain;
+       }
+
+       pr_debug("compress slen %x ignore %x dlen %x padding %x\n",
+                slen, *ignore, dlen, dskip);
+
+       return update_param(p, slen, dskip + dlen);
+}
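As a worked example of the adjustment logic above, assume constraints of multiple 8, minimum 16, alignment 128, and a 100-byte input at an unaligned address (these constraint values are illustrative):

	adj_slen = round_up(100, 8);	/* == 104, multiple constraint */
	/* src is copied into sbounce, bytes 100..103 are zeroed, and the
	 * hw compresses slen = 104; g->uncompressed_length records 104,
	 * and since only 100 bytes were supplied, *ignore = 104 - 100 = 4
	 * so decompression can drop the padding again */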
+
+int nx842_crypto_compress(struct crypto_tfm *tfm,
+                         const u8 *src, unsigned int slen,
+                         u8 *dst, unsigned int *dlen)
+{
+       struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct nx842_crypto_header *hdr = &ctx->header;
+       struct nx842_crypto_param p;
+       struct nx842_constraints c = *ctx->driver->constraints;
+       unsigned int groups, hdrsize, h;
+       int ret, n;
+       bool add_header;
+       u16 ignore = 0;
+
+       check_constraints(&c);
+
+       p.in = (u8 *)src;
+       p.iremain = slen;
+       p.out = dst;
+       p.oremain = *dlen;
+       p.ototal = 0;
+
+       *dlen = 0;
+
+       groups = min_t(unsigned int, NX842_CRYPTO_GROUP_MAX,
+                      DIV_ROUND_UP(p.iremain, c.maximum));
+       hdrsize = NX842_CRYPTO_HEADER_SIZE(groups);
+
+       spin_lock_bh(&ctx->lock);
+
+       /* skip adding header if the buffers meet all constraints */
+       add_header = (p.iremain % c.multiple    ||
+                     p.iremain < c.minimum     ||
+                     p.iremain > c.maximum     ||
+                     (u64)p.in % c.alignment   ||
+                     p.oremain % c.multiple    ||
+                     p.oremain < c.minimum     ||
+                     p.oremain > c.maximum     ||
+                     (u64)p.out % c.alignment);
+
+       hdr->magic = cpu_to_be16(NX842_CRYPTO_MAGIC);
+       hdr->groups = 0;
+       hdr->ignore = 0;
+
+       while (p.iremain > 0) {
+               n = hdr->groups++;
+               ret = -ENOSPC;
+               if (hdr->groups > NX842_CRYPTO_GROUP_MAX)
+                       goto unlock;
+
+               /* header goes before first group */
+               h = !n && add_header ? hdrsize : 0;
+
+               if (ignore)
+                       pr_warn("internal error, ignore is set %x\n", ignore);
+
+               ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h);
+               if (ret)
+                       goto unlock;
+       }
+
+       if (!add_header && hdr->groups > 1) {
+               pr_err("Internal error: No header but multiple groups\n");
+               ret = -EINVAL;
+               goto unlock;
+       }
+
+       /* ignore indicates the input stream needed to be padded */
+       hdr->ignore = cpu_to_be16(ignore);
+       if (ignore)
+               pr_debug("marked %d bytes as ignore\n", ignore);
+
+       if (add_header)
+               ret = nx842_crypto_add_header(hdr, dst);
+       if (ret)
+               goto unlock;
+
+       *dlen = p.ototal;
+
+       pr_debug("compress total slen %x dlen %x\n", slen, *dlen);
+
+unlock:
+       spin_unlock_bh(&ctx->lock);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(nx842_crypto_compress);
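When add_header is set, the output buffer assembled above is laid out as follows (sketch, one group shown):

	[ nx842_crypto_header | alignment pad | 842 group data ... ]
	  |<--- group[0].padding == dskip --->|

i.e. each group's padding field counts all bytes before its data, including the header itself, which is why dskip has hdrsize added to it in compress().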
+
+static int decompress(struct nx842_crypto_ctx *ctx,
+                     struct nx842_crypto_param *p,
+                     struct nx842_crypto_header_group *g,
+                     struct nx842_constraints *c,
+                     u16 ignore)
 {
-       nx842_platform_driver_put();
+       unsigned int slen = be32_to_cpu(g->compressed_length);
+       unsigned int required_len = be32_to_cpu(g->uncompressed_length);
+       unsigned int dlen = p->oremain, tmplen;
+       unsigned int adj_slen = slen;
+       u8 *src = p->in, *dst = p->out;
+       u16 padding = be16_to_cpu(g->padding);
+       int ret, spadding = 0, dpadding = 0;
+       ktime_t timeout;
+
+       if (!slen || !required_len)
+               return -EINVAL;
+
+       if (p->iremain <= 0 || padding + slen > p->iremain)
+               return -EOVERFLOW;
+
+       if (p->oremain <= 0 || required_len - ignore > p->oremain)
+               return -ENOSPC;
+
+       src += padding;
+
+       if (slen % c->multiple)
+               adj_slen = round_up(slen, c->multiple);
+       if (slen < c->minimum)
+               adj_slen = c->minimum;
+       if (slen > c->maximum)
+               goto usesw;
+       if (slen < adj_slen || (u64)src % c->alignment) {
+               /* we can append padding bytes because the 842 format defines
+                * an "end" template (see lib/842/842_decompress.c) and will
+                * ignore any bytes following it.
+                */
+               if (slen < adj_slen)
+                       memset(ctx->sbounce + slen, 0, adj_slen - slen);
+               memcpy(ctx->sbounce, src, slen);
+               src = ctx->sbounce;
+               spadding = adj_slen - slen;
+               slen = adj_slen;
+               pr_debug("using decomp sbounce buffer, len %x\n", slen);
+       }
+
+       if (dlen % c->multiple)
+               dlen = round_down(dlen, c->multiple);
+       if (dlen < required_len || (u64)dst % c->alignment) {
+               dst = ctx->dbounce;
+               dlen = min(required_len, BOUNCE_BUFFER_SIZE);
+               pr_debug("using decomp dbounce buffer, len %x\n", dlen);
+       }
+       if (dlen < c->minimum)
+               goto usesw;
+       if (dlen > c->maximum)
+               dlen = c->maximum;
+
+       tmplen = dlen;
+       timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT);
+       do {
+               dlen = tmplen; /* reset dlen, if we're retrying */
+               ret = ctx->driver->decompress(src, slen, dst, &dlen, ctx->wmem);
+       } while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
+       if (ret) {
+usesw:
+               /* reset everything, sw doesn't have constraints */
+               src = p->in + padding;
+               slen = be32_to_cpu(g->compressed_length);
+               spadding = 0;
+               dst = p->out;
+               dlen = p->oremain;
+               dpadding = 0;
+               if (dlen < required_len) { /* have ignore bytes */
+                       dst = ctx->dbounce;
+                       dlen = BOUNCE_BUFFER_SIZE;
+               }
+               pr_info_ratelimited("using software 842 decompression\n");
+               ret = sw842_decompress(src, slen, dst, &dlen);
+       }
+       if (ret)
+               return ret;
+
+       slen -= spadding;
+
+       dlen -= ignore;
+       if (ignore)
+               pr_debug("ignoring last %x bytes\n", ignore);
+
+       if (dst == ctx->dbounce)
+               memcpy(p->out, dst, dlen);
+
+       pr_debug("decompress slen %x padding %x dlen %x ignore %x\n",
+                slen, padding, dlen, ignore);
+
+       return update_param(p, slen + padding, dlen);
 }
-module_exit(nx842_exit);
+
+int nx842_crypto_decompress(struct crypto_tfm *tfm,
+                           const u8 *src, unsigned int slen,
+                           u8 *dst, unsigned int *dlen)
+{
+       struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct nx842_crypto_header *hdr;
+       struct nx842_crypto_param p;
+       struct nx842_constraints c = *ctx->driver->constraints;
+       int n, ret, hdr_len;
+       u16 ignore = 0;
+
+       check_constraints(&c);
+
+       p.in = (u8 *)src;
+       p.iremain = slen;
+       p.out = dst;
+       p.oremain = *dlen;
+       p.ototal = 0;
+
+       *dlen = 0;
+
+       hdr = (struct nx842_crypto_header *)src;
+
+       spin_lock_bh(&ctx->lock);
+
+       /* If it doesn't start with our header magic number, assume it's a raw
+        * 842 compressed buffer and pass it directly to the hardware driver
+        */
+       if (be16_to_cpu(hdr->magic) != NX842_CRYPTO_MAGIC) {
+               struct nx842_crypto_header_group g = {
+                       .padding =              0,
+                       .compressed_length =    cpu_to_be32(p.iremain),
+                       .uncompressed_length =  cpu_to_be32(p.oremain),
+               };
+
+               ret = decompress(ctx, &p, &g, &c, 0);
+               if (ret)
+                       goto unlock;
+
+               goto success;
+       }
+
+       if (!hdr->groups) {
+               pr_err("header has no groups\n");
+               ret = -EINVAL;
+               goto unlock;
+       }
+       if (hdr->groups > NX842_CRYPTO_GROUP_MAX) {
+               pr_err("header has too many groups %x, max %x\n",
+                      hdr->groups, NX842_CRYPTO_GROUP_MAX);
+               ret = -EINVAL;
+               goto unlock;
+       }
+
+       hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
+       if (hdr_len > slen) {
+               ret = -EOVERFLOW;
+               goto unlock;
+       }
+
+       memcpy(&ctx->header, src, hdr_len);
+       hdr = &ctx->header;
+
+       for (n = 0; n < hdr->groups; n++) {
+               /* ignore applies to last group */
+               if (n + 1 == hdr->groups)
+                       ignore = be16_to_cpu(hdr->ignore);
+
+               ret = decompress(ctx, &p, &hdr->group[n], &c, ignore);
+               if (ret)
+                       goto unlock;
+       }
+
+success:
+       *dlen = p.ototal;
+
+       pr_debug("decompress total slen %x dlen %x\n", slen, *dlen);
+
+       ret = 0;
+
+unlock:
+       spin_unlock_bh(&ctx->lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(nx842_crypto_decompress);
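For example (a sketch; tfm setup, error handling, and the assumed comp_size/out_size buffer sizes are omitted), a raw buffer produced by the software library from an 8-byte-multiple input decompresses through this entry point with no header at all:

	unsigned int clen = comp_size, olen = out_size;

	sw842_compress(in, 4096, comp, &clen, wmem);	/* raw 842 stream */
	nx842_crypto_decompress(tfm, comp, clen, out, &olen);
	/* no header magic, so the buffer goes straight to the hardware */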
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IBM PowerPC Nest (NX) 842 Hardware Compression Driver");
+MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
index ac0ea79d0f8b98c1d8123f1017256eb145cfc8f4..a4eee3bba93736b601ca587fb702b4e88de1b9a9 100644 (file)
@@ -3,8 +3,9 @@
 #define __NX_842_H__
 
 #include <linux/kernel.h>
+#include <linux/init.h>
 #include <linux/module.h>
-#include <linux/sw842.h>
+#include <linux/crypto.h>
 #include <linux/of.h>
 #include <linux/slab.h>
 #include <linux/io.h>
@@ -104,6 +105,25 @@ static inline unsigned long nx842_get_pa(void *addr)
 #define GET_FIELD(v, m)                (((v) & (m)) >> MASK_LSH(m))
 #define SET_FIELD(v, m, val)   (((v) & ~(m)) | (((val) << MASK_LSH(m)) & (m)))
 
+/**
+ * This provides the driver's constraints.  Different nx842 implementations
+ * may have varying requirements.  The constraints are:
+ *   @alignment:       All buffers should be aligned to this
+ *   @multiple:                All buffer lengths should be a multiple of this
+ *   @minimum:         Buffer lengths must not be less than this amount
+ *   @maximum:         Buffer lengths must not be more than this amount
+ *
+ * The constraints apply to all buffers and lengths, both input and output,
+ * for both compression and decompression, except for the minimum which
+ * only applies to compression input and decompression output; the
+ * compressed data can be less than the minimum constraint.  It can be
+ * assumed that compressed data will always adhere to the multiple
+ * constraint.
+ *
+ * The driver may succeed even if these constraints are violated;
+ * however the driver can return failure or suffer reduced performance
+ * if any constraint is not met.
+ */
 struct nx842_constraints {
        int alignment;
        int multiple;
@@ -126,19 +146,40 @@ struct nx842_driver {
                          void *wrkmem);
 };
 
-struct nx842_driver *nx842_platform_driver(void);
-bool nx842_platform_driver_set(struct nx842_driver *driver);
-void nx842_platform_driver_unset(struct nx842_driver *driver);
-bool nx842_platform_driver_get(void);
-void nx842_platform_driver_put(void);
+struct nx842_crypto_header_group {
+       __be16 padding;                 /* unused bytes at start of group */
+       __be32 compressed_length;       /* compressed bytes in group */
+       __be32 uncompressed_length;     /* bytes after decompression */
+} __packed;
+
+struct nx842_crypto_header {
+       __be16 magic;           /* NX842_CRYPTO_MAGIC */
+       __be16 ignore;          /* decompressed end bytes to ignore */
+       u8 groups;              /* total groups in this header */
+       struct nx842_crypto_header_group group[];
+} __packed;
 
-size_t nx842_workmem_size(void);
+#define NX842_CRYPTO_GROUP_MAX (0x20)
 
-int nx842_constraints(struct nx842_constraints *constraints);
+struct nx842_crypto_ctx {
+       spinlock_t lock;
+
+       u8 *wmem;
+       u8 *sbounce, *dbounce;
+
+       struct nx842_crypto_header header;
+       struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX];
+
+       struct nx842_driver *driver;
+};
 
-int nx842_compress(const unsigned char *in, unsigned int in_len,
-                  unsigned char *out, unsigned int *out_len, void *wrkmem);
-int nx842_decompress(const unsigned char *in, unsigned int in_len,
-                    unsigned char *out, unsigned int *out_len, void *wrkmem);
+int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver);
+void nx842_crypto_exit(struct crypto_tfm *tfm);
+int nx842_crypto_compress(struct crypto_tfm *tfm,
+                         const u8 *src, unsigned int slen,
+                         u8 *dst, unsigned int *dlen);
+int nx842_crypto_decompress(struct crypto_tfm *tfm,
+                           const u8 *src, unsigned int slen,
+                           u8 *dst, unsigned int *dlen);
 
 #endif /* __NX_842_H__ */
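A platform backend wires these helpers into a crypto_alg; a minimal sketch of that glue, using a hypothetical nx842_pdrv_driver descriptor (the powernv/pseries backends follow this shape), might be:

static int nx842_pdrv_crypto_init(struct crypto_tfm *tfm)
{
	/* bind the common crypto glue to this platform's driver ops */
	return nx842_crypto_init(tfm, &nx842_pdrv_driver);
}

static struct crypto_alg nx842_pdrv_alg = {
	.cra_name		= "842",
	.cra_driver_name	= "842-nx",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
	.cra_ctxsize		= sizeof(struct nx842_crypto_ctx),
	.cra_module		= THIS_MODULE,
	.cra_init		= nx842_pdrv_crypto_init,
	.cra_exit		= nx842_crypto_exit,
	.cra_u			= { .compress = {
		.coa_compress	= nx842_crypto_compress,
		.coa_decompress	= nx842_crypto_decompress } },
};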
index e4311ce0cd78cfc93eea1cec30a37a9ec3b54d25..73ef499227881e6fd5c71c65d00f486a1f173dd4 100644 (file)
@@ -94,8 +94,6 @@ static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
                return -EINVAL;
        }
 
-       crypto_aead_crt(tfm)->authsize = authsize;
-
        return 0;
 }
 
@@ -111,8 +109,6 @@ static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
                return -EINVAL;
        }
 
-       crypto_aead_crt(tfm)->authsize = authsize;
-
        return 0;
 }
 
@@ -174,6 +170,7 @@ static int generate_pat(u8                   *iv,
                        struct nx_crypto_ctx *nx_ctx,
                        unsigned int          authsize,
                        unsigned int          nbytes,
+                       unsigned int          assoclen,
                        u8                   *out)
 {
        struct nx_sg *nx_insg = nx_ctx->in_sg;
@@ -200,16 +197,16 @@ static int generate_pat(u8                   *iv,
         * greater than 2^32.
         */
 
-       if (!req->assoclen) {
+       if (!assoclen) {
                b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
-       } else if (req->assoclen <= 14) {
+       } else if (assoclen <= 14) {
                /* if associated data is 14 bytes or less, we do 1 GCM
                 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
                 * which is fed in through the source buffers here */
                b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
                b1 = nx_ctx->priv.ccm.iauth_tag;
-               iauth_len = req->assoclen;
-       } else if (req->assoclen <= 65280) {
+               iauth_len = assoclen;
+       } else if (assoclen <= 65280) {
                /* if associated data is less than (2^16 - 2^8), we construct
                 * B1 differently and feed in the associated data to a CCA
                 * operation */
@@ -223,7 +220,7 @@ static int generate_pat(u8                   *iv,
        }
 
        /* generate B0 */
-       rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
+       rc = generate_b0(iv, assoclen, authsize, nbytes, b0);
        if (rc)
                return rc;
 
@@ -233,22 +230,22 @@ static int generate_pat(u8                   *iv,
         */
        if (b1) {
                memset(b1, 0, 16);
-               if (req->assoclen <= 65280) {
-                       *(u16 *)b1 = (u16)req->assoclen;
-                       scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
+               if (assoclen <= 65280) {
+                       *(u16 *)b1 = assoclen;
+                       scatterwalk_map_and_copy(b1 + 2, req->src, 0,
                                         iauth_len, SCATTERWALK_FROM_SG);
                } else {
                        *(u16 *)b1 = (u16)(0xfffe);
-                       *(u32 *)&b1[2] = (u32)req->assoclen;
-                       scatterwalk_map_and_copy(b1 + 6, req->assoc, 0,
+                       *(u32 *)&b1[2] = assoclen;
+                       scatterwalk_map_and_copy(b1 + 6, req->src, 0,
                                         iauth_len, SCATTERWALK_FROM_SG);
                }
        }
 
        /* now copy any remaining AAD to scatterlist and call nx... */
-       if (!req->assoclen) {
+       if (!assoclen) {
                return rc;
-       } else if (req->assoclen <= 14) {
+       } else if (assoclen <= 14) {
                unsigned int len = 16;
 
                nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);
@@ -280,7 +277,7 @@ static int generate_pat(u8                   *iv,
                        return rc;
 
                atomic_inc(&(nx_ctx->stats->aes_ops));
-               atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+               atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);
 
        } else {
                unsigned int processed = 0, to_process;
@@ -294,15 +291,15 @@ static int generate_pat(u8                   *iv,
                                nx_ctx->ap->databytelen/NX_PAGE_SIZE);
 
                do {
-                       to_process = min_t(u32, req->assoclen - processed,
+                       to_process = min_t(u32, assoclen - processed,
                                           nx_ctx->ap->databytelen);
 
                        nx_insg = nx_walk_and_build(nx_ctx->in_sg,
                                                    nx_ctx->ap->sglen,
-                                                   req->assoc, processed,
+                                                   req->src, processed,
                                                    &to_process);
 
-                       if ((to_process + processed) < req->assoclen) {
+                       if ((to_process + processed) < assoclen) {
                                NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
                                        NX_FDM_INTERMEDIATE;
                        } else {
@@ -328,11 +325,10 @@ static int generate_pat(u8                   *iv,
                        NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;
 
                        atomic_inc(&(nx_ctx->stats->aes_ops));
-                       atomic64_add(req->assoclen,
-                                       &(nx_ctx->stats->aes_bytes));
+                       atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);
 
                        processed += to_process;
-               } while (processed < req->assoclen);
+               } while (processed < assoclen);
 
                result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
        }
@@ -343,7 +339,8 @@ static int generate_pat(u8                   *iv,
 }
 
 static int ccm_nx_decrypt(struct aead_request   *req,
-                         struct blkcipher_desc *desc)
+                         struct blkcipher_desc *desc,
+                         unsigned int assoclen)
 {
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
@@ -360,10 +357,10 @@ static int ccm_nx_decrypt(struct aead_request   *req,
 
        /* copy out the auth tag to compare with later */
        scatterwalk_map_and_copy(priv->oauth_tag,
-                                req->src, nbytes, authsize,
+                                req->src, nbytes + req->assoclen, authsize,
                                 SCATTERWALK_FROM_SG);
 
-       rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
+       rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
                          csbcpb->cpb.aes_ccm.in_pat_or_b0);
        if (rc)
                goto out;
@@ -383,8 +380,8 @@ static int ccm_nx_decrypt(struct aead_request   *req,
                NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
 
                rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
-                                       &to_process, processed,
-                                       csbcpb->cpb.aes_ccm.iv_or_ctr);
+                                      &to_process, processed + req->assoclen,
+                                      csbcpb->cpb.aes_ccm.iv_or_ctr);
                if (rc)
                        goto out;
 
@@ -420,7 +417,8 @@ out:
 }
 
 static int ccm_nx_encrypt(struct aead_request   *req,
-                         struct blkcipher_desc *desc)
+                         struct blkcipher_desc *desc,
+                         unsigned int assoclen)
 {
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
@@ -432,7 +430,7 @@ static int ccm_nx_encrypt(struct aead_request   *req,
 
        spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-       rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
+       rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
                          csbcpb->cpb.aes_ccm.in_pat_or_b0);
        if (rc)
                goto out;
@@ -451,7 +449,7 @@ static int ccm_nx_encrypt(struct aead_request   *req,
                NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
 
                rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
-                                       &to_process, processed,
+                                      &to_process, processed + req->assoclen,
                                       csbcpb->cpb.aes_ccm.iv_or_ctr);
                if (rc)
                        goto out;
@@ -483,7 +481,7 @@ static int ccm_nx_encrypt(struct aead_request   *req,
 
        /* copy out the auth tag */
        scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
-                                req->dst, nbytes, authsize,
+                                req->dst, nbytes + req->assoclen, authsize,
                                 SCATTERWALK_TO_SG);
 
 out:
@@ -503,9 +501,8 @@ static int ccm4309_aes_nx_encrypt(struct aead_request *req)
        memcpy(iv + 4, req->iv, 8);
 
        desc.info = iv;
-       desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
 
-       return ccm_nx_encrypt(req, &desc);
+       return ccm_nx_encrypt(req, &desc, req->assoclen - 8);
 }
 
 static int ccm_aes_nx_encrypt(struct aead_request *req)
@@ -514,13 +511,12 @@ static int ccm_aes_nx_encrypt(struct aead_request *req)
        int rc;
 
        desc.info = req->iv;
-       desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
 
        rc = crypto_ccm_check_iv(desc.info);
        if (rc)
                return rc;
 
-       return ccm_nx_encrypt(req, &desc);
+       return ccm_nx_encrypt(req, &desc, req->assoclen);
 }
 
 static int ccm4309_aes_nx_decrypt(struct aead_request *req)
@@ -535,9 +531,8 @@ static int ccm4309_aes_nx_decrypt(struct aead_request *req)
        memcpy(iv + 4, req->iv, 8);
 
        desc.info = iv;
-       desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
 
-       return ccm_nx_decrypt(req, &desc);
+       return ccm_nx_decrypt(req, &desc, req->assoclen - 8);
 }
 
 static int ccm_aes_nx_decrypt(struct aead_request *req)
@@ -546,13 +541,12 @@ static int ccm_aes_nx_decrypt(struct aead_request *req)
        int rc;
 
        desc.info = req->iv;
-       desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
 
        rc = crypto_ccm_check_iv(desc.info);
        if (rc)
                return rc;
 
-       return ccm_nx_decrypt(req, &desc);
+       return ccm_nx_decrypt(req, &desc, req->assoclen);
 }
 
 /* tell the block cipher walk routines that this is a stream cipher by
@@ -560,47 +554,42 @@ static int ccm_aes_nx_decrypt(struct aead_request *req)
  * during encrypt/decrypt doesn't solve this problem, because it calls
  * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
  * but instead uses this tfm->blocksize. */
-struct crypto_alg nx_ccm_aes_alg = {
-       .cra_name        = "ccm(aes)",
-       .cra_driver_name = "ccm-aes-nx",
-       .cra_priority    = 300,
-       .cra_flags       = CRYPTO_ALG_TYPE_AEAD |
-                          CRYPTO_ALG_NEED_FALLBACK,
-       .cra_blocksize   = 1,
-       .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
-       .cra_type        = &crypto_aead_type,
-       .cra_module      = THIS_MODULE,
-       .cra_init        = nx_crypto_ctx_aes_ccm_init,
-       .cra_exit        = nx_crypto_ctx_exit,
-       .cra_aead = {
-               .ivsize      = AES_BLOCK_SIZE,
-               .maxauthsize = AES_BLOCK_SIZE,
-               .setkey      = ccm_aes_nx_set_key,
-               .setauthsize = ccm_aes_nx_setauthsize,
-               .encrypt     = ccm_aes_nx_encrypt,
-               .decrypt     = ccm_aes_nx_decrypt,
-       }
+struct aead_alg nx_ccm_aes_alg = {
+       .base = {
+               .cra_name        = "ccm(aes)",
+               .cra_driver_name = "ccm-aes-nx",
+               .cra_priority    = 300,
+               .cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
+               .cra_blocksize   = 1,
+               .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
+               .cra_module      = THIS_MODULE,
+       },
+       .init        = nx_crypto_ctx_aes_ccm_init,
+       .exit        = nx_crypto_ctx_aead_exit,
+       .ivsize      = AES_BLOCK_SIZE,
+       .maxauthsize = AES_BLOCK_SIZE,
+       .setkey      = ccm_aes_nx_set_key,
+       .setauthsize = ccm_aes_nx_setauthsize,
+       .encrypt     = ccm_aes_nx_encrypt,
+       .decrypt     = ccm_aes_nx_decrypt,
 };
 
-struct crypto_alg nx_ccm4309_aes_alg = {
-       .cra_name        = "rfc4309(ccm(aes))",
-       .cra_driver_name = "rfc4309-ccm-aes-nx",
-       .cra_priority    = 300,
-       .cra_flags       = CRYPTO_ALG_TYPE_AEAD |
-                          CRYPTO_ALG_NEED_FALLBACK,
-       .cra_blocksize   = 1,
-       .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
-       .cra_type        = &crypto_nivaead_type,
-       .cra_module      = THIS_MODULE,
-       .cra_init        = nx_crypto_ctx_aes_ccm_init,
-       .cra_exit        = nx_crypto_ctx_exit,
-       .cra_aead = {
-               .ivsize      = 8,
-               .maxauthsize = AES_BLOCK_SIZE,
-               .setkey      = ccm4309_aes_nx_set_key,
-               .setauthsize = ccm4309_aes_nx_setauthsize,
-               .encrypt     = ccm4309_aes_nx_encrypt,
-               .decrypt     = ccm4309_aes_nx_decrypt,
-               .geniv       = "seqiv",
-       }
+struct aead_alg nx_ccm4309_aes_alg = {
+       .base = {
+               .cra_name        = "rfc4309(ccm(aes))",
+               .cra_driver_name = "rfc4309-ccm-aes-nx",
+               .cra_priority    = 300,
+               .cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
+               .cra_blocksize   = 1,
+               .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
+               .cra_module      = THIS_MODULE,
+       },
+       .init        = nx_crypto_ctx_aes_ccm_init,
+       .exit        = nx_crypto_ctx_aead_exit,
+       .ivsize      = 8,
+       .maxauthsize = AES_BLOCK_SIZE,
+       .setkey      = ccm4309_aes_nx_set_key,
+       .setauthsize = ccm4309_aes_nx_setauthsize,
+       .encrypt     = ccm4309_aes_nx_encrypt,
+       .decrypt     = ccm4309_aes_nx_decrypt,
 };
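For reference, the rfc4309 entry points above assemble the 11-byte CCM nonce as salt plus explicit IV; this is also why 8 is subtracted from req->assoclen, since under the new AEAD interface the 8-byte explicit IV is carried at the front of the associated data (sketch, assuming the standard RFC 4309 construction):

	iv[0]    = 3;			/* flags: 4-byte counter field */
	/* iv[1..3] = 3-byte salt saved by ccm4309_aes_nx_set_key */
	memcpy(iv + 4, req->iv, 8);	/* 8-byte explicit IV from request */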
index dd7e9f3f5b6b2edfb0a4e1c1e442523a30d4f74b..898c0a280511d5c49f8d6e076b4905773c1f3b8f 100644 (file)
@@ -144,27 +144,6 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
        return ctr_aes_nx_crypt(desc, dst, src, nbytes);
 }
 
-struct crypto_alg nx_ctr_aes_alg = {
-       .cra_name        = "ctr(aes)",
-       .cra_driver_name = "ctr-aes-nx",
-       .cra_priority    = 300,
-       .cra_flags       = CRYPTO_ALG_TYPE_BLKCIPHER,
-       .cra_blocksize   = 1,
-       .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
-       .cra_type        = &crypto_blkcipher_type,
-       .cra_module      = THIS_MODULE,
-       .cra_init        = nx_crypto_ctx_aes_ctr_init,
-       .cra_exit        = nx_crypto_ctx_exit,
-       .cra_blkcipher = {
-               .min_keysize = AES_MIN_KEY_SIZE,
-               .max_keysize = AES_MAX_KEY_SIZE,
-               .ivsize      = AES_BLOCK_SIZE,
-               .setkey      = ctr_aes_nx_set_key,
-               .encrypt     = ctr_aes_nx_crypt,
-               .decrypt     = ctr_aes_nx_crypt,
-       }
-};
-
 struct crypto_alg nx_ctr3686_aes_alg = {
        .cra_name        = "rfc3686(ctr(aes))",
        .cra_driver_name = "rfc3686-ctr-aes-nx",
index 92c993f08213fbd767c897eae178a5129c5af899..eee624f589b6545a310d081e772d5924094ed70d 100644 (file)
 
 #include <crypto/internal/aead.h>
 #include <crypto/aes.h>
-#include <crypto/algapi.h>
 #include <crypto/scatterwalk.h>
 #include <linux/module.h>
 #include <linux/types.h>
-#include <linux/crypto.h>
 #include <asm/vio.h>
 
 #include "nx_csbcpb.h"
@@ -36,7 +34,7 @@ static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
                              const u8           *in_key,
                              unsigned int        key_len)
 {
-       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
+       struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
 
@@ -75,7 +73,7 @@ static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
                                  const u8           *in_key,
                                  unsigned int        key_len)
 {
-       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
+       struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
        char *nonce = nx_ctx->priv.gcm.nonce;
        int rc;
 
@@ -110,13 +108,14 @@ static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
 
 static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
                  struct aead_request   *req,
-                 u8                    *out)
+                 u8                    *out,
+                 unsigned int assoclen)
 {
        int rc;
        struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
        struct scatter_walk walk;
        struct nx_sg *nx_sg = nx_ctx->in_sg;
-       unsigned int nbytes = req->assoclen;
+       unsigned int nbytes = assoclen;
        unsigned int processed = 0, to_process;
        unsigned int max_sg_len;
 
@@ -167,7 +166,7 @@ static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
                NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;
 
                atomic_inc(&(nx_ctx->stats->aes_ops));
-               atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+               atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));
 
                processed += to_process;
        } while (processed < nbytes);
@@ -177,13 +176,15 @@ static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
        return rc;
 }
 
-static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
+static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
+               unsigned int assoclen)
 {
        int rc;
-       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+       struct nx_crypto_ctx *nx_ctx =
+               crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_sg *nx_sg;
-       unsigned int nbytes = req->assoclen;
+       unsigned int nbytes = assoclen;
        unsigned int processed = 0, to_process;
        unsigned int max_sg_len;
 
@@ -238,7 +239,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
                atomic_inc(&(nx_ctx->stats->aes_ops));
-               atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+               atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));
 
                processed += to_process;
        } while (processed < nbytes);
@@ -253,7 +254,8 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
                     int enc)
 {
        int rc;
-       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+       struct nx_crypto_ctx *nx_ctx =
+               crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        char out[AES_BLOCK_SIZE];
        struct nx_sg *in_sg, *out_sg;
@@ -314,9 +316,11 @@ out:
        return rc;
 }
 
-static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
+static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
+                           unsigned int assoclen)
 {
-       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+       struct nx_crypto_ctx *nx_ctx =
+               crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct blkcipher_desc desc;
@@ -332,10 +336,10 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
        *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
 
        if (nbytes == 0) {
-               if (req->assoclen == 0)
+               if (assoclen == 0)
                        rc = gcm_empty(req, &desc, enc);
                else
-                       rc = gmac(req, &desc);
+                       rc = gmac(req, &desc, assoclen);
                if (rc)
                        goto out;
                else
@@ -343,9 +347,10 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
        }
 
        /* Process associated data */
-       csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
-       if (req->assoclen) {
-               rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
+       csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8;
+       if (assoclen) {
+               rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad,
+                           assoclen);
                if (rc)
                        goto out;
        }
@@ -363,7 +368,6 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
                to_process = nbytes - processed;
 
                csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
-               desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
                rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
                                       req->src, &to_process,
                                       processed + req->assoclen,
@@ -430,7 +434,7 @@ static int gcm_aes_nx_encrypt(struct aead_request *req)
 
        memcpy(iv, req->iv, 12);
 
-       return gcm_aes_nx_crypt(req, 1);
+       return gcm_aes_nx_crypt(req, 1, req->assoclen);
 }
 
 static int gcm_aes_nx_decrypt(struct aead_request *req)
@@ -440,12 +444,13 @@ static int gcm_aes_nx_decrypt(struct aead_request *req)
 
        memcpy(iv, req->iv, 12);
 
-       return gcm_aes_nx_crypt(req, 0);
+       return gcm_aes_nx_crypt(req, 0, req->assoclen);
 }
 
 static int gcm4106_aes_nx_encrypt(struct aead_request *req)
 {
-       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+       struct nx_crypto_ctx *nx_ctx =
+               crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        char *iv = rctx->iv;
        char *nonce = nx_ctx->priv.gcm.nonce;
@@ -453,12 +458,16 @@ static int gcm4106_aes_nx_encrypt(struct aead_request *req)
        memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
        memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
 
-       return gcm_aes_nx_crypt(req, 1);
+       if (req->assoclen < 8)
+               return -EINVAL;
+
+       return gcm_aes_nx_crypt(req, 1, req->assoclen - 8);
 }
 
 static int gcm4106_aes_nx_decrypt(struct aead_request *req)
 {
-       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+       struct nx_crypto_ctx *nx_ctx =
+               crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        char *iv = rctx->iv;
        char *nonce = nx_ctx->priv.gcm.nonce;
@@ -466,7 +475,10 @@ static int gcm4106_aes_nx_decrypt(struct aead_request *req)
        memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
        memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
 
-       return gcm_aes_nx_crypt(req, 0);
+       if (req->assoclen < 8)
+               return -EINVAL;
+
+       return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
 }
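The rfc4106 pairing works the same way: the 4-byte nonce from setkey plus the 8-byte explicit IV form the 12-byte GCM IV, and the explicit IV also rides at the head of the associated data, hence the new assoclen < 8 check and the subtraction (sketch):

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);	/* IV bytes 0..3 */
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);	/* IV bytes 4..11 */
	aad_len = req->assoclen - 8;			/* the AAD proper */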
 
 /* tell the block cipher walk routines that this is a stream cipher by
index 436971343ff7732b3be14af822e29fcc922c1e0f..0794f1cc00182f986f03673a1e2305ea1e15e094 100644 (file)
@@ -596,13 +596,9 @@ static int nx_register_algs(void)
        if (rc)
                goto out_unreg_ecb;
 
-       rc = nx_register_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
-       if (rc)
-               goto out_unreg_cbc;
-
        rc = nx_register_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
        if (rc)
-               goto out_unreg_ctr;
+               goto out_unreg_cbc;
 
        rc = nx_register_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
        if (rc)
@@ -612,11 +608,11 @@ static int nx_register_algs(void)
        if (rc)
                goto out_unreg_gcm;
 
-       rc = nx_register_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+       rc = nx_register_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
        if (rc)
                goto out_unreg_gcm4106;
 
-       rc = nx_register_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+       rc = nx_register_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
        if (rc)
                goto out_unreg_ccm;
 
@@ -644,17 +640,15 @@ out_unreg_s256:
        nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
                            NX_PROPS_SHA256);
 out_unreg_ccm4309:
-       nx_unregister_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+       nx_unregister_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
 out_unreg_ccm:
-       nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+       nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
 out_unreg_gcm4106:
        nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
 out_unreg_gcm:
        nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
 out_unreg_ctr3686:
        nx_unregister_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
-out_unreg_ctr:
-       nx_unregister_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
 out_unreg_cbc:
        nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
 out_unreg_ecb:
@@ -711,11 +705,10 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
 }
 
 /* entry points from the crypto tfm initializers */
-int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm)
 {
-       crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
-                               sizeof(struct nx_ccm_rctx));
-       return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+       crypto_aead_set_reqsize(tfm, sizeof(struct nx_ccm_rctx));
+       return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_CCM);
 }
 
@@ -813,16 +806,15 @@ static int nx_remove(struct vio_dev *viodev)
                                    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256);
                nx_unregister_shash(&nx_shash_sha256_alg,
                                    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512);
-               nx_unregister_alg(&nx_ccm4309_aes_alg,
-                                 NX_FC_AES, NX_MODE_AES_CCM);
-               nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+               nx_unregister_aead(&nx_ccm4309_aes_alg,
+                                  NX_FC_AES, NX_MODE_AES_CCM);
+               nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
                nx_unregister_aead(&nx_gcm4106_aes_alg,
                                   NX_FC_AES, NX_MODE_AES_GCM);
                nx_unregister_aead(&nx_gcm_aes_alg,
                                   NX_FC_AES, NX_MODE_AES_GCM);
                nx_unregister_alg(&nx_ctr3686_aes_alg,
                                  NX_FC_AES, NX_MODE_AES_CTR);
-               nx_unregister_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
                nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
                nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
        }
index cdff03a42ae7e883648a981779bed47bb9909c92..9347878d4f30c13a06ee6e781e72f1c04ef1d7c2 100644 (file)
@@ -149,8 +149,10 @@ struct nx_crypto_ctx {
        } priv;
 };
 
+struct crypto_aead;
+
 /* prototypes */
-int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm);
 int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm);
 int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm);
 int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm);
@@ -187,10 +189,9 @@ extern struct crypto_alg nx_cbc_aes_alg;
 extern struct crypto_alg nx_ecb_aes_alg;
 extern struct aead_alg nx_gcm_aes_alg;
 extern struct aead_alg nx_gcm4106_aes_alg;
-extern struct crypto_alg nx_ctr_aes_alg;
 extern struct crypto_alg nx_ctr3686_aes_alg;
-extern struct crypto_alg nx_ccm_aes_alg;
-extern struct crypto_alg nx_ccm4309_aes_alg;
+extern struct aead_alg nx_ccm_aes_alg;
+extern struct aead_alg nx_ccm4309_aes_alg;
 extern struct shash_alg nx_shash_aes_xcbc_alg;
 extern struct shash_alg nx_shash_sha512_alg;
 extern struct shash_alg nx_shash_sha256_alg;
index 9a28b7e07c71a88c2d4e6e89d3a0c01f323f3efa..eba23147c0ee1de8552aabe4b5d0aeeffa2e6aba 100644 (file)
 #define AES_REG_IV(dd, x)              ((dd)->pdata->iv_ofs + ((x) * 0x04))
 
 #define AES_REG_CTRL(dd)               ((dd)->pdata->ctrl_ofs)
-#define AES_REG_CTRL_CTR_WIDTH_MASK    (3 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_32              (0 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_64              (1 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_96              (2 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_128             (3 << 7)
-#define AES_REG_CTRL_CTR               (1 << 6)
-#define AES_REG_CTRL_CBC               (1 << 5)
-#define AES_REG_CTRL_KEY_SIZE          (3 << 3)
-#define AES_REG_CTRL_DIRECTION         (1 << 2)
-#define AES_REG_CTRL_INPUT_READY       (1 << 1)
-#define AES_REG_CTRL_OUTPUT_READY      (1 << 0)
+#define AES_REG_CTRL_CTR_WIDTH_MASK    GENMASK(8, 7)
+#define AES_REG_CTRL_CTR_WIDTH_32      0
+#define AES_REG_CTRL_CTR_WIDTH_64      BIT(7)
+#define AES_REG_CTRL_CTR_WIDTH_96      BIT(8)
+#define AES_REG_CTRL_CTR_WIDTH_128     GENMASK(8, 7)
+#define AES_REG_CTRL_CTR               BIT(6)
+#define AES_REG_CTRL_CBC               BIT(5)
+#define AES_REG_CTRL_KEY_SIZE          GENMASK(4, 3)
+#define AES_REG_CTRL_DIRECTION         BIT(2)
+#define AES_REG_CTRL_INPUT_READY       BIT(1)
+#define AES_REG_CTRL_OUTPUT_READY      BIT(0)
+#define AES_REG_CTRL_MASK              GENMASK(24, 2)
 
 #define AES_REG_DATA_N(dd, x)          ((dd)->pdata->data_ofs + ((x) * 0x04))
 
 #define AES_REG_REV(dd)                        ((dd)->pdata->rev_ofs)
 
 #define AES_REG_MASK(dd)               ((dd)->pdata->mask_ofs)
-#define AES_REG_MASK_SIDLE             (1 << 6)
-#define AES_REG_MASK_START             (1 << 5)
-#define AES_REG_MASK_DMA_OUT_EN                (1 << 3)
-#define AES_REG_MASK_DMA_IN_EN         (1 << 2)
-#define AES_REG_MASK_SOFTRESET         (1 << 1)
-#define AES_REG_AUTOIDLE               (1 << 0)
+#define AES_REG_MASK_SIDLE             BIT(6)
+#define AES_REG_MASK_START             BIT(5)
+#define AES_REG_MASK_DMA_OUT_EN                BIT(3)
+#define AES_REG_MASK_DMA_IN_EN         BIT(2)
+#define AES_REG_MASK_SOFTRESET         BIT(1)
+#define AES_REG_AUTOIDLE               BIT(0)
 
 #define AES_REG_LENGTH_N(x)            (0x54 + ((x) * 0x04))
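The BIT()/GENMASK() conversions above are value-preserving, for instance:

	BIT(7)        == (1 << 7) == 0x080
	GENMASK(8, 7) == 0x180    == (3 << 7)	/* CTR width mask */
	GENMASK(4, 3) == 0x018    == (3 << 3)	/* key size mask */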
 
@@ -254,7 +255,7 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
 {
        unsigned int key32;
        int i, err;
-       u32 val, mask = 0;
+       u32 val;
 
        err = omap_aes_hw_init(dd);
        if (err)
@@ -274,17 +275,13 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
        val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
        if (dd->flags & FLAGS_CBC)
                val |= AES_REG_CTRL_CBC;
-       if (dd->flags & FLAGS_CTR) {
+       if (dd->flags & FLAGS_CTR)
                val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;
-               mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK;
-       }
+
        if (dd->flags & FLAGS_ENCRYPT)
                val |= AES_REG_CTRL_DIRECTION;
 
-       mask |= AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
-                       AES_REG_CTRL_KEY_SIZE;
-
-       omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, mask);
+       omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);
 
        return 0;
 }
@@ -558,6 +555,9 @@ static int omap_aes_check_aligned(struct scatterlist *sg, int total)
 {
        int len = 0;
 
+       if (!IS_ALIGNED(total, AES_BLOCK_SIZE))
+               return -EINVAL;
+
        while (sg) {
                if (!IS_ALIGNED(sg->offset, 4))
                        return -1;
@@ -577,9 +577,10 @@ static int omap_aes_check_aligned(struct scatterlist *sg, int total)
 static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
 {
        void *buf_in, *buf_out;
-       int pages;
+       int pages, total;
 
-       pages = get_order(dd->total);
+       total = ALIGN(dd->total, AES_BLOCK_SIZE);
+       pages = get_order(total);
 
        buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
        buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
@@ -594,11 +595,11 @@ static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
        sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);
 
        sg_init_table(&dd->in_sgl, 1);
-       sg_set_buf(&dd->in_sgl, buf_in, dd->total);
+       sg_set_buf(&dd->in_sgl, buf_in, total);
        dd->in_sg = &dd->in_sgl;
 
        sg_init_table(&dd->out_sgl, 1);
-       sg_set_buf(&dd->out_sgl, buf_out, dd->total);
+       sg_set_buf(&dd->out_sgl, buf_out, total);
        dd->out_sg = &dd->out_sgl;
 
        return 0;
@@ -611,7 +612,7 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
        struct omap_aes_ctx *ctx;
        struct omap_aes_reqctx *rctx;
        unsigned long flags;
-       int err, ret = 0;
+       int err, ret = 0, len;
 
        spin_lock_irqsave(&dd->lock, flags);
        if (req)
@@ -650,8 +651,9 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
                dd->sgs_copied = 0;
        }
 
-       dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total);
-       dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total);
+       len = ALIGN(dd->total, AES_BLOCK_SIZE);
+       dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, len);
+       dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, len);
        BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);
 
        rctx = ablkcipher_request_ctx(req);
@@ -678,7 +680,7 @@ static void omap_aes_done_task(unsigned long data)
 {
        struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
        void *buf_in, *buf_out;
-       int pages;
+       int pages, len;
 
        pr_debug("enter done_task\n");
 
@@ -697,7 +699,8 @@ static void omap_aes_done_task(unsigned long data)
 
                sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);
 
-               pages = get_order(dd->total_save);
+               len = ALIGN(dd->total_save, AES_BLOCK_SIZE);
+               pages = get_order(len);
                free_pages((unsigned long)buf_in, pages);
                free_pages((unsigned long)buf_out, pages);
        }
@@ -726,11 +729,6 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
                  !!(mode & FLAGS_ENCRYPT),
                  !!(mode & FLAGS_CBC));
 
-       if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
-               pr_err("request size is not exact amount of AES blocks\n");
-               return -EINVAL;
-       }
-
        dd = omap_aes_find_dev(ctx);
        if (!dd)
                return -ENODEV;
@@ -833,7 +831,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
 {
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "ecb-aes-omap",
-       .cra_priority           = 100,
+       .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                  CRYPTO_ALG_KERN_DRIVER_ONLY |
                                  CRYPTO_ALG_ASYNC,
@@ -855,7 +853,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
 {
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "cbc-aes-omap",
-       .cra_priority           = 100,
+       .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                  CRYPTO_ALG_KERN_DRIVER_ONLY |
                                  CRYPTO_ALG_ASYNC,
@@ -881,7 +879,7 @@ static struct crypto_alg algs_ctr[] = {
 {
        .cra_name               = "ctr(aes)",
        .cra_driver_name        = "ctr-aes-omap",
-       .cra_priority           = 100,
+       .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                  CRYPTO_ALG_KERN_DRIVER_ONLY |
                                  CRYPTO_ALG_ASYNC,
@@ -1046,9 +1044,7 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
                        }
                }
 
-               dd->total -= AES_BLOCK_SIZE;
-
-               BUG_ON(dd->total < 0);
+               dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total);
 
                /* Clear IRQ status */
                status &= ~AES_REG_IRQ_DATA_OUT;
index 4f56f3681abdfc6e2a4ecf95e34f3ea30c231961..da36de26a4dc1074859fc5e5082630adc57897f3 100644 (file)
@@ -99,11 +99,16 @@ struct spacc_req {
        dma_addr_t                      src_addr, dst_addr;
        struct spacc_ddt                *src_ddt, *dst_ddt;
        void                            (*complete)(struct spacc_req *req);
+};
 
-       /* AEAD specific bits. */
-       u8                              *giv;
-       size_t                          giv_len;
-       dma_addr_t                      giv_pa;
+struct spacc_aead {
+       unsigned long                   ctrl_default;
+       unsigned long                   type;
+       struct aead_alg                 alg;
+       struct spacc_engine             *engine;
+       struct list_head                entry;
+       int                             key_offs;
+       int                             iv_offs;
 };
 
 struct spacc_engine {
@@ -121,6 +126,9 @@ struct spacc_engine {
        struct spacc_alg                *algs;
        unsigned                        num_algs;
        struct list_head                registered_algs;
+       struct spacc_aead               *aeads;
+       unsigned                        num_aeads;
+       struct list_head                registered_aeads;
        size_t                          cipher_pg_sz;
        size_t                          hash_pg_sz;
        const char                      *name;
@@ -174,8 +182,6 @@ struct spacc_aead_ctx {
        u8                              cipher_key_len;
        u8                              hash_key_len;
        struct crypto_aead              *sw_cipher;
-       size_t                          auth_size;
-       u8                              salt[AES_BLOCK_SIZE];
 };
 
 static int spacc_ablk_submit(struct spacc_req *req);
@@ -185,6 +191,11 @@ static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
        return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
 }
 
+static inline struct spacc_aead *to_spacc_aead(struct aead_alg *alg)
+{
+       return container_of(alg, struct spacc_aead, alg);
+}
+
 static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
 {
        u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);
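
to_spacc_aead() above is the standard container_of() accessor: the crypto core passes around a pointer to the embedded struct aead_alg, and the driver recovers the enclosing struct spacc_aead from it. A self-contained illustration of the pattern (struct names abbreviated):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct aead_alg { int ivsize; };

struct spacc_aead_model {
	unsigned long ctrl_default;
	struct aead_alg alg;		/* embedded by value, not a pointer */
};

int main(void)
{
	struct spacc_aead_model drv = { .ctrl_default = 0x42 };
	struct aead_alg *alg = &drv.alg;	/* what the core hands back */

	printf("%#lx\n",
	       container_of(alg, struct spacc_aead_model, alg)->ctrl_default);
	return 0;
}
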
@@ -310,120 +321,117 @@ out:
        return NULL;
 }
 
-static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv)
+static int spacc_aead_make_ddts(struct aead_request *areq)
 {
-       struct aead_request *areq = container_of(req->req, struct aead_request,
-                                                base);
+       struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+       struct spacc_req *req = aead_request_ctx(areq);
        struct spacc_engine *engine = req->engine;
        struct spacc_ddt *src_ddt, *dst_ddt;
-       unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq));
-       unsigned nents = sg_count(areq->src, areq->cryptlen);
        unsigned total;
-       dma_addr_t iv_addr;
+       unsigned int src_nents, dst_nents;
        struct scatterlist *cur;
-       int i, dst_ents, src_ents, assoc_ents;
-       u8 *iv = giv ? giv : areq->iv;
+       int i, dst_ents, src_ents;
+
+       total = areq->assoclen + areq->cryptlen;
+       if (req->is_encrypt)
+               total += crypto_aead_authsize(aead);
+
+       src_nents = sg_count(areq->src, total);
+       if (src_nents + 1 > MAX_DDT_LEN)
+               return -E2BIG;
+
+       dst_nents = 0;
+       if (areq->src != areq->dst) {
+               dst_nents = sg_count(areq->dst, total);
+               if (dst_nents + 1 > MAX_DDT_LEN)
+                       return -E2BIG;
+       }
 
        src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
        if (!src_ddt)
-               return -ENOMEM;
+               goto err;
 
        dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
-       if (!dst_ddt) {
-               dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
-               return -ENOMEM;
-       }
+       if (!dst_ddt)
+               goto err_free_src;
 
        req->src_ddt = src_ddt;
        req->dst_ddt = dst_ddt;
 
-       assoc_ents = dma_map_sg(engine->dev, areq->assoc,
-               sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
-       if (areq->src != areq->dst) {
-               src_ents = dma_map_sg(engine->dev, areq->src, nents,
+       if (dst_nents) {
+               src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
                                      DMA_TO_DEVICE);
-               dst_ents = dma_map_sg(engine->dev, areq->dst, nents,
+               if (!src_ents)
+                       goto err_free_dst;
+
+               dst_ents = dma_map_sg(engine->dev, areq->dst, dst_nents,
                                      DMA_FROM_DEVICE);
+
+               if (!dst_ents) {
+                       dma_unmap_sg(engine->dev, areq->src, src_nents,
+                                    DMA_TO_DEVICE);
+                       goto err_free_dst;
+               }
        } else {
-               src_ents = dma_map_sg(engine->dev, areq->src, nents,
+               src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
                                      DMA_BIDIRECTIONAL);
-               dst_ents = 0;
+               if (!src_ents)
+                       goto err_free_dst;
+               dst_ents = src_ents;
        }
 
        /*
-        * Map the IV/GIV. For the GIV it needs to be bidirectional as it is
-        * formed by the crypto block and sent as the ESP IV for IPSEC.
+        * Now map in the payload for the source and destination and terminate
+        * with the NULL pointers.
         */
-       iv_addr = dma_map_single(engine->dev, iv, ivsize,
-                                giv ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
-       req->giv_pa = iv_addr;
+       for_each_sg(areq->src, cur, src_ents, i)
+               ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
 
-       /*
-        * Map the associated data. For decryption we don't copy the
-        * associated data.
-        */
-       total = areq->assoclen;
-       for_each_sg(areq->assoc, cur, assoc_ents, i) {
+       /* For decryption we need to skip the associated data. */
+       total = req->is_encrypt ? 0 : areq->assoclen;
+       for_each_sg(areq->dst, cur, dst_ents, i) {
                unsigned len = sg_dma_len(cur);
 
-               if (len > total)
-                       len = total;
-
-               total -= len;
+               if (len <= total) {
+                       total -= len;
+                       continue;
+               }
 
-               ddt_set(src_ddt++, sg_dma_address(cur), len);
-               if (req->is_encrypt)
-                       ddt_set(dst_ddt++, sg_dma_address(cur), len);
+               ddt_set(dst_ddt++, sg_dma_address(cur) + total, len - total);
        }
-       ddt_set(src_ddt++, iv_addr, ivsize);
-
-       if (giv || req->is_encrypt)
-               ddt_set(dst_ddt++, iv_addr, ivsize);
-
-       /*
-        * Now map in the payload for the source and destination and terminate
-        * with the NULL pointers.
-        */
-       for_each_sg(areq->src, cur, src_ents, i) {
-               ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
-               if (areq->src == areq->dst)
-                       ddt_set(dst_ddt++, sg_dma_address(cur),
-                               sg_dma_len(cur));
-       }
-
-       for_each_sg(areq->dst, cur, dst_ents, i)
-               ddt_set(dst_ddt++, sg_dma_address(cur),
-                       sg_dma_len(cur));
 
        ddt_set(src_ddt, 0, 0);
        ddt_set(dst_ddt, 0, 0);
 
        return 0;
+
+err_free_dst:
+       dma_pool_free(engine->req_pool, dst_ddt, req->dst_addr);
+err_free_src:
+       dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
+err:
+       return -ENOMEM;
 }
 
 static void spacc_aead_free_ddts(struct spacc_req *req)
 {
        struct aead_request *areq = container_of(req->req, struct aead_request,
                                                 base);
-       struct spacc_alg *alg = to_spacc_alg(req->req->tfm->__crt_alg);
-       struct spacc_ablk_ctx *aead_ctx = crypto_tfm_ctx(req->req->tfm);
+       struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+       unsigned total = areq->assoclen + areq->cryptlen +
+                        (req->is_encrypt ? crypto_aead_authsize(aead) : 0);
+       struct spacc_aead_ctx *aead_ctx = crypto_aead_ctx(aead);
        struct spacc_engine *engine = aead_ctx->generic.engine;
-       unsigned ivsize = alg->alg.cra_aead.ivsize;
-       unsigned nents = sg_count(areq->src, areq->cryptlen);
+       unsigned nents = sg_count(areq->src, total);
 
        if (areq->src != areq->dst) {
                dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
                dma_unmap_sg(engine->dev, areq->dst,
-                            sg_count(areq->dst, areq->cryptlen),
+                            sg_count(areq->dst, total),
                             DMA_FROM_DEVICE);
        } else
                dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);
 
-       dma_unmap_sg(engine->dev, areq->assoc,
-                    sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
-
-       dma_unmap_single(engine->dev, req->giv_pa, ivsize, DMA_BIDIRECTIONAL);
-
        dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
        dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
 }
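
The rewritten spacc_aead_make_ddts() uses the kernel's goto-unwind idiom in place of the old inline cleanup: each resource acquisition gets a matching label, and a failure jumps to the label that releases exactly what has been acquired so far, in reverse order. A userspace model of the shape, with malloc() standing in for dma_pool_alloc():

#include <stdlib.h>
#include <errno.h>

static int make_ddts_model(void)
{
	void *src_ddt, *dst_ddt;

	src_ddt = malloc(64);			/* dma_pool_alloc() stand-in */
	if (!src_ddt)
		goto err;

	dst_ddt = malloc(64);
	if (!dst_ddt)
		goto err_free_src;

	/* mapping steps would follow; a failure there would jump to a
	 * label that also frees dst_ddt */

	free(dst_ddt);
	free(src_ddt);
	return 0;

err_free_src:
	free(src_ddt);
err:
	return -ENOMEM;
}

int main(void)
{
	return make_ddts_model() ? 1 : 0;
}
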
@@ -438,65 +446,22 @@ static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
        dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
 }
 
-/*
- * Set key for a DES operation in an AEAD cipher. This also performs weak key
- * checking if required.
- */
-static int spacc_aead_des_setkey(struct crypto_aead *aead, const u8 *key,
-                                unsigned int len)
-{
-       struct crypto_tfm *tfm = crypto_aead_tfm(aead);
-       struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-       u32 tmp[DES_EXPKEY_WORDS];
-
-       if (unlikely(!des_ekey(tmp, key)) &&
-           (crypto_aead_get_flags(aead)) & CRYPTO_TFM_REQ_WEAK_KEY) {
-               tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
-               return -EINVAL;
-       }
-
-       memcpy(ctx->cipher_key, key, len);
-       ctx->cipher_key_len = len;
-
-       return 0;
-}
-
-/* Set the key for the AES block cipher component of the AEAD transform. */
-static int spacc_aead_aes_setkey(struct crypto_aead *aead, const u8 *key,
-                                unsigned int len)
-{
-       struct crypto_tfm *tfm = crypto_aead_tfm(aead);
-       struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       /*
-        * IPSec engine only supports 128 and 256 bit AES keys. If we get a
-        * request for any other size (192 bits) then we need to do a software
-        * fallback.
-        */
-       if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
-               /*
-                * Set the fallback transform to use the same request flags as
-                * the hardware transform.
-                */
-               ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
-               ctx->sw_cipher->base.crt_flags |=
-                       tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
-               return crypto_aead_setkey(ctx->sw_cipher, key, len);
-       }
-
-       memcpy(ctx->cipher_key, key, len);
-       ctx->cipher_key_len = len;
-
-       return 0;
-}
-
 static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
                             unsigned int keylen)
 {
        struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-       struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
        struct crypto_authenc_keys keys;
-       int err = -EINVAL;
+       int err;
+
+       crypto_aead_clear_flags(ctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+       crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) &
+                                             CRYPTO_TFM_REQ_MASK);
+       err = crypto_aead_setkey(ctx->sw_cipher, key, keylen);
+       crypto_aead_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
+       crypto_aead_set_flags(tfm, crypto_aead_get_flags(ctx->sw_cipher) &
+                                  CRYPTO_TFM_RES_MASK);
+       if (err)
+               return err;
 
        if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
                goto badkey;
@@ -507,14 +472,8 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
        if (keys.authkeylen > sizeof(ctx->hash_ctx))
                goto badkey;
 
-       if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
-           SPA_CTRL_CIPH_ALG_AES)
-               err = spacc_aead_aes_setkey(tfm, keys.enckey, keys.enckeylen);
-       else
-               err = spacc_aead_des_setkey(tfm, keys.enckey, keys.enckeylen);
-
-       if (err)
-               goto badkey;
+       memcpy(ctx->cipher_key, keys.enckey, keys.enckeylen);
+       ctx->cipher_key_len = keys.enckeylen;
 
        memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
        ctx->hash_key_len = keys.authkeylen;
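
The new setkey path keys the software fallback first and plumbs flags both ways: CRYPTO_TFM_REQ_* bits flow from the parent transform down to the fallback before setkey, and CRYPTO_TFM_RES_* bits flow back up afterwards, so key-validation results reach the caller whichever path later runs the request. A userspace sketch of the two-way masking, using the mask values from <linux/crypto.h> of this kernel generation:

#include <stdio.h>

#define CRYPTO_TFM_REQ_MASK	0x000fff00u
#define CRYPTO_TFM_RES_MASK	0xfff00000u
#define CRYPTO_TFM_RES_WEAK_KEY	0x00100000u

int main(void)
{
	unsigned int parent = 0x00012300u;	/* caller's request bits */
	unsigned int fallback = 0;

	/* Request bits flow down before setkey... */
	fallback &= ~CRYPTO_TFM_REQ_MASK;
	fallback |= parent & CRYPTO_TFM_REQ_MASK;

	/* ...the fallback's setkey runs and may set result bits... */
	fallback |= CRYPTO_TFM_RES_WEAK_KEY;

	/* ...and result bits flow back up afterwards. */
	parent &= ~CRYPTO_TFM_RES_MASK;
	parent |= fallback & CRYPTO_TFM_RES_MASK;
	printf("parent=%#x\n", parent);
	return 0;
}
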
@@ -531,9 +490,7 @@ static int spacc_aead_setauthsize(struct crypto_aead *tfm,
 {
        struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
 
-       ctx->auth_size = authsize;
-
-       return 0;
+       return crypto_aead_setauthsize(ctx->sw_cipher, authsize);
 }
 
 /*
@@ -541,15 +498,13 @@ static int spacc_aead_setauthsize(struct crypto_aead *tfm,
  * be completed in hardware because the hardware may not support certain key
  * sizes. In these cases we need to complete the request in software.
  */
-static int spacc_aead_need_fallback(struct spacc_req *req)
+static int spacc_aead_need_fallback(struct aead_request *aead_req)
 {
-       struct aead_request *aead_req;
-       struct crypto_tfm *tfm = req->req->tfm;
-       struct crypto_alg *alg = req->req->tfm->__crt_alg;
-       struct spacc_alg *spacc_alg = to_spacc_alg(alg);
-       struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+       struct aead_alg *alg = crypto_aead_alg(aead);
+       struct spacc_aead *spacc_alg = to_spacc_aead(alg);
+       struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
 
-       aead_req = container_of(req->req, struct aead_request, base);
        /*
         * If we have a non-supported key-length, then we need to do a
         * software fallback.
@@ -568,22 +523,17 @@ static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
 {
        struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
        struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
-       int err;
+       struct aead_request *subreq = aead_request_ctx(req);
 
-       if (ctx->sw_cipher) {
-               /*
-                * Change the request to use the software fallback transform,
-                * and once the ciphering has completed, put the old transform
-                * back into the request.
-                */
-               aead_request_set_tfm(req, ctx->sw_cipher);
-               err = is_encrypt ? crypto_aead_encrypt(req) :
-                   crypto_aead_decrypt(req);
-               aead_request_set_tfm(req, __crypto_aead_cast(old_tfm));
-       } else
-               err = -EINVAL;
+       aead_request_set_tfm(subreq, ctx->sw_cipher);
+       aead_request_set_callback(subreq, req->base.flags,
+                                 req->base.complete, req->base.data);
+       aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+                              req->iv);
+       aead_request_set_ad(subreq, req->assoclen);
 
-       return err;
+       return is_encrypt ? crypto_aead_encrypt(subreq) :
+                           crypto_aead_decrypt(subreq);
 }
 
 static void spacc_aead_complete(struct spacc_req *req)
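
spacc_aead_do_fallback() now builds a subrequest inside the hardware request's own context rather than temporarily swapping the tfm on the caller's request, so concurrent users of the transform never observe a foreign tfm. A kernel-context sketch of the pattern (not a standalone program; sw_cipher is the fallback transform allocated at init time):

/* Kernel-context sketch; needs <crypto/aead.h>. */
static int fallback_encrypt_sketch(struct aead_request *req,
				   struct crypto_aead *sw_cipher)
{
	/* The subrequest occupies the head of the parent request's context. */
	struct aead_request *subreq = aead_request_ctx(req);

	aead_request_set_tfm(subreq, sw_cipher);
	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);

	return crypto_aead_encrypt(subreq);
}

This relies on spacc_aead_cra_init() below sizing the request context as max(sizeof(struct spacc_req), sizeof(struct aead_request) + crypto_aead_reqsize(ctx->sw_cipher)), so the subrequest always fits.
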
@@ -594,18 +544,19 @@ static void spacc_aead_complete(struct spacc_req *req)
 
 static int spacc_aead_submit(struct spacc_req *req)
 {
-       struct crypto_tfm *tfm = req->req->tfm;
-       struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct crypto_alg *alg = req->req->tfm->__crt_alg;
-       struct spacc_alg *spacc_alg = to_spacc_alg(alg);
-       struct spacc_engine *engine = ctx->generic.engine;
-       u32 ctrl, proc_len, assoc_len;
        struct aead_request *aead_req =
                container_of(req->req, struct aead_request, base);
+       struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+       unsigned int authsize = crypto_aead_authsize(aead);
+       struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
+       struct aead_alg *alg = crypto_aead_alg(aead);
+       struct spacc_aead *spacc_alg = to_spacc_aead(alg);
+       struct spacc_engine *engine = ctx->generic.engine;
+       u32 ctrl, proc_len, assoc_len;
 
        req->result = -EINPROGRESS;
        req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
-               ctx->cipher_key_len, aead_req->iv, alg->cra_aead.ivsize,
+               ctx->cipher_key_len, aead_req->iv, crypto_aead_ivsize(aead),
                ctx->hash_ctx, ctx->hash_key_len);
 
        /* Set the source and destination DDT pointers. */
@@ -616,26 +567,16 @@ static int spacc_aead_submit(struct spacc_req *req)
        assoc_len = aead_req->assoclen;
        proc_len = aead_req->cryptlen + assoc_len;
 
-       /*
-        * If we aren't generating an IV, then we need to include the IV in the
-        * associated data so that it is included in the hash.
-        */
-       if (!req->giv) {
-               assoc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
-               proc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
-       } else
-               proc_len += req->giv_len;
-
        /*
         * If we are decrypting, we need to take the length of the ICV out of
         * the processing length.
         */
        if (!req->is_encrypt)
-               proc_len -= ctx->auth_size;
+               proc_len -= authsize;
 
        writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
        writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
-       writel(ctx->auth_size, engine->regs + SPA_ICV_LEN_REG_OFFSET);
+       writel(authsize, engine->regs + SPA_ICV_LEN_REG_OFFSET);
        writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
        writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
 
@@ -674,32 +615,29 @@ static void spacc_push(struct spacc_engine *engine)
 /*
  * Setup an AEAD request for processing. This will configure the engine, load
  * the context and then start the packet processing.
- *
- * @giv Pointer to destination address for a generated IV. If the
- *     request does not need to generate an IV then this should be set to NULL.
  */
-static int spacc_aead_setup(struct aead_request *req, u8 *giv,
+static int spacc_aead_setup(struct aead_request *req,
                            unsigned alg_type, bool is_encrypt)
 {
-       struct crypto_alg *alg = req->base.tfm->__crt_alg;
-       struct spacc_engine *engine = to_spacc_alg(alg)->engine;
+       struct crypto_aead *aead = crypto_aead_reqtfm(req);
+       struct aead_alg *alg = crypto_aead_alg(aead);
+       struct spacc_engine *engine = to_spacc_aead(alg)->engine;
        struct spacc_req *dev_req = aead_request_ctx(req);
-       int err = -EINPROGRESS;
+       int err;
        unsigned long flags;
-       unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
 
-       dev_req->giv            = giv;
-       dev_req->giv_len        = ivsize;
        dev_req->req            = &req->base;
        dev_req->is_encrypt     = is_encrypt;
        dev_req->result         = -EBUSY;
        dev_req->engine         = engine;
        dev_req->complete       = spacc_aead_complete;
 
-       if (unlikely(spacc_aead_need_fallback(dev_req)))
+       if (unlikely(spacc_aead_need_fallback(req) ||
+                    ((err = spacc_aead_make_ddts(req)) == -E2BIG)))
                return spacc_aead_do_fallback(req, alg_type, is_encrypt);
 
-       spacc_aead_make_ddts(dev_req, dev_req->giv);
+       if (err)
+               goto out;
 
        err = -EINPROGRESS;
        spin_lock_irqsave(&engine->hw_lock, flags);
@@ -728,70 +666,44 @@ out:
 static int spacc_aead_encrypt(struct aead_request *req)
 {
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
-       struct crypto_tfm *tfm = crypto_aead_tfm(aead);
-       struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+       struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead));
 
-       return spacc_aead_setup(req, NULL, alg->type, 1);
-}
-
-static int spacc_aead_givencrypt(struct aead_givcrypt_request *req)
-{
-       struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
-       struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-       size_t ivsize = crypto_aead_ivsize(tfm);
-       struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
-       unsigned len;
-       __be64 seq;
-
-       memcpy(req->areq.iv, ctx->salt, ivsize);
-       len = ivsize;
-       if (ivsize > sizeof(u64)) {
-               memset(req->giv, 0, ivsize - sizeof(u64));
-               len = sizeof(u64);
-       }
-       seq = cpu_to_be64(req->seq);
-       memcpy(req->giv + ivsize - len, &seq, len);
-
-       return spacc_aead_setup(&req->areq, req->giv, alg->type, 1);
+       return spacc_aead_setup(req, alg->type, 1);
 }
 
 static int spacc_aead_decrypt(struct aead_request *req)
 {
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
-       struct crypto_tfm *tfm = crypto_aead_tfm(aead);
-       struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+       struct spacc_aead  *alg = to_spacc_aead(crypto_aead_alg(aead));
 
-       return spacc_aead_setup(req, NULL, alg->type, 0);
+       return spacc_aead_setup(req, alg->type, 0);
 }
 
 /*
  * Initialise a new AEAD context. This is responsible for allocating the
  * fallback cipher and initialising the context.
  */
-static int spacc_aead_cra_init(struct crypto_tfm *tfm)
+static int spacc_aead_cra_init(struct crypto_aead *tfm)
 {
-       struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct crypto_alg *alg = tfm->__crt_alg;
-       struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+       struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+       struct aead_alg *alg = crypto_aead_alg(tfm);
+       struct spacc_aead *spacc_alg = to_spacc_aead(alg);
        struct spacc_engine *engine = spacc_alg->engine;
 
        ctx->generic.flags = spacc_alg->type;
        ctx->generic.engine = engine;
-       ctx->sw_cipher = crypto_alloc_aead(alg->cra_name, 0,
-                                          CRYPTO_ALG_ASYNC |
+       ctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
                                           CRYPTO_ALG_NEED_FALLBACK);
-       if (IS_ERR(ctx->sw_cipher)) {
-               dev_warn(engine->dev, "failed to allocate fallback for %s\n",
-                        alg->cra_name);
-               ctx->sw_cipher = NULL;
-       }
+       if (IS_ERR(ctx->sw_cipher))
+               return PTR_ERR(ctx->sw_cipher);
        ctx->generic.key_offs = spacc_alg->key_offs;
        ctx->generic.iv_offs = spacc_alg->iv_offs;
 
-       get_random_bytes(ctx->salt, sizeof(ctx->salt));
-
-       crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
-                               sizeof(struct spacc_req));
+       crypto_aead_set_reqsize(
+               tfm,
+               max(sizeof(struct spacc_req),
+                   sizeof(struct aead_request) +
+                   crypto_aead_reqsize(ctx->sw_cipher)));
 
        return 0;
 }
@@ -800,13 +712,11 @@ static int spacc_aead_cra_init(struct crypto_tfm *tfm)
  * Destructor for an AEAD context. This is called when the transform is freed
  * and must free the fallback cipher.
  */
-static void spacc_aead_cra_exit(struct crypto_tfm *tfm)
+static void spacc_aead_cra_exit(struct crypto_aead *tfm)
 {
-       struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 
-       if (ctx->sw_cipher)
-               crypto_free_aead(ctx->sw_cipher);
-       ctx->sw_cipher = NULL;
+       crypto_free_aead(ctx->sw_cipher);
 }
 
 /*
@@ -1458,180 +1368,188 @@ static struct spacc_alg ipsec_engine_algs[] = {
                        .cra_exit = spacc_ablk_cra_exit,
                },
        },
+};
+
+static struct spacc_aead ipsec_engine_aeads[] = {
        {
-               .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
-                               SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
+               .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
+                               SPA_CTRL_CIPH_MODE_CBC |
+                               SPA_CTRL_HASH_ALG_SHA |
+                               SPA_CTRL_HASH_MODE_HMAC,
                .key_offs = 0,
                .iv_offs = AES_MAX_KEY_SIZE,
                .alg = {
-                       .cra_name = "authenc(hmac(sha1),cbc(aes))",
-                       .cra_driver_name = "authenc-hmac-sha1-cbc-aes-picoxcell",
-                       .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
-                       .cra_flags = CRYPTO_ALG_TYPE_AEAD |
-                                       CRYPTO_ALG_ASYNC |
-                                       CRYPTO_ALG_KERN_DRIVER_ONLY,
-                       .cra_blocksize = AES_BLOCK_SIZE,
-                       .cra_ctxsize = sizeof(struct spacc_aead_ctx),
-                       .cra_type = &crypto_aead_type,
-                       .cra_module = THIS_MODULE,
-                       .cra_aead = {
-                               .setkey = spacc_aead_setkey,
-                               .setauthsize = spacc_aead_setauthsize,
-                               .encrypt = spacc_aead_encrypt,
-                               .decrypt = spacc_aead_decrypt,
-                               .givencrypt = spacc_aead_givencrypt,
-                               .ivsize = AES_BLOCK_SIZE,
-                               .maxauthsize = SHA1_DIGEST_SIZE,
+                       .base = {
+                               .cra_name = "authenc(hmac(sha1),cbc(aes))",
+                               .cra_driver_name = "authenc-hmac-sha1-"
+                                                  "cbc-aes-picoxcell",
+                               .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+                               .cra_flags = CRYPTO_ALG_ASYNC |
+                                            CRYPTO_ALG_NEED_FALLBACK |
+                                            CRYPTO_ALG_KERN_DRIVER_ONLY,
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+                               .cra_module = THIS_MODULE,
                        },
-                       .cra_init = spacc_aead_cra_init,
-                       .cra_exit = spacc_aead_cra_exit,
+                       .setkey = spacc_aead_setkey,
+                       .setauthsize = spacc_aead_setauthsize,
+                       .encrypt = spacc_aead_encrypt,
+                       .decrypt = spacc_aead_decrypt,
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA1_DIGEST_SIZE,
+                       .init = spacc_aead_cra_init,
+                       .exit = spacc_aead_cra_exit,
                },
        },
        {
-               .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
+               .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
+                               SPA_CTRL_CIPH_MODE_CBC |
                                SPA_CTRL_HASH_ALG_SHA256 |
                                SPA_CTRL_HASH_MODE_HMAC,
                .key_offs = 0,
                .iv_offs = AES_MAX_KEY_SIZE,
                .alg = {
-                       .cra_name = "authenc(hmac(sha256),cbc(aes))",
-                       .cra_driver_name = "authenc-hmac-sha256-cbc-aes-picoxcell",
-                       .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
-                       .cra_flags = CRYPTO_ALG_TYPE_AEAD |
-                                       CRYPTO_ALG_ASYNC |
-                                       CRYPTO_ALG_KERN_DRIVER_ONLY,
-                       .cra_blocksize = AES_BLOCK_SIZE,
-                       .cra_ctxsize = sizeof(struct spacc_aead_ctx),
-                       .cra_type = &crypto_aead_type,
-                       .cra_module = THIS_MODULE,
-                       .cra_aead = {
-                               .setkey = spacc_aead_setkey,
-                               .setauthsize = spacc_aead_setauthsize,
-                               .encrypt = spacc_aead_encrypt,
-                               .decrypt = spacc_aead_decrypt,
-                               .givencrypt = spacc_aead_givencrypt,
-                               .ivsize = AES_BLOCK_SIZE,
-                               .maxauthsize = SHA256_DIGEST_SIZE,
+                       .base = {
+                               .cra_name = "authenc(hmac(sha256),cbc(aes))",
+                               .cra_driver_name = "authenc-hmac-sha256-"
+                                                  "cbc-aes-picoxcell",
+                               .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+                               .cra_flags = CRYPTO_ALG_ASYNC |
+                                            CRYPTO_ALG_NEED_FALLBACK |
+                                            CRYPTO_ALG_KERN_DRIVER_ONLY,
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+                               .cra_module = THIS_MODULE,
                        },
-                       .cra_init = spacc_aead_cra_init,
-                       .cra_exit = spacc_aead_cra_exit,
+                       .setkey = spacc_aead_setkey,
+                       .setauthsize = spacc_aead_setauthsize,
+                       .encrypt = spacc_aead_encrypt,
+                       .decrypt = spacc_aead_decrypt,
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA256_DIGEST_SIZE,
+                       .init = spacc_aead_cra_init,
+                       .exit = spacc_aead_cra_exit,
                },
        },
        {
                .key_offs = 0,
                .iv_offs = AES_MAX_KEY_SIZE,
-               .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
-                               SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
+               .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
+                               SPA_CTRL_CIPH_MODE_CBC |
+                               SPA_CTRL_HASH_ALG_MD5 |
+                               SPA_CTRL_HASH_MODE_HMAC,
                .alg = {
-                       .cra_name = "authenc(hmac(md5),cbc(aes))",
-                       .cra_driver_name = "authenc-hmac-md5-cbc-aes-picoxcell",
-                       .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
-                       .cra_flags = CRYPTO_ALG_TYPE_AEAD |
-                                       CRYPTO_ALG_ASYNC |
-                                       CRYPTO_ALG_KERN_DRIVER_ONLY,
-                       .cra_blocksize = AES_BLOCK_SIZE,
-                       .cra_ctxsize = sizeof(struct spacc_aead_ctx),
-                       .cra_type = &crypto_aead_type,
-                       .cra_module = THIS_MODULE,
-                       .cra_aead = {
-                               .setkey = spacc_aead_setkey,
-                               .setauthsize = spacc_aead_setauthsize,
-                               .encrypt = spacc_aead_encrypt,
-                               .decrypt = spacc_aead_decrypt,
-                               .givencrypt = spacc_aead_givencrypt,
-                               .ivsize = AES_BLOCK_SIZE,
-                               .maxauthsize = MD5_DIGEST_SIZE,
+                       .base = {
+                               .cra_name = "authenc(hmac(md5),cbc(aes))",
+                               .cra_driver_name = "authenc-hmac-md5-"
+                                                  "cbc-aes-picoxcell",
+                               .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+                               .cra_flags = CRYPTO_ALG_ASYNC |
+                                            CRYPTO_ALG_NEED_FALLBACK |
+                                            CRYPTO_ALG_KERN_DRIVER_ONLY,
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+                               .cra_module = THIS_MODULE,
                        },
-                       .cra_init = spacc_aead_cra_init,
-                       .cra_exit = spacc_aead_cra_exit,
+                       .setkey = spacc_aead_setkey,
+                       .setauthsize = spacc_aead_setauthsize,
+                       .encrypt = spacc_aead_encrypt,
+                       .decrypt = spacc_aead_decrypt,
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = MD5_DIGEST_SIZE,
+                       .init = spacc_aead_cra_init,
+                       .exit = spacc_aead_cra_exit,
                },
        },
        {
                .key_offs = DES_BLOCK_SIZE,
                .iv_offs = 0,
-               .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
-                               SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
+               .ctrl_default = SPA_CTRL_CIPH_ALG_DES |
+                               SPA_CTRL_CIPH_MODE_CBC |
+                               SPA_CTRL_HASH_ALG_SHA |
+                               SPA_CTRL_HASH_MODE_HMAC,
                .alg = {
-                       .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
-                       .cra_driver_name = "authenc-hmac-sha1-cbc-3des-picoxcell",
-                       .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
-                       .cra_flags = CRYPTO_ALG_TYPE_AEAD |
-                                       CRYPTO_ALG_ASYNC |
-                                       CRYPTO_ALG_KERN_DRIVER_ONLY,
-                       .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-                       .cra_ctxsize = sizeof(struct spacc_aead_ctx),
-                       .cra_type = &crypto_aead_type,
-                       .cra_module = THIS_MODULE,
-                       .cra_aead = {
-                               .setkey = spacc_aead_setkey,
-                               .setauthsize = spacc_aead_setauthsize,
-                               .encrypt = spacc_aead_encrypt,
-                               .decrypt = spacc_aead_decrypt,
-                               .givencrypt = spacc_aead_givencrypt,
-                               .ivsize = DES3_EDE_BLOCK_SIZE,
-                               .maxauthsize = SHA1_DIGEST_SIZE,
+                       .base = {
+                               .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
+                               .cra_driver_name = "authenc-hmac-sha1-"
+                                                  "cbc-3des-picoxcell",
+                               .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+                               .cra_flags = CRYPTO_ALG_ASYNC |
+                                            CRYPTO_ALG_NEED_FALLBACK |
+                                            CRYPTO_ALG_KERN_DRIVER_ONLY,
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+                               .cra_module = THIS_MODULE,
                        },
-                       .cra_init = spacc_aead_cra_init,
-                       .cra_exit = spacc_aead_cra_exit,
+                       .setkey = spacc_aead_setkey,
+                       .setauthsize = spacc_aead_setauthsize,
+                       .encrypt = spacc_aead_encrypt,
+                       .decrypt = spacc_aead_decrypt,
+                       .ivsize = DES3_EDE_BLOCK_SIZE,
+                       .maxauthsize = SHA1_DIGEST_SIZE,
+                       .init = spacc_aead_cra_init,
+                       .exit = spacc_aead_cra_exit,
                },
        },
        {
                .key_offs = DES_BLOCK_SIZE,
                .iv_offs = 0,
-               .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
+               .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
+                               SPA_CTRL_CIPH_MODE_CBC |
                                SPA_CTRL_HASH_ALG_SHA256 |
                                SPA_CTRL_HASH_MODE_HMAC,
                .alg = {
-                       .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
-                       .cra_driver_name = "authenc-hmac-sha256-cbc-3des-picoxcell",
-                       .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
-                       .cra_flags = CRYPTO_ALG_TYPE_AEAD |
-                                       CRYPTO_ALG_ASYNC |
-                                       CRYPTO_ALG_KERN_DRIVER_ONLY,
-                       .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-                       .cra_ctxsize = sizeof(struct spacc_aead_ctx),
-                       .cra_type = &crypto_aead_type,
-                       .cra_module = THIS_MODULE,
-                       .cra_aead = {
-                               .setkey = spacc_aead_setkey,
-                               .setauthsize = spacc_aead_setauthsize,
-                               .encrypt = spacc_aead_encrypt,
-                               .decrypt = spacc_aead_decrypt,
-                               .givencrypt = spacc_aead_givencrypt,
-                               .ivsize = DES3_EDE_BLOCK_SIZE,
-                               .maxauthsize = SHA256_DIGEST_SIZE,
+                       .base = {
+                               .cra_name = "authenc(hmac(sha256),"
+                                           "cbc(des3_ede))",
+                               .cra_driver_name = "authenc-hmac-sha256-"
+                                                  "cbc-3des-picoxcell",
+                               .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+                               .cra_flags = CRYPTO_ALG_ASYNC |
+                                            CRYPTO_ALG_NEED_FALLBACK |
+                                            CRYPTO_ALG_KERN_DRIVER_ONLY,
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+                               .cra_module = THIS_MODULE,
                        },
-                       .cra_init = spacc_aead_cra_init,
-                       .cra_exit = spacc_aead_cra_exit,
+                       .setkey = spacc_aead_setkey,
+                       .setauthsize = spacc_aead_setauthsize,
+                       .encrypt = spacc_aead_encrypt,
+                       .decrypt = spacc_aead_decrypt,
+                       .ivsize = DES3_EDE_BLOCK_SIZE,
+                       .maxauthsize = SHA256_DIGEST_SIZE,
+                       .init = spacc_aead_cra_init,
+                       .exit = spacc_aead_cra_exit,
                },
        },
        {
                .key_offs = DES_BLOCK_SIZE,
                .iv_offs = 0,
-               .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
-                               SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
+               .ctrl_default = SPA_CTRL_CIPH_ALG_DES |
+                               SPA_CTRL_CIPH_MODE_CBC |
+                               SPA_CTRL_HASH_ALG_MD5 |
+                               SPA_CTRL_HASH_MODE_HMAC,
                .alg = {
-                       .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
-                       .cra_driver_name = "authenc-hmac-md5-cbc-3des-picoxcell",
-                       .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
-                       .cra_flags = CRYPTO_ALG_TYPE_AEAD |
-                                       CRYPTO_ALG_ASYNC |
-                                       CRYPTO_ALG_KERN_DRIVER_ONLY,
-                       .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-                       .cra_ctxsize = sizeof(struct spacc_aead_ctx),
-                       .cra_type = &crypto_aead_type,
-                       .cra_module = THIS_MODULE,
-                       .cra_aead = {
-                               .setkey = spacc_aead_setkey,
-                               .setauthsize = spacc_aead_setauthsize,
-                               .encrypt = spacc_aead_encrypt,
-                               .decrypt = spacc_aead_decrypt,
-                               .givencrypt = spacc_aead_givencrypt,
-                               .ivsize = DES3_EDE_BLOCK_SIZE,
-                               .maxauthsize = MD5_DIGEST_SIZE,
+                       .base = {
+                               .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+                               .cra_driver_name = "authenc-hmac-md5-"
+                                                  "cbc-3des-picoxcell",
+                               .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+                               .cra_flags = CRYPTO_ALG_ASYNC |
+                                            CRYPTO_ALG_NEED_FALLBACK |
+                                            CRYPTO_ALG_KERN_DRIVER_ONLY,
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+                               .cra_module = THIS_MODULE,
                        },
-                       .cra_init = spacc_aead_cra_init,
-                       .cra_exit = spacc_aead_cra_exit,
+                       .setkey = spacc_aead_setkey,
+                       .setauthsize = spacc_aead_setauthsize,
+                       .encrypt = spacc_aead_encrypt,
+                       .decrypt = spacc_aead_decrypt,
+                       .ivsize = DES3_EDE_BLOCK_SIZE,
+                       .maxauthsize = MD5_DIGEST_SIZE,
+                       .init = spacc_aead_cra_init,
+                       .exit = spacc_aead_cra_exit,
                },
        },
 };
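
The array above is the mechanical half of the old-to-new AEAD conversion: cra_type and the cra_aead union go away, the ops and geometry move into struct aead_alg, and init/exit replace cra_init/cra_exit. A stripped-down skeleton of one entry, where every example_* identifier is a placeholder rather than a driver symbol:

/* Kernel-context skeleton; struct example_ctx and the example_* callbacks
 * are placeholders, not symbols from this driver. */
static struct aead_alg example_aead = {
	.base = {
		.cra_name		= "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name	= "example-authenc-hmac-sha1-cbc-aes",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct example_ctx),
		.cra_module		= THIS_MODULE,
	},
	.setkey		= example_setkey,
	.setauthsize	= example_setauthsize,
	.encrypt	= example_encrypt,
	.decrypt	= example_decrypt,
	.init		= example_init,
	.exit		= example_exit,
	.ivsize		= AES_BLOCK_SIZE,
	.maxauthsize	= SHA1_DIGEST_SIZE,
};

/* Paired at probe/remove time with crypto_register_aead(&example_aead)
 * and crypto_unregister_aead(&example_aead). */
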
@@ -1707,6 +1625,8 @@ static int spacc_probe(struct platform_device *pdev)
                engine->fifo_sz         = SPACC_CRYPTO_IPSEC_FIFO_SZ;
                engine->algs            = ipsec_engine_algs;
                engine->num_algs        = ARRAY_SIZE(ipsec_engine_algs);
+               engine->aeads           = ipsec_engine_aeads;
+               engine->num_aeads       = ARRAY_SIZE(ipsec_engine_aeads);
        } else if (spacc_is_compatible(pdev, "picochip,spacc-l2")) {
                engine->max_ctxs        = SPACC_CRYPTO_L2_MAX_CTXS;
                engine->cipher_pg_sz    = SPACC_CRYPTO_L2_CIPHER_PG_SZ;
@@ -1815,17 +1735,40 @@ static int spacc_probe(struct platform_device *pdev)
                                engine->algs[i].alg.cra_name);
        }
 
+       INIT_LIST_HEAD(&engine->registered_aeads);
+       for (i = 0; i < engine->num_aeads; ++i) {
+               engine->aeads[i].engine = engine;
+               err = crypto_register_aead(&engine->aeads[i].alg);
+               if (!err) {
+                       list_add_tail(&engine->aeads[i].entry,
+                                     &engine->registered_aeads);
+                       ret = 0;
+               }
+               if (err)
+                       dev_err(engine->dev, "failed to register alg \"%s\"\n",
+                               engine->aeads[i].alg.base.cra_name);
+               else
+                       dev_dbg(engine->dev, "registered alg \"%s\"\n",
+                               engine->aeads[i].alg.base.cra_name);
+       }
+
        return ret;
 }
 
 static int spacc_remove(struct platform_device *pdev)
 {
+       struct spacc_aead *aead, *an;
        struct spacc_alg *alg, *next;
        struct spacc_engine *engine = platform_get_drvdata(pdev);
 
        del_timer_sync(&engine->packet_timeout);
        device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
 
+       list_for_each_entry_safe(aead, an, &engine->registered_aeads, entry) {
+               list_del(&aead->entry);
+               crypto_unregister_aead(&aead->alg);
+       }
+
        list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
                list_del(&alg->entry);
                crypto_unregister_alg(&alg->alg);
index 6fdb9e8b22a75247971fca0b500414b8028bc7dd..eefccf7b8be749ca4a0c24df900ec879608f0a74 100644 (file)
@@ -3,11 +3,13 @@ config CRYPTO_DEV_QAT
        select CRYPTO_AEAD
        select CRYPTO_AUTHENC
        select CRYPTO_BLKCIPHER
+       select CRYPTO_AKCIPHER
        select CRYPTO_HMAC
        select CRYPTO_SHA1
        select CRYPTO_SHA256
        select CRYPTO_SHA512
        select FW_LOADER
+       select ASN1
 
 config CRYPTO_DEV_QAT_DH895xCC
        tristate "Support for Intel(R) DH895xCC"
@@ -19,3 +21,16 @@ config CRYPTO_DEV_QAT_DH895xCC
 
          To compile this as a module, choose M here: the module
          will be called qat_dh895xcc.
+
+config CRYPTO_DEV_QAT_DH895xCCVF
+       tristate "Support for Intel(R) DH895xCC Virtual Function"
+       depends on X86 && PCI
+       select PCI_IOV
+       select CRYPTO_DEV_QAT
+
+       help
+         Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
+         Virtual Function for accelerating crypto and compression workloads.
+
+         To compile this as a module, choose M here: the module
+         will be called qat_dh895xccvf.
index d11481be225e5788273f61b7e10e92da26dcf2ad..a3ce0b70e32ff3f3767456039e871ea8d5c64961 100644 (file)
@@ -1,2 +1,3 @@
 obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
 obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/
diff --git a/drivers/crypto/qat/qat_common/.gitignore b/drivers/crypto/qat/qat_common/.gitignore
new file mode 100644 (file)
index 0000000..ee32837
--- /dev/null
@@ -0,0 +1 @@
+*-asn1.[ch]
index e0424dc382feb1543784b704b4c33984927739a1..df20a9de1c586ef26bb3c5ba513ed052c35d573c 100644 (file)
@@ -1,3 +1,6 @@
+$(obj)/qat_rsakey-asn1.o: $(obj)/qat_rsakey-asn1.c $(obj)/qat_rsakey-asn1.h
+clean-files += qat_rsakey-asn1.c qat_rsakey-asn1.h
+
 obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
 intel_qat-objs := adf_cfg.o \
        adf_ctl_drv.o \
@@ -6,9 +9,14 @@ intel_qat-objs := adf_cfg.o \
        adf_accel_engine.o \
        adf_aer.o \
        adf_transport.o \
+       adf_admin.o \
+       adf_hw_arbiter.o \
        qat_crypto.o \
        qat_algs.o \
+       qat_rsakey-asn1.o \
+       qat_asym_algs.o \
        qat_uclo.o \
        qat_hal.o
 
 intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
+intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o
index 5fe90296762083e12690e70ace018b273d9e5569..ca853d50b4b78154779a8ac50f7cfeb5428c28f3 100644 (file)
 */
 #ifndef ADF_ACCEL_DEVICES_H_
 #define ADF_ACCEL_DEVICES_H_
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/list.h>
 #include <linux/io.h>
+#include <linux/ratelimit.h>
 #include "adf_cfg_common.h"
 
 #define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
+#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
 #define ADF_DH895XCC_PCI_DEVICE_ID 0x435
+#define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443
 #define ADF_PCI_MAX_BARS 3
 #define ADF_DEVICE_NAME_LENGTH 32
 #define ADF_ETR_MAX_RINGS_PER_BANK 16
@@ -79,6 +83,7 @@ struct adf_bar {
 struct adf_accel_msix {
        struct msix_entry *entries;
        char **names;
+       u32 num_entries;
 } __packed;
 
 struct adf_accel_pci {
@@ -99,6 +104,7 @@ enum dev_sku_info {
        DEV_SKU_2,
        DEV_SKU_3,
        DEV_SKU_4,
+       DEV_SKU_VF,
        DEV_SKU_UNKNOWN,
 };
 
@@ -113,6 +119,8 @@ static inline const char *get_sku_info(enum dev_sku_info info)
                return "SKU3";
        case DEV_SKU_4:
                return "SKU4";
+       case DEV_SKU_VF:
+               return "SKUVF";
        case DEV_SKU_UNKNOWN:
        default:
                break;
@@ -135,23 +143,29 @@ struct adf_hw_device_data {
        struct adf_hw_device_class *dev_class;
        uint32_t (*get_accel_mask)(uint32_t fuse);
        uint32_t (*get_ae_mask)(uint32_t fuse);
+       uint32_t (*get_sram_bar_id)(struct adf_hw_device_data *self);
        uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self);
        uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self);
        uint32_t (*get_num_aes)(struct adf_hw_device_data *self);
        uint32_t (*get_num_accels)(struct adf_hw_device_data *self);
+       uint32_t (*get_pf2vf_offset)(uint32_t i);
+       uint32_t (*get_vintmsk_offset)(uint32_t i);
        enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
-       void (*hw_arb_ring_enable)(struct adf_etr_ring_data *ring);
-       void (*hw_arb_ring_disable)(struct adf_etr_ring_data *ring);
        int (*alloc_irq)(struct adf_accel_dev *accel_dev);
        void (*free_irq)(struct adf_accel_dev *accel_dev);
        void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
        int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
        void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
+       int (*send_admin_init)(struct adf_accel_dev *accel_dev);
        int (*init_arb)(struct adf_accel_dev *accel_dev);
        void (*exit_arb)(struct adf_accel_dev *accel_dev);
+       void (*get_arb_mapping)(struct adf_accel_dev *accel_dev,
+                               const uint32_t **cfg);
+       void (*disable_iov)(struct adf_accel_dev *accel_dev);
        void (*enable_ints)(struct adf_accel_dev *accel_dev);
+       int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
        const char *fw_name;
-       uint32_t pci_dev_id;
+       const char *fw_mmp_name;
        uint32_t fuses;
        uint32_t accel_capabilities_mask;
        uint16_t accel_mask;
@@ -163,6 +177,7 @@ struct adf_hw_device_data {
        uint8_t num_accel;
        uint8_t num_logical_accel;
        uint8_t num_engines;
+       uint8_t min_iov_compat_ver;
 } __packed;
 
 /* CSR write macro */
@@ -184,6 +199,16 @@ struct icp_qat_fw_loader_handle;
 struct adf_fw_loader_data {
        struct icp_qat_fw_loader_handle *fw_loader;
        const struct firmware *uof_fw;
+       const struct firmware *mmp_fw;
+};
+
+struct adf_accel_vf_info {
+       struct adf_accel_dev *accel_dev;
+       struct tasklet_struct vf2pf_bh_tasklet;
+       struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
+       struct ratelimit_state vf2pf_ratelimit;
+       u32 vf_nr;
+       bool init;
 };
 
 struct adf_accel_dev {
@@ -199,6 +224,21 @@ struct adf_accel_dev {
        struct list_head list;
        struct module *owner;
        struct adf_accel_pci accel_pci_dev;
+       union {
+               struct {
+                       /* vf_info is non-zero when SR-IOV is init'ed */
+                       struct adf_accel_vf_info *vf_info;
+               } pf;
+               struct {
+                       char *irq_name;
+                       struct tasklet_struct pf2vf_bh_tasklet;
+                       struct mutex vf2pf_lock; /* protect CSR access */
+                       struct completion iov_msg_completion;
+                       uint8_t compatible;
+                       uint8_t pf_version;
+               } vf;
+       };
+       bool is_vf;
        uint8_t accel_id;
 } __packed;
 #endif
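
The anonymous union added to struct adf_accel_dev overlays PF-only and VF-only state in a single allocation, discriminated by the new is_vf flag. A self-contained model of the layout (C11 anonymous members; fields abbreviated):

#include <stdio.h>
#include <stdbool.h>

struct accel_dev_model {
	union {
		struct { int num_vfs; } pf;	/* meaningful on the PF only */
		struct { int pf_version; } vf;	/* meaningful on a VF only */
	};
	bool is_vf;				/* the discriminator */
};

int main(void)
{
	struct accel_dev_model dev = { .is_vf = true, .vf.pf_version = 1 };

	if (dev.is_vf)
		printf("VF, pf_version=%d\n", dev.vf.pf_version);
	return 0;
}
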
index fdda8e7ae302511bec5c0e1c18d2c5c4b2b3d351..20b08bdcb1466f4ba8d15ccd933abd7ea4663a29 100644 (file)
@@ -55,24 +55,36 @@ int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
 {
        struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
        struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-       void *uof_addr;
-       uint32_t uof_size;
+       void *uof_addr, *mmp_addr;
+       u32 uof_size, mmp_size;
 
+       if (!hw_device->fw_name)
+               return 0;
+
+       if (request_firmware(&loader_data->mmp_fw, hw_device->fw_mmp_name,
+                            &accel_dev->accel_pci_dev.pci_dev->dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to load MMP firmware %s\n",
+                       hw_device->fw_mmp_name);
+               return -EFAULT;
+       }
        if (request_firmware(&loader_data->uof_fw, hw_device->fw_name,
                             &accel_dev->accel_pci_dev.pci_dev->dev)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to load firmware %s\n",
+               dev_err(&GET_DEV(accel_dev), "Failed to load UOF firmware %s\n",
                        hw_device->fw_name);
-               return -EFAULT;
+               goto out_err;
        }
 
        uof_size = loader_data->uof_fw->size;
        uof_addr = (void *)loader_data->uof_fw->data;
+       mmp_size = loader_data->mmp_fw->size;
+       mmp_addr = (void *)loader_data->mmp_fw->data;
+       qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size);
        if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) {
                dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n");
                goto out_err;
        }
        if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n");
+               dev_err(&GET_DEV(accel_dev), "Failed to load UOF\n");
                goto out_err;
        }
        return 0;
@@ -85,11 +97,17 @@ out_err:
 void adf_ae_fw_release(struct adf_accel_dev *accel_dev)
 {
        struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+       if (!hw_device->fw_name)
+               return;
 
        qat_uclo_del_uof_obj(loader_data->fw_loader);
        qat_hal_deinit(loader_data->fw_loader);
        release_firmware(loader_data->uof_fw);
+       release_firmware(loader_data->mmp_fw);
        loader_data->uof_fw = NULL;
+       loader_data->mmp_fw = NULL;
        loader_data->fw_loader = NULL;
 }
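
adf_ae_fw_load() now requests two images, MMP before UOF, and the release path frees both and NULLs the pointers so a later release cannot touch stale firmware. A kernel-context sketch of the load order with rollback, simplified from the function above (the real code also writes the MMP image and maps the UOF):

/* Kernel-context sketch; needs <linux/firmware.h> for request_firmware()
 * and release_firmware(). */
static int load_fw_pair_sketch(struct adf_fw_loader_data *ld,
			       struct adf_hw_device_data *hw,
			       struct device *dev)
{
	if (request_firmware(&ld->mmp_fw, hw->fw_mmp_name, dev))
		return -EFAULT;			/* nothing to roll back yet */

	if (request_firmware(&ld->uof_fw, hw->fw_name, dev)) {
		release_firmware(ld->mmp_fw);	/* roll back the first image */
		ld->mmp_fw = NULL;
		return -EFAULT;
	}
	return 0;		/* both images held until fw_release */
}
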
 
@@ -99,6 +117,9 @@ int adf_ae_start(struct adf_accel_dev *accel_dev)
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
 
+       if (!hw_data->fw_name)
+               return 0;
+
        for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
                if (hw_data->ae_mask & (1 << ae)) {
                        qat_hal_start(loader_data->fw_loader, ae, 0xFF);
@@ -117,6 +138,9 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev)
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
 
+       if (!hw_data->fw_name)
+               return 0;
+
        for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
                if (hw_data->ae_mask & (1 << ae)) {
                        qat_hal_stop(loader_data->fw_loader, ae, 0xFF);
@@ -143,6 +167,10 @@ static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
 int adf_ae_init(struct adf_accel_dev *accel_dev)
 {
        struct adf_fw_loader_data *loader_data;
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+       if (!hw_device->fw_name)
+               return 0;
 
        loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL);
        if (!loader_data)
@@ -166,6 +194,10 @@ int adf_ae_init(struct adf_accel_dev *accel_dev)
 int adf_ae_shutdown(struct adf_accel_dev *accel_dev)
 {
        struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+       if (!hw_device->fw_name)
+               return 0;
 
        qat_hal_deinit(loader_data->fw_loader);
        kfree(accel_dev->fw_loader);
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
new file mode 100644 (file)
index 0000000..147d755
--- /dev/null
@@ -0,0 +1,290 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_init_admin.h"
+
+/* Admin Messages Registers */
+#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574)
+#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578)
+#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970
+#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
+#define ADF_ADMINMSG_LEN 32
+
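+/*
+ * Constants table downloaded to the device at init time.  The non-zero
+ * regions are recognizably the standard initial hash values: the MD5
+ * and SHA-1 IVs, followed by the SHA-224/256 and SHA-384/512 H
+ * constants (e.g. 6a 09 e6 67 ... is SHA-256 H0).  The leading words
+ * appear to be per-algorithm configuration entries; the exact layout
+ * is defined by the device firmware.
+ */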
+static const u8 const_tab[1024] = {
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13,
+0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76,
+0x54, 0x32, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab,
+0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0,
+0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x05, 0x9e,
+0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39,
+0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe,
+0xfa, 0x4f, 0xa4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae,
+0x85, 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f,
+0x9b, 0x05, 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19, 0x05,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
+0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
+0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, 0x33, 0x26, 0x67, 0xff,
+0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c,
+0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f,
+0xa4, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb,
+0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
+0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52,
+0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
+0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13,
+0x7e, 0x21, 0x79, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
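+
+/* The table above is dma_map_single()'d once at init and its bus
+ * address handed to the firmware in the ICP_QAT_FW_CONSTANTS_CFG
+ * request (init_cfg_ptr/init_cfg_sz below), so it must stay resident
+ * for the lifetime of the device.
+ */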
+
+struct adf_admin_comms {
+       dma_addr_t phy_addr;
+       dma_addr_t const_tbl_addr;
+       void *virt_addr;
+       void __iomem *mailbox_addr;
+       struct mutex lock;      /* protects adf_admin_comms struct */
+};
+
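+/*
+ * Send one 32-byte admin message to a single AE and wait synchronously
+ * for the 32-byte response.  AE n owns a 64-byte slot in the shared DMA
+ * page (request at n * 64, response at n * 64 + 32).  The per-AE mailbox
+ * CSR acts as a busy flag: the host sets it to 1 after copying in the
+ * request and the firmware clears it once the response is ready, so the
+ * worst-case wait below is 50 polls * 20 ms, i.e. about a second.
+ */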
+static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
+                                 void *in, void *out)
+{
+       struct adf_admin_comms *admin = accel_dev->admin;
+       int offset = ae * ADF_ADMINMSG_LEN * 2;
+       void __iomem *mailbox = admin->mailbox_addr;
+       int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
+       int times, received;
+
+       mutex_lock(&admin->lock);
+
+       if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
+               mutex_unlock(&admin->lock);
+               return -EAGAIN;
+       }
+
+       memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
+       ADF_CSR_WR(mailbox, mb_offset, 1);
+       received = 0;
+       for (times = 0; times < 50; times++) {
+               msleep(20);
+               if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
+                       received = 1;
+                       break;
+               }
+       }
+       if (received)
+               memcpy(out, admin->virt_addr + offset +
+                      ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
+       else
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to send admin msg to accelerator\n");
+
+       mutex_unlock(&admin->lock);
+       return received ? 0 : -EFAULT;
+}
+
+static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
+{
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+       struct icp_qat_fw_init_admin_req req;
+       struct icp_qat_fw_init_admin_resp resp;
+       int i;
+
+       memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req));
+       req.init_admin_cmd_id = cmd;
+
+       if (cmd == ICP_QAT_FW_CONSTANTS_CFG) {
+               req.init_cfg_sz = 1024;
+               req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
+       }
+       for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+               memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp));
+               if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) ||
+                   resp.init_resp_hdr.status)
+                       return -EFAULT;
+       }
+       return 0;
+}
+
+/**
+ * adf_send_admin_init() - Function sends init message to FW
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function sends admin init message to the FW
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_admin_init(struct adf_accel_dev *accel_dev)
+{
+       int ret = adf_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME);
+
+       if (ret)
+               return ret;
+       return adf_send_admin_cmd(accel_dev, ICP_QAT_FW_CONSTANTS_CFG);
+}
+EXPORT_SYMBOL_GPL(adf_send_admin_init);
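+
+/*
+ * Device-specific drivers are expected to publish this routine through
+ * their adf_hw_device_data, e.g. (hypothetical dh895xcc setup code):
+ *
+ *     hw_data->send_admin_init = adf_send_admin_init;
+ *
+ * adf_dev_start() then calls hw_data->send_admin_init() once the AEs
+ * are running, before any service handlers are started.
+ */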
+
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
+{
+       struct adf_admin_comms *admin;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_bar *pmisc =
+               &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+       void __iomem *csr = pmisc->virt_addr;
+       void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
+       u64 reg_val;
+
+       admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
+                            dev_to_node(&GET_DEV(accel_dev)));
+       if (!admin)
+               return -ENOMEM;
+       admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+                                              &admin->phy_addr, GFP_KERNEL);
+       if (!admin->virt_addr) {
+               dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
+               kfree(admin);
+               return -ENOMEM;
+       }
+
+       admin->const_tbl_addr = dma_map_single(&GET_DEV(accel_dev),
+                                              (void *) const_tab, 1024,
+                                              DMA_TO_DEVICE);
+
+       if (unlikely(dma_mapping_error(&GET_DEV(accel_dev),
+                                      admin->const_tbl_addr))) {
+               dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+                                 admin->virt_addr, admin->phy_addr);
+               kfree(admin);
+               return -ENOMEM;
+       }
+       reg_val = (u64)admin->phy_addr;
+       ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
+       ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
+       mutex_init(&admin->lock);
+       admin->mailbox_addr = mailbox;
+       accel_dev->admin = admin;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_init_admin_comms);
+
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
+{
+       struct adf_admin_comms *admin = accel_dev->admin;
+
+       if (!admin)
+               return;
+
+       if (admin->virt_addr)
+               dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+                                 admin->virt_addr, admin->phy_addr);
+
+       dma_unmap_single(&GET_DEV(accel_dev), admin->const_tbl_addr, 1024,
+                        DMA_TO_DEVICE);
+       mutex_destroy(&admin->lock);
+       kfree(admin);
+       accel_dev->admin = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_exit_admin_comms);
index 2dbc733b8ab2483e8790d26a2f509239c681573d..a57b4194de2845aaee3eb77d9e7bd268cf63184f 100644 (file)
@@ -91,6 +91,9 @@ static void adf_dev_restore(struct adf_accel_dev *accel_dev)
        dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
                 accel_dev->accel_id);
 
+       /* Fall back to the device itself when no parent is present */
+       if (!parent)
+               parent = pdev;
+
        if (!pci_wait_for_pending_transaction(pdev))
                dev_info(&GET_DEV(accel_dev),
                         "Transaction still in progress. Proceeding\n");
@@ -206,7 +209,7 @@ static struct pci_error_handlers adf_err_handler = {
  * QAT acceleration device accel_dev.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf)
 {
index ab65bc274561dc58000f644d68bef6bf3b2b7926..d0879790561fa68efdebcc3a643d946cacd8257d 100644 (file)
@@ -123,7 +123,7 @@ static const struct file_operations qat_dev_cfg_fops = {
  * The table stores device specific config values.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
 {
@@ -178,6 +178,9 @@ void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
 {
        struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
 
+       if (!dev_cfg_data)
+               return;
+
        down_write(&dev_cfg_data->lock);
        adf_cfg_section_del_all(&dev_cfg_data->sec_list);
        up_write(&dev_cfg_data->lock);
@@ -276,7 +279,7 @@ static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev,
  * in the given acceleration device
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
                                const char *section_name,
@@ -327,7 +330,7 @@ EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param);
  * will be stored.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
 {
index 88b82187ac3543e25d869972287cef8bad5f72fa..c697fb1cdfb5af2a47697b42f39e10649dab2adc 100644 (file)
@@ -60,7 +60,7 @@
 #define ADF_CFG_NO_DEVICE 0xFF
 #define ADF_CFG_AFFINITY_WHATEVER 0xFF
 #define MAX_DEVICE_NAME_SIZE 32
-#define ADF_MAX_DEVICES 32
+#define ADF_MAX_DEVICES (32 * 32)
 
 enum adf_cfg_val_type {
        ADF_DEC,
@@ -71,6 +71,7 @@ enum adf_cfg_val_type {
 enum adf_device_type {
        DEV_UNKNOWN = 0,
        DEV_DH895XCC,
+       DEV_DH895XCCVF,
 };
 
 struct adf_dev_status_info {
index 27e16c09230bffa24e505a03c8848fc7ab2bb40d..7836dffc3d4728abbe54dc0fc15af920cff9f84f 100644 (file)
@@ -54,8 +54,8 @@
 #include "icp_qat_hal.h"
 
 #define ADF_MAJOR_VERSION      0
-#define ADF_MINOR_VERSION      1
-#define ADF_BUILD_VERSION      3
+#define ADF_MINOR_VERSION      2
+#define ADF_BUILD_VERSION      0
 #define ADF_DRV_VERSION                __stringify(ADF_MAJOR_VERSION) "." \
                                __stringify(ADF_MINOR_VERSION) "." \
                                __stringify(ADF_BUILD_VERSION)
@@ -91,9 +91,13 @@ struct service_hndl {
        unsigned long start_status;
        char *name;
        struct list_head list;
-       int admin;
 };
 
+/* Map the calling CPU to its physical package, e.g. to pick a crypto
+ * instance local to the caller (cf. qat_crypto_get_instance_node()).
+ */
+static inline int get_current_node(void)
+{
+       return topology_physical_package_id(smp_processor_id());
+}
+
 int adf_service_register(struct service_hndl *service);
 int adf_service_unregister(struct service_hndl *service);
 
@@ -102,13 +106,24 @@ int adf_dev_start(struct adf_accel_dev *accel_dev);
 int adf_dev_stop(struct adf_accel_dev *accel_dev);
 void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
 
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info);
+void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
+void adf_clean_vf_map(bool vf);
+
 int adf_ctl_dev_register(void);
 void adf_ctl_dev_unregister(void);
 int adf_processes_dev_register(void);
 void adf_processes_dev_unregister(void);
 
-int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev);
-void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev);
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+                      struct adf_accel_dev *pf);
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+                      struct adf_accel_dev *pf);
 struct list_head *adf_devmgr_get_head(void);
 struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id);
 struct adf_accel_dev *adf_devmgr_get_first(void);
@@ -130,6 +145,12 @@ int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
 void adf_disable_aer(struct adf_accel_dev *accel_dev);
 int adf_init_aer(void);
 void adf_exit_aer(void);
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
+int adf_send_admin_init(struct adf_accel_dev *accel_dev);
+int adf_init_arb(struct adf_accel_dev *accel_dev);
+void adf_exit_arb(struct adf_accel_dev *accel_dev);
+void adf_update_ring_arb(struct adf_etr_ring_data *ring);
 
 int adf_dev_get(struct adf_accel_dev *accel_dev);
 void adf_dev_put(struct adf_accel_dev *accel_dev);
@@ -141,10 +162,13 @@ int qat_crypto_unregister(void);
 struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
 void qat_crypto_put_instance(struct qat_crypto_instance *inst);
 void qat_alg_callback(void *resp);
+void qat_alg_asym_callback(void *resp);
 int qat_algs_init(void);
 void qat_algs_exit(void);
 int qat_algs_register(void);
 int qat_algs_unregister(void);
+int qat_asym_algs_register(void);
+void qat_asym_algs_unregister(void);
 
 int qat_hal_init(struct adf_accel_dev *accel_dev);
 void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
@@ -196,4 +220,23 @@ int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
 void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle);
 int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
                         void *addr_ptr, int mem_size);
+void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
+                       void *addr_ptr, int mem_size);
+#if defined(CONFIG_PCI_IOV)
+int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
+void adf_disable_sriov(struct adf_accel_dev *accel_dev);
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+                                 uint32_t vf_mask);
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+                                uint32_t vf_mask);
+#else
+static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+       return 0;
+}
+
+static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+{
+}
+#endif
 #endif
index e056b9e9bf8a99068aa0c0097fc57fe9da25a27f..cd8a12af8ec53467019683d5ce962947eaa72f57 100644 (file)
@@ -398,10 +398,9 @@ static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
        }
 
        accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
-       if (!accel_dev) {
-               pr_err("QAT: Device %d not found\n", dev_info.accel_id);
+       if (!accel_dev)
                return -ENODEV;
-       }
+
        hw_data = accel_dev->hw_device;
        dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
        dev_info.num_ae = hw_data->get_num_aes(hw_data);
@@ -495,6 +494,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
        adf_exit_aer();
        qat_crypto_unregister();
        qat_algs_exit();
+       adf_clean_vf_map(false);
        mutex_destroy(&adf_ctl_lock);
 }
 
index 3f0ff9e7d84060918b55321b09be752533e166e2..8dfdb8f907973359bb3bd4da893d94f46540cd7c 100644 (file)
 #include "adf_common_drv.h"
 
 static LIST_HEAD(accel_table);
+static LIST_HEAD(vfs_table);
 static DEFINE_MUTEX(table_lock);
 static uint32_t num_devices;
 
+struct vf_id_map {
+       u32 bdf;
+       u32 id;
+       u32 fake_id;
+       bool attached;
+       struct list_head list;
+};
+
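+/*
+ * Each VF is keyed by its bus/devfn ("bdf"; ~0 marks a PF entry).  "id"
+ * holds the real accel_id, while "fake_id" is the contiguous id exposed
+ * to user space while some VFs are detached.  adf_get_vf_id() below
+ * linearizes (slot, func), with VF slots numbered from 1:
+ * 7*(slot-1) + func + (slot-1) == 8*(slot-1) + func.
+ */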
+static int adf_get_vf_id(struct adf_accel_dev *vf)
+{
+       return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) +
+               PCI_FUNC(accel_to_pci_dev(vf)->devfn) +
+               (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1));
+}
+
+static int adf_get_vf_num(struct adf_accel_dev *vf)
+{
+       return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
+}
+
+static struct vf_id_map *adf_find_vf(u32 bdf)
+{
+       struct list_head *itr;
+
+       list_for_each(itr, &vfs_table) {
+               struct vf_id_map *ptr =
+                       list_entry(itr, struct vf_id_map, list);
+
+               if (ptr->bdf == bdf)
+                       return ptr;
+       }
+       return NULL;
+}
+
+static int adf_get_vf_real_id(u32 fake)
+{
+       struct list_head *itr;
+
+       list_for_each(itr, &vfs_table) {
+               struct vf_id_map *ptr =
+                       list_entry(itr, struct vf_id_map, list);
+               if (ptr->fake_id == fake)
+                       return ptr->id;
+       }
+       return -1;
+}
+
+/**
+ * adf_clean_vf_map() - Cleans VF id mappings
+ * @vf: flag indicating whether mappings are cleaned
+ *     for vfs only or for both vfs and pfs
+ *
+ * Function cleans internal ids for virtual functions.
+ */
+void adf_clean_vf_map(bool vf)
+{
+       struct vf_id_map *map;
+       struct list_head *ptr, *tmp;
+
+       mutex_lock(&table_lock);
+       list_for_each_safe(ptr, tmp, &vfs_table) {
+               map = list_entry(ptr, struct vf_id_map, list);
+               if (map->bdf != -1)
+                       num_devices--;
+
+               if (vf && map->bdf == -1)
+                       continue;
+
+               list_del(ptr);
+               kfree(map);
+       }
+       mutex_unlock(&table_lock);
+}
+EXPORT_SYMBOL_GPL(adf_clean_vf_map);
+
+/**
+ * adf_devmgr_update_class_index() - Update internal index
+ * @hw_data:  Pointer to internal device data.
+ *
+ * Function updates internal dev index for VFs
+ */
+void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
+{
+       struct adf_hw_device_class *class = hw_data->dev_class;
+       struct list_head *itr;
+       int i = 0;
+
+       list_for_each(itr, &accel_table) {
+               struct adf_accel_dev *ptr =
+                               list_entry(itr, struct adf_accel_dev, list);
+
+               if (ptr->hw_device->dev_class == class)
+                       ptr->hw_device->instance_id = i++;
+
+               if (i == class->instances)
+                       break;
+       }
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);
+
 /**
  * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
  * @accel_dev:  Pointer to acceleration device.
+ * @pf:                Corresponding PF if the accel_dev is a VF
  *
  * Function adds acceleration device to the acceleration framework.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
-int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev)
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+                      struct adf_accel_dev *pf)
 {
        struct list_head *itr;
+       int ret = 0;
 
        if (num_devices == ADF_MAX_DEVICES) {
                dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
@@ -73,20 +177,77 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev)
        }
 
        mutex_lock(&table_lock);
-       list_for_each(itr, &accel_table) {
-               struct adf_accel_dev *ptr =
+       atomic_set(&accel_dev->ref_count, 0);
+
+       /* PF on host or VF on guest */
+       if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
+               struct vf_id_map *map;
+
+               list_for_each(itr, &accel_table) {
+                       struct adf_accel_dev *ptr =
                                list_entry(itr, struct adf_accel_dev, list);
 
-               if (ptr == accel_dev) {
-                       mutex_unlock(&table_lock);
-                       return -EEXIST;
+                       if (ptr == accel_dev) {
+                               ret = -EEXIST;
+                               goto unlock;
+                       }
                }
+
+               list_add_tail(&accel_dev->list, &accel_table);
+               accel_dev->accel_id = num_devices++;
+
+               map = kzalloc(sizeof(*map), GFP_KERNEL);
+               if (!map) {
+                       ret = -ENOMEM;
+                       goto unlock;
+               }
+               map->bdf = ~0;
+               map->id = accel_dev->accel_id;
+               map->fake_id = map->id;
+               map->attached = true;
+               list_add_tail(&map->list, &vfs_table);
+       } else if (accel_dev->is_vf && pf) {
+               /* VF on host */
+               struct adf_accel_vf_info *vf_info;
+               struct vf_id_map *map;
+
+               vf_info = pf->pf.vf_info + adf_get_vf_id(accel_dev);
+
+               map = adf_find_vf(adf_get_vf_num(accel_dev));
+               if (map) {
+                       struct vf_id_map *next;
+
+                       accel_dev->accel_id = map->id;
+                       list_add_tail(&accel_dev->list, &accel_table);
+                       map->fake_id++;
+                       map->attached = true;
+                       next = list_next_entry(map, list);
+                       while (next && &next->list != &vfs_table) {
+                               next->fake_id++;
+                               next = list_next_entry(next, list);
+                       }
+
+                       ret = 0;
+                       goto unlock;
+               }
+
+               map = kzalloc(sizeof(*map), GFP_KERNEL);
+               if (!map) {
+                       ret = -ENOMEM;
+                       goto unlock;
+               }
+
+               accel_dev->accel_id = num_devices++;
+               list_add_tail(&accel_dev->list, &accel_table);
+               map->bdf = adf_get_vf_num(accel_dev);
+               map->id = accel_dev->accel_id;
+               map->fake_id = map->id;
+               map->attached = true;
+               list_add_tail(&map->list, &vfs_table);
        }
-       atomic_set(&accel_dev->ref_count, 0);
-       list_add_tail(&accel_dev->list, &accel_table);
-       accel_dev->accel_id = num_devices++;
+unlock:
        mutex_unlock(&table_lock);
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
 
@@ -98,17 +259,37 @@ struct list_head *adf_devmgr_get_head(void)
 /**
  * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
  * @accel_dev:  Pointer to acceleration device.
+ * @pf:                Corresponding PF if the accel_dev is a VF
  *
  * Function removes acceleration device from the acceleration framework.
  * To be used by QAT device specific drivers.
  *
  * Return: void
  */
-void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev)
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+                      struct adf_accel_dev *pf)
 {
        mutex_lock(&table_lock);
+       if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
+               num_devices--;
+       } else if (accel_dev->is_vf && pf) {
+               struct vf_id_map *map, *next;
+
+               map = adf_find_vf(adf_get_vf_num(accel_dev));
+               if (!map) {
+                       dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
+                       goto unlock;
+               }
+               map->fake_id--;
+               map->attached = false;
+               next = list_next_entry(map, list);
+               while (next && &next->list != &vfs_table) {
+                       next->fake_id--;
+                       next = list_next_entry(next, list);
+               }
+       }
+unlock:
        list_del(&accel_dev->list);
-       num_devices--;
        mutex_unlock(&table_lock);
 }
 EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
@@ -154,17 +335,24 @@ EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
 struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
 {
        struct list_head *itr;
+       int real_id;
 
        mutex_lock(&table_lock);
+       real_id = adf_get_vf_real_id(id);
+       if (real_id < 0)
+               goto unlock;
+
+       id = real_id;
+
        list_for_each(itr, &accel_table) {
                struct adf_accel_dev *ptr =
                                list_entry(itr, struct adf_accel_dev, list);
-
                if (ptr->accel_id == id) {
                        mutex_unlock(&table_lock);
                        return ptr;
                }
        }
+unlock:
        mutex_unlock(&table_lock);
        return NULL;
 }
@@ -180,21 +368,52 @@ int adf_devmgr_verify_id(uint32_t id)
        return -ENODEV;
 }
 
-void adf_devmgr_get_num_dev(uint32_t *num)
+static int adf_get_num_detached_vfs(void)
 {
        struct list_head *itr;
+       int vfs = 0;
 
-       *num = 0;
-       list_for_each(itr, &accel_table) {
-               (*num)++;
+       mutex_lock(&table_lock);
+       list_for_each(itr, &vfs_table) {
+               struct vf_id_map *ptr =
+                       list_entry(itr, struct vf_id_map, list);
+               if (ptr->bdf != ~0 && !ptr->attached)
+                       vfs++;
        }
+       mutex_unlock(&table_lock);
+       return vfs;
+}
+
+void adf_devmgr_get_num_dev(uint32_t *num)
+{
+       *num = num_devices - adf_get_num_detached_vfs();
 }
 
+/**
+ * adf_dev_in_use() - Check whether accel_dev is currently in use
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when device is in use, 0 otherwise.
+ */
 int adf_dev_in_use(struct adf_accel_dev *accel_dev)
 {
        return atomic_read(&accel_dev->ref_count) != 0;
 }
+EXPORT_SYMBOL_GPL(adf_dev_in_use);
 
+/**
+ * adf_dev_get() - Increment accel_dev reference count
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Increment the accel_dev refcount and, on the first reference taken
+ * while the accel_dev is in use, increment the module refcount too.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 when successful, -EFAULT when it fails to bump the module refcount
+ */
 int adf_dev_get(struct adf_accel_dev *accel_dev)
 {
        if (atomic_add_return(1, &accel_dev->ref_count) == 1)
@@ -202,19 +421,50 @@ int adf_dev_get(struct adf_accel_dev *accel_dev)
                        return -EFAULT;
        return 0;
 }
+EXPORT_SYMBOL_GPL(adf_dev_get);
 
+/**
+ * adf_dev_put() - Decrement accel_dev reference count
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Decrement the accel_dev refcount and, on dropping the last reference
+ * taken while the accel_dev is in use, decrement the module refcount too.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
 void adf_dev_put(struct adf_accel_dev *accel_dev)
 {
        if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
                module_put(accel_dev->owner);
 }
+EXPORT_SYMBOL_GPL(adf_dev_put);
 
+/**
+ * adf_devmgr_in_reset() - Check whether device is in reset
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when the device is being reset, 0 otherwise.
+ */
 int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
 {
        return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
 }
+EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);
 
+/**
+ * adf_dev_started() - Check whether device has started
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when the device has started, 0 otherwise
+ */
 int adf_dev_started(struct adf_accel_dev *accel_dev)
 {
        return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
 }
+EXPORT_SYMBOL_GPL(adf_dev_started);
diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
new file mode 100644 (file)
index 0000000..6849422
--- /dev/null
@@ -0,0 +1,168 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
+
+#define ADF_ARB_NUM 4
+#define ADF_ARB_REQ_RING_NUM 8
+#define ADF_ARB_REG_SIZE 0x4
+#define ADF_ARB_WTR_SIZE 0x20
+#define ADF_ARB_OFFSET 0x30000
+#define ADF_ARB_REG_SLOT 0x1000
+#define ADF_ARB_WTR_OFFSET 0x010
+#define ADF_ARB_RO_EN_OFFSET 0x090
+#define ADF_ARB_WQCFG_OFFSET 0x100
+#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
+#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
+
+#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
+       ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+       (ADF_ARB_REG_SLOT * index), value)
+
+#define WRITE_CSR_ARB_RESPORDERING(csr_addr, index, value) \
+       ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+       ADF_ARB_RO_EN_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_WEIGHT(csr_addr, arb, index, value) \
+       ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+       ADF_ARB_WTR_OFFSET) + (ADF_ARB_WTR_SIZE * arb) + \
+       (ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_SARCONFIG(csr_addr, index, value) \
+       ADF_CSR_WR(csr_addr, ADF_ARB_OFFSET + \
+       (ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_WRK_2_SER_MAP(csr_addr, index, value) \
+       ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+       ADF_ARB_WRK_2_SER_MAP_OFFSET) + \
+       (ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_WQCFG(csr_addr, index, value) \
+       ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+       ADF_ARB_WQCFG_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
+
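+/*
+ * The macros above just compute flat CSR offsets.  For example, weight
+ * register 2 of arbiter 1 sits at ADF_ARB_OFFSET + ADF_ARB_WTR_OFFSET +
+ * 1 * ADF_ARB_WTR_SIZE + 2 * ADF_ARB_REG_SIZE =
+ * 0x30000 + 0x10 + 0x20 + 0x8 = 0x30038.
+ */
+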
+int adf_init_arb(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
+       u32 arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
+       u32 arb, i;
+       const u32 *thd_2_arb_cfg;
+
+       /* Service arb configured for 32-byte responses and
+        * ring flow control check enabled.
+        */
+       for (arb = 0; arb < ADF_ARB_NUM; arb++)
+               WRITE_CSR_ARB_SARCONFIG(csr, arb, arb_cfg);
+
+       /* Setup service weighting */
+       for (arb = 0; arb < ADF_ARB_NUM; arb++)
+               for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
+                       WRITE_CSR_ARB_WEIGHT(csr, arb, i, 0xFFFFFFFF);
+
+       /* Setup ring response ordering */
+       for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
+               WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF);
+
+       /* Setup worker queue registers */
+       for (i = 0; i < hw_data->num_engines; i++)
+               WRITE_CSR_ARB_WQCFG(csr, i, i);
+
+       /* Map worker threads to service arbiters */
+       hw_data->get_arb_mapping(accel_dev, &thd_2_arb_cfg);
+
+       if (!thd_2_arb_cfg)
+               return -EFAULT;
+
+       for (i = 0; i < hw_data->num_engines; i++)
+               WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, *(thd_2_arb_cfg + i));
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_init_arb);
+
+/**
+ * adf_update_ring_arb() - update the ring arbitration register
+ * @ring: Pointer to ring data.
+ *
+ * Function enables or disables rings for arbitration.
+ */
+void adf_update_ring_arb(struct adf_etr_ring_data *ring)
+{
+       WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr,
+                                  ring->bank->bank_number,
+                                  ring->bank->ring_mask & 0xFF);
+}
+EXPORT_SYMBOL_GPL(adf_update_ring_arb);
+
+void adf_exit_arb(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *csr;
+       unsigned int i;
+
+       if (!accel_dev->transport)
+               return;
+
+       csr = accel_dev->transport->banks[0].csr_addr;
+
+       /* Reset arbiter configuration */
+       for (i = 0; i < ADF_ARB_NUM; i++)
+               WRITE_CSR_ARB_SARCONFIG(csr, i, 0);
+
+       /* Shutdown work queue */
+       for (i = 0; i < hw_data->num_engines; i++)
+               WRITE_CSR_ARB_WQCFG(csr, i, 0);
+
+       /* Unmap worker threads to service arbiters */
+       for (i = 0; i < hw_data->num_engines; i++)
+               WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0);
+
+       /* Disable arbitration on all rings */
+       for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
+               WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0);
+}
+EXPORT_SYMBOL_GPL(adf_exit_arb);
index 245f43237a2d8b168b034f8a5568753819f027dc..ac37a89965acb6afa936c4949eb84b81bdca9d74 100644 (file)
@@ -69,7 +69,7 @@ static void adf_service_add(struct service_hndl *service)
  * Function adds the acceleration service to the acceleration framework.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_service_register(struct service_hndl *service)
 {
@@ -94,7 +94,7 @@ static void adf_service_remove(struct service_hndl *service)
  * Function remove the acceleration service from the acceleration framework.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_service_unregister(struct service_hndl *service)
 {
@@ -114,7 +114,7 @@ EXPORT_SYMBOL_GPL(adf_service_unregister);
  * Initialize the ring data structures and the admin comms and arbitration
  * services.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_dev_init(struct adf_accel_dev *accel_dev)
 {
@@ -177,20 +177,6 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
         */
        list_for_each(list_itr, &service_table) {
                service = list_entry(list_itr, struct service_hndl, list);
-               if (!service->admin)
-                       continue;
-               if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to initialise service %s\n",
-                               service->name);
-                       return -EFAULT;
-               }
-               set_bit(accel_dev->accel_id, &service->init_status);
-       }
-       list_for_each(list_itr, &service_table) {
-               service = list_entry(list_itr, struct service_hndl, list);
-               if (service->admin)
-                       continue;
                if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
                        dev_err(&GET_DEV(accel_dev),
                                "Failed to initialise service %s\n",
@@ -201,6 +187,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
        }
 
        hw_data->enable_error_correction(accel_dev);
+       hw_data->enable_vf2pf_comms(accel_dev);
 
        return 0;
 }
@@ -214,10 +201,11 @@ EXPORT_SYMBOL_GPL(adf_dev_init);
  * is ready to be used.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_dev_start(struct adf_accel_dev *accel_dev)
 {
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct service_hndl *service;
        struct list_head *list_itr;
 
@@ -229,22 +217,13 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
        }
        set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
 
-       list_for_each(list_itr, &service_table) {
-               service = list_entry(list_itr, struct service_hndl, list);
-               if (!service->admin)
-                       continue;
-               if (service->event_hld(accel_dev, ADF_EVENT_START)) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to start service %s\n",
-                               service->name);
-                       return -EFAULT;
-               }
-               set_bit(accel_dev->accel_id, &service->start_status);
+       if (hw_data->send_admin_init(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
+               return -EFAULT;
        }
+
        list_for_each(list_itr, &service_table) {
                service = list_entry(list_itr, struct service_hndl, list);
-               if (service->admin)
-                       continue;
                if (service->event_hld(accel_dev, ADF_EVENT_START)) {
                        dev_err(&GET_DEV(accel_dev),
                                "Failed to start service %s\n",
@@ -257,7 +236,8 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
        clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
        set_bit(ADF_STATUS_STARTED, &accel_dev->status);
 
-       if (qat_algs_register()) {
+       if (!list_empty(&accel_dev->crypto_list) &&
+           (qat_algs_register() || qat_asym_algs_register())) {
                dev_err(&GET_DEV(accel_dev),
                        "Failed to register crypto algs\n");
                set_bit(ADF_STATUS_STARTING, &accel_dev->status);
@@ -276,7 +256,7 @@ EXPORT_SYMBOL_GPL(adf_dev_start);
  * is shuting down.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_dev_stop(struct adf_accel_dev *accel_dev)
 {
@@ -292,14 +272,15 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
        clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
        clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
 
-       if (qat_algs_unregister())
+       if (!list_empty(&accel_dev->crypto_list) && qat_algs_unregister())
                dev_err(&GET_DEV(accel_dev),
                        "Failed to unregister crypto algs\n");
 
+       if (!list_empty(&accel_dev->crypto_list))
+               qat_asym_algs_unregister();
+
        list_for_each(list_itr, &service_table) {
                service = list_entry(list_itr, struct service_hndl, list);
-               if (service->admin)
-                       continue;
                if (!test_bit(accel_dev->accel_id, &service->start_status))
                        continue;
                ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
@@ -310,19 +291,6 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
                        clear_bit(accel_dev->accel_id, &service->start_status);
                }
        }
-       list_for_each(list_itr, &service_table) {
-               service = list_entry(list_itr, struct service_hndl, list);
-               if (!service->admin)
-                       continue;
-               if (!test_bit(accel_dev->accel_id, &service->start_status))
-                       continue;
-               if (service->event_hld(accel_dev, ADF_EVENT_STOP))
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to shutdown service %s\n",
-                               service->name);
-               else
-                       clear_bit(accel_dev->accel_id, &service->start_status);
-       }
 
        if (wait)
                msleep(100);
@@ -373,21 +341,6 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
 
        list_for_each(list_itr, &service_table) {
                service = list_entry(list_itr, struct service_hndl, list);
-               if (service->admin)
-                       continue;
-               if (!test_bit(accel_dev->accel_id, &service->init_status))
-                       continue;
-               if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to shutdown service %s\n",
-                               service->name);
-               else
-                       clear_bit(accel_dev->accel_id, &service->init_status);
-       }
-       list_for_each(list_itr, &service_table) {
-               service = list_entry(list_itr, struct service_hndl, list);
-               if (!service->admin)
-                       continue;
                if (!test_bit(accel_dev->accel_id, &service->init_status))
                        continue;
                if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
@@ -413,6 +366,7 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
        if (hw_data->exit_admin_comms)
                hw_data->exit_admin_comms(accel_dev);
 
+       hw_data->disable_iov(accel_dev);
        adf_cleanup_etr_data(accel_dev);
 }
 EXPORT_SYMBOL_GPL(adf_dev_shutdown);
@@ -424,17 +378,6 @@ int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
 
        list_for_each(list_itr, &service_table) {
                service = list_entry(list_itr, struct service_hndl, list);
-               if (service->admin)
-                       continue;
-               if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to restart service %s.\n",
-                               service->name);
-       }
-       list_for_each(list_itr, &service_table) {
-               service = list_entry(list_itr, struct service_hndl, list);
-               if (!service->admin)
-                       continue;
                if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
                        dev_err(&GET_DEV(accel_dev),
                                "Failed to restart service %s.\n",
@@ -450,17 +393,6 @@ int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
 
        list_for_each(list_itr, &service_table) {
                service = list_entry(list_itr, struct service_hndl, list);
-               if (service->admin)
-                       continue;
-               if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to restart service %s.\n",
-                               service->name);
-       }
-       list_for_each(list_itr, &service_table) {
-               service = list_entry(list_itr, struct service_hndl, list);
-               if (!service->admin)
-                       continue;
                if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
                        dev_err(&GET_DEV(accel_dev),
                                "Failed to restart service %s.\n",
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
new file mode 100644 (file)
index 0000000..5fdbad8
--- /dev/null
@@ -0,0 +1,438 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <linux/pci.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pf2vf_msg.h"
+
+#define ADF_DH895XCC_EP_OFFSET 0x3A000
+#define ADF_DH895XCC_ERRMSK3   (ADF_DH895XCC_EP_OFFSET + 0x1C)
+#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) ((vf_mask & 0xFFFF) << 9)
+#define ADF_DH895XCC_ERRMSK5   (ADF_DH895XCC_EP_OFFSET + 0xDC)
+#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)
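+
+/*
+ * VF2PF interrupt sources are routed through the error mask registers:
+ * a set bit masks the source, a cleared bit lets it raise.  VFs 1-16
+ * map to ERRMSK3 (vf_mask[15:0] shifted up by 9) and VFs 17-32 to the
+ * low bits of ERRMSK5 (vf_mask[31:16]).
+ */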
+
+/**
+ * adf_enable_pf2vf_interrupts() - Enable PF to VF interrupts
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function enables PF to VF interrupts
+ */
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *pmisc_bar_addr =
+               pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+       ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
+}
+EXPORT_SYMBOL_GPL(adf_enable_pf2vf_interrupts);
+
+/**
+ * adf_disable_pf2vf_interrupts() - Disable PF to VF interrupts
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function disables PF to VF interrupts
+ */
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *pmisc_bar_addr =
+               pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+       ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
+}
+EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
+
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+                                u32 vf_mask)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_bar *pmisc =
+                       &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+       void __iomem *pmisc_addr = pmisc->virt_addr;
+       u32 reg;
+
+       /* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */
+       if (vf_mask & 0xFFFF) {
+               reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
+               reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
+               ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
+       }
+
+       /* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */
+       if (vf_mask >> 16) {
+               reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
+               reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
+               ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
+       }
+}
+
+/**
+ * adf_disable_vf2pf_interrupts() - Disable VF to PF interrupts
+ * @accel_dev:  Pointer to acceleration device.
+ * @vf_mask:   Mask of the VFs for which to disable the interrupts
+ *
+ * Function disables VF to PF interrupts
+ */
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_bar *pmisc =
+                       &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+       void __iomem *pmisc_addr = pmisc->virt_addr;
+       u32 reg;
+
+       /* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */
+       if (vf_mask & 0xFFFF) {
+               reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) |
+                       ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
+               ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
+       }
+
+       /* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */
+       if (vf_mask >> 16) {
+               reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) |
+                       ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
+               ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
+       }
+}
+EXPORT_SYMBOL_GPL(adf_disable_vf2pf_interrupts);
+
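+/*
+ * The single PF2VF CSR is shared by both directions, so ownership is
+ * negotiated in-band: claim the CSR with the local IN_USE pattern,
+ * sleep briefly, then re-read to detect a write collision with the
+ * remote side; only then raise the interrupt bit and poll until the
+ * remote clears it as an ACK.  The same routine serves PF->VF and
+ * VF->PF, differing only in the offsets, patterns and lock it picks.
+ */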
+static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
+{
+       struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *pmisc_bar_addr =
+               pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+       u32 val, pf2vf_offset, count = 0;
+       u32 local_in_use_mask, local_in_use_pattern;
+       u32 remote_in_use_mask, remote_in_use_pattern;
+       struct mutex *lock;     /* lock preventing concurrent access to the CSR */
+       u32 int_bit;
+       int ret = 0;
+
+       if (accel_dev->is_vf) {
+               pf2vf_offset = hw_data->get_pf2vf_offset(0);
+               lock = &accel_dev->vf.vf2pf_lock;
+               local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
+               local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
+               remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
+               remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
+               int_bit = ADF_VF2PF_INT;
+       } else {
+               pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
+               lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock;
+               local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
+               local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
+               remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
+               remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
+               int_bit = ADF_PF2VF_INT;
+       }
+
+       mutex_lock(lock);
+
+       /* Check if PF2VF CSR is in use by remote function */
+       val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+       if ((val & remote_in_use_mask) == remote_in_use_pattern) {
+               dev_dbg(&GET_DEV(accel_dev),
+                       "PF2VF CSR in use by remote function\n");
+               ret = -EBUSY;
+               goto out;
+       }
+
+       /* Attempt to get ownership of PF2VF CSR */
+       msg &= ~local_in_use_mask;
+       msg |= local_in_use_pattern;
+       ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg);
+
+       /* Wait in case remote func also attempting to get ownership */
+       msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY);
+
+       val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+       if ((val & local_in_use_mask) != local_in_use_pattern) {
+               dev_dbg(&GET_DEV(accel_dev),
+                       "PF2VF CSR in use by remote - collision detected\n");
+               ret = -EBUSY;
+               goto out;
+       }
+
+       /*
+        * This function now owns the PF2VF CSR.  The IN_USE_BY pattern must
+        * remain in the PF2VF CSR for all writes including ACK from remote
+        * until this local function relinquishes the CSR.  Send the message
+        * by interrupting the remote.
+        */
+       ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);
+
+       /* Wait for confirmation from remote func it received the message */
+       do {
+               msleep(ADF_IOV_MSG_ACK_DELAY);
+               val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+       } while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));
+
+       if (val & int_bit) {
+               dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
+               val &= ~int_bit;
+               ret = -EIO;
+       }
+
+       /* Finished with PF2VF CSR; relinquish it and leave msg in CSR */
+       ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
+out:
+       mutex_unlock(lock);
+       return ret;
+}
+
+/**
+ * adf_iov_putmsg() - send PF2VF message
+ * @accel_dev:  Pointer to acceleration device.
+ * @msg:       Message to send
+ * @vf_nr:     VF number to which the message will be sent
+ *
+ * Function sends a message from the PF to a VF
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
+{
+       u32 count = 0;
+       int ret;
+
+       do {
+               ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
+               if (ret)
+                       msleep(ADF_IOV_MSG_RETRY_DELAY);
+       } while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_iov_putmsg);
+
+void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
+{
+       struct adf_accel_dev *accel_dev = vf_info->accel_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       int bar_id = hw_data->get_misc_bar_id(hw_data);
+       struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
+       void __iomem *pmisc_addr = pmisc->virt_addr;
+       u32 msg, resp = 0, vf_nr = vf_info->vf_nr;
+
+       /* Read message from the VF */
+       msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));
+
+       /* To ACK, clear the VF2PFINT bit */
+       msg &= ~ADF_VF2PF_INT;
+       ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);
+
+       if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
+               /* Ignore legacy non-system (non-kernel) VF2PF messages */
+               goto err;
+
+       switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
+       case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
+               {
+               u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
+
+               resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+                        (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
+                         ADF_PF2VF_MSGTYPE_SHIFT) |
+                        (ADF_PFVF_COMPATIBILITY_VERSION <<
+                         ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
+
+               dev_dbg(&GET_DEV(accel_dev),
+                       "Compatibility Version Request from VF%d vers=%u\n",
+                       vf_nr + 1, vf_compat_ver);
+
+               if (vf_compat_ver < hw_data->min_iov_compat_ver) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "VF (vers %d) incompatible with PF (vers %d)\n",
+                               vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+                       resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
+                               ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+               } else if (vf_compat_ver > ADF_PFVF_COMPATIBILITY_VERSION) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "VF (vers %d) compat with PF (vers %d) unkn.\n",
+                               vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+                       resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
+                               ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+               } else {
+                       dev_dbg(&GET_DEV(accel_dev),
+                               "VF (vers %d) compatible with PF (vers %d)\n",
+                               vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+                       resp |= ADF_PF2VF_VF_COMPATIBLE <<
+                               ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+               }
+               }
+               break;
+       case ADF_VF2PF_MSGTYPE_VERSION_REQ:
+               dev_dbg(&GET_DEV(accel_dev),
+                       "Legacy VersionRequest received from VF%d 0x%x\n",
+                       vf_nr + 1, msg);
+               resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+                        (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
+                         ADF_PF2VF_MSGTYPE_SHIFT) |
+                        (ADF_PFVF_COMPATIBILITY_VERSION <<
+                         ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
+               resp |= ADF_PF2VF_VF_COMPATIBLE <<
+                       ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+               /* Set legacy major and minor version num */
+               resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
+                       1 << ADF_PF2VF_MINORVERSION_SHIFT;
+               break;
+       case ADF_VF2PF_MSGTYPE_INIT:
+               {
+               dev_dbg(&GET_DEV(accel_dev),
+                       "Init message received from VF%d 0x%x\n",
+                       vf_nr + 1, msg);
+               vf_info->init = true;
+               }
+               break;
+       case ADF_VF2PF_MSGTYPE_SHUTDOWN:
+               {
+               dev_dbg(&GET_DEV(accel_dev),
+                       "Shutdown message received from VF%d 0x%x\n",
+                       vf_nr + 1, msg);
+               vf_info->init = false;
+               }
+               break;
+       default:
+               goto err;
+       }
+
+       if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
+               dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");
+
+       /* re-enable interrupt on PF from this VF */
+       adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
+       return;
+err:
+       dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",
+               vf_nr + 1, msg);
+}
+
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_vf_info *vf;
+       u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+               (ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
+       int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+
+       for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
+               if (vf->init && adf_iov_putmsg(accel_dev, msg, i))
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to send restarting msg to VF%d\n", i);
+       }
+}
+
+static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
+{
+       unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       u32 msg = 0;
+       int ret;
+
+       msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
+       msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
+       msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
+       BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
+
+       /* Send request from VF to PF */
+       ret = adf_iov_putmsg(accel_dev, msg, 0);
+       if (ret) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to send Compatibility Version Request.\n");
+               return ret;
+       }
+
+       /* Wait for response */
+       if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
+                                        timeout)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "IOV request/response message timeout expired\n");
+               return -EIO;
+       }
+
+       /* Response from PF received, check compatibility */
+       switch (accel_dev->vf.compatible) {
+       case ADF_PF2VF_VF_COMPATIBLE:
+               break;
+       case ADF_PF2VF_VF_COMPAT_UNKNOWN:
+               /* VF is newer than PF and decides whether it is compatible */
+               if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
+                       break;
+               /* fall through */
+       case ADF_PF2VF_VF_INCOMPATIBLE:
+               dev_err(&GET_DEV(accel_dev),
+                       "PF (vers %d) and VF (vers %d) are not compatible\n",
+                       accel_dev->vf.pf_version,
+                       ADF_PFVF_COMPATIBILITY_VERSION);
+               return -EINVAL;
+       default:
+               dev_err(&GET_DEV(accel_dev),
+                       "Invalid response from PF; assume not compatible\n");
+               return -EINVAL;
+       }
+       return ret;
+}
+
+/**
+ * adf_enable_vf2pf_comms() - Function enables communication from VF to PF
+ *
+ * @accel_dev: Pointer to acceleration device (virtual function).
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+       adf_enable_pf2vf_interrupts(accel_dev);
+       return adf_vf2pf_request_version(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
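For concreteness, the restarting notification built by adf_pf2vf_notify_restarting() above resolves to a single register value. A worked illustration using the defines from adf_pf2vf_msg.h (added below); this is an editorial example, not part of the patch:

        u32 msg = ADF_PF2VF_MSGORIGIN_SYSTEM |          /* bit 1     -> 0x00000002 */
                  (ADF_PF2VF_MSGTYPE_RESTARTING <<      /* 0x01 << 2 -> 0x00000004 */
                   ADF_PF2VF_MSGTYPE_SHIFT);
        /*
         * msg == 0x00000006.  __adf_iov_putmsg() then ORs in
         * ADF_PF2VF_IN_USE_BY_PF (0x6AC20000) to claim the CSR, and sets
         * ADF_PF2VF_INT (bit 0) to raise the interrupt, so the VF observes
         * 0x6AC20007 until it clears the int bit as its ACK.
         */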
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
new file mode 100644 (file)
index 0000000..5acd531
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_PF2VF_MSG_H
+#define ADF_PF2VF_MSG_H
+
+/*
+ * PF<->VF Messaging
+ * The PF has an array of 32-bit PF2VF registers, one for each VF.  The
+ * PF can access all these registers; each VF can access only the one
+ * register associated with that particular VF.
+ *
+ * The register is functionally split into two parts:
+ * The bottom half is for PF->VF messages. In particular, when the first
+ * bit of this register (bit 0) gets set, an interrupt is triggered in
+ * the respective VF.
+ * The top half is for VF->PF messages. In particular, when the first bit
+ * of this half of the register (bit 16) gets set, an interrupt is
+ * triggered in the PF.
+ *
+ * The remaining bits within this register are available to encode messages
+ * and to implement a collision control mechanism that prevents concurrent
+ * use of the PF2VF register by both the PF and the VF.
+ *
+ *  31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
+ *  _______________________________________________
+ * |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
+ * +-----------------------------------------------+
+ *  \___________________________/ \_________/ ^   ^
+ *                ^                    ^      |   |
+ *                |                    |      |   VF2PF Int
+ *                |                    |      Message Origin
+ *                |                    Message Type
+ *                Message-specific Data/Reserved
+ *
+ *  15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
+ *  _______________________________________________
+ * |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
+ * +-----------------------------------------------+
+ *  \___________________________/ \_________/ ^   ^
+ *                ^                    ^      |   |
+ *                |                    |      |   PF2VF Int
+ *                |                    |      Message Origin
+ *                |                    Message Type
+ *                Message-specific Data/Reserved
+ *
+ * Message Origin (Should always be 1)
+ * A legacy out-of-tree QAT driver allowed for a set of messages not supported
+ * by this driver; these had a Msg Origin of 0 and are ignored by this driver.
+ *
+ * When a PF or VF attempts to send a message in the lower or upper 16 bits,
+ * respectively, the other 16 bits are written to first with a defined
+ * IN_USE_BY pattern as part of a collision control scheme (see adf_iov_putmsg).
+ */
+
+#define ADF_PFVF_COMPATIBILITY_VERSION         0x1     /* PF<->VF compat */
+
+/* PF->VF messages */
+#define ADF_PF2VF_INT                          BIT(0)
+#define ADF_PF2VF_MSGORIGIN_SYSTEM             BIT(1)
+#define ADF_PF2VF_MSGTYPE_MASK                 0x0000003C
+#define ADF_PF2VF_MSGTYPE_SHIFT                        2
+#define ADF_PF2VF_MSGTYPE_RESTARTING           0x01
+#define ADF_PF2VF_MSGTYPE_VERSION_RESP         0x02
+#define ADF_PF2VF_IN_USE_BY_PF                 0x6AC20000
+#define ADF_PF2VF_IN_USE_BY_PF_MASK            0xFFFE0000
+
+/* PF->VF Version Response */
+#define ADF_PF2VF_VERSION_RESP_VERS_MASK       0x00003FC0
+#define ADF_PF2VF_VERSION_RESP_VERS_SHIFT      6
+#define ADF_PF2VF_VERSION_RESP_RESULT_MASK     0x0000C000
+#define ADF_PF2VF_VERSION_RESP_RESULT_SHIFT    14
+#define ADF_PF2VF_MINORVERSION_SHIFT           6
+#define ADF_PF2VF_MAJORVERSION_SHIFT           10
+#define ADF_PF2VF_VF_COMPATIBLE                        1
+#define ADF_PF2VF_VF_INCOMPATIBLE              2
+#define ADF_PF2VF_VF_COMPAT_UNKNOWN            3
+
+/* VF->PF messages */
+#define ADF_VF2PF_IN_USE_BY_VF                 0x00006AC2
+#define ADF_VF2PF_IN_USE_BY_VF_MASK            0x0000FFFE
+#define ADF_VF2PF_INT                          BIT(16)
+#define ADF_VF2PF_MSGORIGIN_SYSTEM             BIT(17)
+#define ADF_VF2PF_MSGTYPE_MASK                 0x003C0000
+#define ADF_VF2PF_MSGTYPE_SHIFT                        18
+#define ADF_VF2PF_MSGTYPE_INIT                 0x3
+#define ADF_VF2PF_MSGTYPE_SHUTDOWN             0x4
+#define ADF_VF2PF_MSGTYPE_VERSION_REQ          0x5
+#define ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ       0x6
+
+/* VF->PF Compatible Version Request */
+#define ADF_VF2PF_COMPAT_VER_REQ_SHIFT         22
+
+/* Collision detection */
+#define ADF_IOV_MSG_COLLISION_DETECT_DELAY     10
+#define ADF_IOV_MSG_ACK_DELAY                  2
+#define ADF_IOV_MSG_ACK_MAX_RETRY              100
+#define ADF_IOV_MSG_RETRY_DELAY                        5
+#define ADF_IOV_MSG_MAX_RETRIES                        3
+#define ADF_IOV_MSG_RESP_TIMEOUT       (ADF_IOV_MSG_ACK_DELAY * \
+                                        ADF_IOV_MSG_ACK_MAX_RETRY + \
+                                        ADF_IOV_MSG_COLLISION_DETECT_DELAY)
+#endif /* ADF_PF2VF_MSG_H */
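Two notes on the constants above. First, ADF_IOV_MSG_RESP_TIMEOUT works out to 2 ms x 100 retries + 10 ms = 210 ms, i.e. the worst case of the ACK polling loop in __adf_iov_putmsg() plus one collision-detect delay. Second, as a worked example of how the VF->PF defines compose (this mirrors adf_vf2pf_request_version() in adf_pf2vf_msg.c above; illustrative only, not part of the patch):

        u32 msg = ADF_VF2PF_MSGORIGIN_SYSTEM;           /* bit 17    -> 0x00020000 */

        msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ <<
               ADF_VF2PF_MSGTYPE_SHIFT;                 /* 0x6 << 18 -> 0x00180000 */
        msg |= ADF_PFVF_COMPATIBILITY_VERSION <<
               ADF_VF2PF_COMPAT_VER_REQ_SHIFT;          /* 0x1 << 22 -> 0x00400000 */
        /* msg == 0x005A0000: system origin, compat-version request, version 1 */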
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
new file mode 100644 (file)
index 0000000..2f77a4a
--- /dev/null
@@ -0,0 +1,309 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/iommu.h>
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_pf2vf_msg.h"
+
+static struct workqueue_struct *pf2vf_resp_wq;
+
+#define ME2FUNCTION_MAP_A_OFFSET       (0x3A400 + 0x190)
+#define ME2FUNCTION_MAP_A_NUM_REGS     96
+
+#define ME2FUNCTION_MAP_B_OFFSET       (0x3A400 + 0x310)
+#define ME2FUNCTION_MAP_B_NUM_REGS     12
+
+#define ME2FUNCTION_MAP_REG_SIZE       4
+#define ME2FUNCTION_MAP_VALID          BIT(7)
+
+#define READ_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index)              \
+       ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET +           \
+                  ME2FUNCTION_MAP_REG_SIZE * index)
+
+#define WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index, value)      \
+       ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET +           \
+                  ME2FUNCTION_MAP_REG_SIZE * index, value)
+
+#define READ_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index)              \
+       ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET +           \
+                  ME2FUNCTION_MAP_REG_SIZE * index)
+
+#define WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index, value)      \
+       ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET +           \
+                  ME2FUNCTION_MAP_REG_SIZE * index, value)
+
+struct adf_pf2vf_resp {
+       struct work_struct pf2vf_resp_work;
+       struct adf_accel_vf_info *vf_info;
+};
+
+static void adf_iov_send_resp(struct work_struct *work)
+{
+       struct adf_pf2vf_resp *pf2vf_resp =
+               container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
+
+       adf_vf2pf_req_hndl(pf2vf_resp->vf_info);
+       kfree(pf2vf_resp);
+}
+
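+/*
+ * Note: this handler runs in tasklet (softirq) context and must not
+ * sleep, but answering the request ends in __adf_iov_putmsg(), which
+ * uses msleep().  Hence the work is only queued here (GFP_ATOMIC
+ * allocation) and the response is sent from process context via
+ * pf2vf_resp_wq.
+ */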
+static void adf_vf2pf_bh_handler(void *data)
+{
+       struct adf_accel_vf_info *vf_info = (struct adf_accel_vf_info *)data;
+       struct adf_pf2vf_resp *pf2vf_resp;
+
+       pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
+       if (!pf2vf_resp)
+               return;
+
+       pf2vf_resp->vf_info = vf_info;
+       INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
+       queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
+}
+
+static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
+{
+       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+       int totalvfs = pci_sriov_get_totalvfs(pdev);
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_bar *pmisc =
+                       &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+       void __iomem *pmisc_addr = pmisc->virt_addr;
+       struct adf_accel_vf_info *vf_info;
+       int i;
+       u32 reg;
+
+       /* Workqueue for PF2VF responses */
+       pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
+       if (!pf2vf_resp_wq)
+               return -ENOMEM;
+
+       for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
+            i++, vf_info++) {
+               /* This ptr will be populated when VFs are created */
+               vf_info->accel_dev = accel_dev;
+               vf_info->vf_nr = i;
+
+               tasklet_init(&vf_info->vf2pf_bh_tasklet,
+                            (void *)adf_vf2pf_bh_handler,
+                            (unsigned long)vf_info);
+               mutex_init(&vf_info->pf2vf_lock);
+               ratelimit_state_init(&vf_info->vf2pf_ratelimit,
+                                    DEFAULT_RATELIMIT_INTERVAL,
+                                    DEFAULT_RATELIMIT_BURST);
+       }
+
+       /* Set Valid bits in ME Thread to PCIe Function Mapping Group A */
+       for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
+               reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
+               reg |= ME2FUNCTION_MAP_VALID;
+               WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
+       }
+
+       /* Set Valid bits in ME Thread to PCIe Function Mapping Group B */
+       for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
+               reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
+               reg |= ME2FUNCTION_MAP_VALID;
+               WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
+       }
+
+       /* Enable VF to PF interrupts for all VFs */
+       adf_enable_vf2pf_interrupts(accel_dev, GENMASK_ULL(totalvfs - 1, 0));
+
+       /*
+        * Due to the hardware design, when SR-IOV and the ring arbiter
+        * are enabled all the VFs supported in hardware must be enabled in
+        * order for all the hardware resources (i.e. bundles) to be usable.
+        * When SR-IOV is enabled, each of the VFs will own one bundle.
+        */
+       return pci_enable_sriov(pdev, totalvfs);
+}
+
+/**
+ * adf_disable_sriov() - Disable SRIOV for the device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function disables SRIOV for the acceleration device.
+ */
+void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_bar *pmisc =
+                       &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+       void __iomem *pmisc_addr = pmisc->virt_addr;
+       int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
+       struct adf_accel_vf_info *vf;
+       u32 reg;
+       int i;
+
+       if (!accel_dev->pf.vf_info)
+               return;
+
+       adf_pf2vf_notify_restarting(accel_dev);
+
+       pci_disable_sriov(accel_to_pci_dev(accel_dev));
+
+       /* Disable VF to PF interrupts */
+       adf_disable_vf2pf_interrupts(accel_dev, 0xFFFFFFFF);
+
+       /* Clear Valid bits in ME Thread to PCIe Function Mapping Group A */
+       for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
+               reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
+               reg &= ~ME2FUNCTION_MAP_VALID;
+               WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
+       }
+
+       /* Clear Valid bits in ME Thread to PCIe Function Mapping Group B */
+       for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
+               reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
+               reg &= ~ME2FUNCTION_MAP_VALID;
+               WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
+       }
+
+       for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
+               tasklet_disable(&vf->vf2pf_bh_tasklet);
+               tasklet_kill(&vf->vf2pf_bh_tasklet);
+               mutex_destroy(&vf->pf2vf_lock);
+       }
+
+       kfree(accel_dev->pf.vf_info);
+       accel_dev->pf.vf_info = NULL;
+
+       if (pf2vf_resp_wq) {
+               destroy_workqueue(pf2vf_resp_wq);
+               pf2vf_resp_wq = NULL;
+       }
+}
+EXPORT_SYMBOL_GPL(adf_disable_sriov);
+
+/**
+ * adf_sriov_configure() - Enable SRIOV for the device
+ * @pdev:    Pointer to pci device.
+ * @numvfs:  Number of virtual functions requested.
+ *
+ * Function enables SRIOV for the pci device.
+ *
+ * Return: number of VFs enabled on success, error code otherwise.
+ */
+int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+       int totalvfs = pci_sriov_get_totalvfs(pdev);
+       unsigned long val;
+       int ret;
+
+       if (!accel_dev) {
+               dev_err(&pdev->dev, "Failed to find accel_dev\n");
+               return -EFAULT;
+       }
+
+       if (!iommu_present(&pci_bus_type)) {
+               dev_err(&pdev->dev,
+                       "IOMMU must be enabled for SR-IOV to work\n");
+               return -EINVAL;
+       }
+
+       if (accel_dev->pf.vf_info) {
+               dev_info(&pdev->dev, "Already enabled for this device\n");
+               return -EINVAL;
+       }
+
+       if (adf_dev_started(accel_dev)) {
+               if (adf_devmgr_in_reset(accel_dev) ||
+                   adf_dev_in_use(accel_dev)) {
+                       dev_err(&GET_DEV(accel_dev), "Device busy\n");
+                       return -EBUSY;
+               }
+
+               if (adf_dev_stop(accel_dev)) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to stop qat_dev%d\n",
+                               accel_dev->accel_id);
+                       return -EFAULT;
+               }
+
+               adf_dev_shutdown(accel_dev);
+       }
+
+       if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+               return -EFAULT;
+       val = 0;
+       if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                       ADF_NUM_CY, (void *)&val, ADF_DEC))
+               return -EFAULT;
+
+       set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+       /* Allocate memory for VF info structs */
+       accel_dev->pf.vf_info = kcalloc(totalvfs,
+                                       sizeof(struct adf_accel_vf_info),
+                                       GFP_KERNEL);
+       if (!accel_dev->pf.vf_info)
+               return -ENOMEM;
+
+       if (adf_dev_init(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to init qat_dev%d\n",
+                       accel_dev->accel_id);
+               return -EFAULT;
+       }
+
+       if (adf_dev_start(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
+                       accel_dev->accel_id);
+               return -EFAULT;
+       }
+
+       ret = adf_enable_sriov(accel_dev);
+       if (ret)
+               return ret;
+
+       return numvfs;
+}
+EXPORT_SYMBOL_GPL(adf_sriov_configure);
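adf_sriov_configure() is a standard PCI sriov_configure callback: the PCI core invokes it when userspace writes a VF count to the device's sriov_numvfs sysfs attribute. A minimal sketch of the hook-up in a device-specific QAT driver; adf_pci_tbl, adf_probe, adf_remove and the driver name are placeholders, and the real wiring lives outside this hunk:

        static struct pci_driver adf_driver = {
                .id_table        = adf_pci_tbl,
                .name            = "qat_example",
                .probe           = adf_probe,
                .remove          = adf_remove,
                .sriov_configure = adf_sriov_configure,
        };

From userspace (the PCI address below is a placeholder), e.g. echo 32 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs. Note that adf_enable_sriov() always enables all hardware-supported VFs regardless of the requested count, since, per the comment in that function, every VF must own a bundle for the hardware resources to remain usable.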
index db2926bff8a5bc843741b7cf56facb114ab853b7..3865ae8d96d9cda8a96a01d0ccfe49e70b3cf7b6 100644 (file)
@@ -264,6 +264,10 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
                dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
                return -EFAULT;
        }
+       if (ring_num >= ADF_ETR_MAX_RINGS_PER_BANK) {
+               dev_err(&GET_DEV(accel_dev), "Invalid ring number\n");
+               return -EFAULT;
+       }
 
        bank = &transport_data->banks[bank_num];
        if (adf_reserve_ring(bank, ring_num)) {
@@ -285,7 +289,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
                goto err;
 
        /* Enable HW arbitration for the given ring */
-       accel_dev->hw_device->hw_arb_ring_enable(ring);
+       adf_update_ring_arb(ring);
 
        if (adf_ring_debugfs_add(ring, ring_name)) {
                dev_err(&GET_DEV(accel_dev),
@@ -302,14 +306,13 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
 err:
        adf_cleanup_ring(ring);
        adf_unreserve_ring(bank, ring_num);
-       accel_dev->hw_device->hw_arb_ring_disable(ring);
+       adf_update_ring_arb(ring);
        return ret;
 }
 
 void adf_remove_ring(struct adf_etr_ring_data *ring)
 {
        struct adf_etr_bank_data *bank = ring->bank;
-       struct adf_accel_dev *accel_dev = bank->accel_dev;
 
        /* Disable interrupts for the given ring */
        adf_disable_ring_irq(bank, ring->ring_number);
@@ -322,7 +325,7 @@ void adf_remove_ring(struct adf_etr_ring_data *ring)
        adf_ring_debugfs_rm(ring);
        adf_unreserve_ring(bank, ring->ring_number);
        /* Disable HW arbitration for the given ring */
-       accel_dev->hw_device->hw_arb_ring_disable(ring);
+       adf_update_ring_arb(ring);
        adf_cleanup_ring(ring);
 }
 
@@ -463,7 +466,7 @@ err:
  * acceleration device accel_dev.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_init_etr_data(struct adf_accel_dev *accel_dev)
 {
index 160c9a36c9198689d957c010e14930e3b3e8ffae..6ad7e4e1edcadd2a839ed866791b0dfb0d60cb47 100644 (file)
@@ -97,8 +97,9 @@
 #define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
 
 /* Minimum ring buffer size for memory allocation */
-#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
-                               ADF_RING_SIZE_4K : SIZE)
+#define ADF_RING_SIZE_BYTES_MIN(SIZE) \
+       ((SIZE < ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K)) ? \
+               ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) : SIZE)
 #define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
 #define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
                                SIZE) & ~0x4)
index f1e30e24a4191b13f284d863ea096b1b0f5e6c02..46747f01b1d1fe18f3935a444ee76fc75d4349f4 100644 (file)
@@ -249,6 +249,8 @@ struct icp_qat_fw_comn_resp {
 
 #define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
 #define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
+#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6
+#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1
 #define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
 #define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
 #define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
new file mode 100644 (file)
index 0000000..0d7a9b5
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+       * Redistributions of source code must retain the above copyright
+         notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above copyright
+         notice, this list of conditions and the following disclaimer in
+         the documentation and/or other materials provided with the
+         distribution.
+       * Neither the name of Intel Corporation nor the names of its
+         contributors may be used to endorse or promote products derived
+         from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_PKE_
+#define _ICP_QAT_FW_PKE_
+
+#include "icp_qat_fw.h"
+
+struct icp_qat_fw_req_hdr_pke_cd_pars {
+       u64 content_desc_addr;
+       u32 content_desc_resrvd;
+       u32 func_id;
+};
+
+struct icp_qat_fw_req_pke_mid {
+       u64 opaque;
+       u64 src_data_addr;
+       u64 dest_data_addr;
+};
+
+struct icp_qat_fw_req_pke_hdr {
+       u8 resrvd1;
+       u8 resrvd2;
+       u8 service_type;
+       u8 hdr_flags;
+       u16 comn_req_flags;
+       u16 resrvd4;
+       struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars;
+};
+
+struct icp_qat_fw_pke_request {
+       struct icp_qat_fw_req_pke_hdr pke_hdr;
+       struct icp_qat_fw_req_pke_mid pke_mid;
+       u8 output_param_count;
+       u8 input_param_count;
+       u16 resrvd1;
+       u32 resrvd2;
+       u64 next_req_adr;
+};
+
+struct icp_qat_fw_resp_pke_hdr {
+       u8 resrvd1;
+       u8 resrvd2;
+       u8 response_type;
+       u8 hdr_flags;
+       u16 comn_resp_flags;
+       u16 resrvd4;
+};
+
+struct icp_qat_fw_pke_resp {
+       struct icp_qat_fw_resp_pke_hdr pke_resp_hdr;
+       u64 opaque;
+       u64 src_data_addr;
+       u64 dest_data_addr;
+};
+
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS              7
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK                0x1
+#define ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(status_word) \
+       QAT_FIELD_GET(((status_word >> ICP_QAT_FW_COMN_ONE_BYTE_SHIFT) & \
+               ICP_QAT_FW_COMN_SINGLE_BYTE_MASK), \
+               QAT_COMN_RESP_PKE_STATUS_BITPOS, \
+               QAT_COMN_RESP_PKE_STATUS_MASK)
+
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(hdr_t, val) \
+       QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+               ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS, \
+               ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK)
+#endif
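A short sketch of how a PKE completion handler might consume these macros; it assumes the status byte travels in comn_resp_flags and that success is signalled by ICP_QAT_FW_COMN_STATUS_FLAG_OK from icp_qat_fw.h, as for the other QAT_COMN_RESP_* flags (illustrative, not part of the patch):

        /* Returns true if the firmware reported the PKE request as OK. */
        static bool pke_resp_ok(struct icp_qat_fw_pke_resp *resp)
        {
                return ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
                                resp->pke_resp_hdr.comn_resp_flags) ==
                       ICP_QAT_FW_COMN_STATUS_FLAG_OK;
        }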
index df427c0e9e7b2c99c8ee6cbe0c91b98c1ff47c43..2bd913aceaeb5c0370bc428d6c52278d1a4ecd78 100644 (file)
@@ -53,7 +53,6 @@
 #include <crypto/hash.h>
 #include <crypto/algapi.h>
 #include <crypto/authenc.h>
-#include <crypto/rng.h>
 #include <linux/dma-mapping.h>
 #include "adf_accel_devices.h"
 #include "adf_transport.h"
@@ -113,9 +112,6 @@ struct qat_alg_aead_ctx {
        struct crypto_shash *hash_tfm;
        enum icp_qat_hw_auth_algo qat_hash_alg;
        struct qat_crypto_instance *inst;
-       struct crypto_tfm *tfm;
-       uint8_t salt[AES_BLOCK_SIZE];
-       spinlock_t lock;        /* protects qat_alg_aead_ctx struct */
 };
 
 struct qat_alg_ablkcipher_ctx {
@@ -130,11 +126,6 @@ struct qat_alg_ablkcipher_ctx {
        spinlock_t lock;        /* protects qat_alg_ablkcipher_ctx struct */
 };
 
-static int get_current_node(void)
-{
-       return cpu_data(current_thread_info()->cpu).phys_proc_id;
-}
-
 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
 {
        switch (qat_hash_alg) {
@@ -278,12 +269,12 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
                                       ICP_QAT_FW_LA_NO_UPDATE_STATE);
 }
 
-static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
+static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
                                         int alg,
                                         struct crypto_authenc_keys *keys)
 {
-       struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
-       unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
+       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
+       unsigned int digestsize = crypto_aead_authsize(aead_tfm);
        struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
        struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
        struct icp_qat_hw_auth_algo_blk *hash =
@@ -358,12 +349,12 @@ static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
        return 0;
 }
 
-static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx,
+static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
                                         int alg,
                                         struct crypto_authenc_keys *keys)
 {
-       struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
-       unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
+       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
+       unsigned int digestsize = crypto_aead_authsize(aead_tfm);
        struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
        struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
        struct icp_qat_hw_cipher_algo_blk *cipher =
@@ -515,30 +506,27 @@ static int qat_alg_validate_key(int key_len, int *alg)
        return 0;
 }
 
-static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx,
+static int qat_alg_aead_init_sessions(struct crypto_aead *tfm,
                                      const uint8_t *key, unsigned int keylen)
 {
        struct crypto_authenc_keys keys;
        int alg;
 
-       if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
-               return -EFAULT;
-
        if (crypto_authenc_extractkeys(&keys, key, keylen))
                goto bad_key;
 
        if (qat_alg_validate_key(keys.enckeylen, &alg))
                goto bad_key;
 
-       if (qat_alg_aead_init_enc_session(ctx, alg, &keys))
+       if (qat_alg_aead_init_enc_session(tfm, alg, &keys))
                goto error;
 
-       if (qat_alg_aead_init_dec_session(ctx, alg, &keys))
+       if (qat_alg_aead_init_dec_session(tfm, alg, &keys))
                goto error;
 
        return 0;
 bad_key:
-       crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+       crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
 error:
        return -EFAULT;
@@ -567,7 +555,6 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev;
 
-       spin_lock(&ctx->lock);
        if (ctx->enc_cd) {
                /* rekeying */
                dev = &GET_DEV(ctx->inst->accel_dev);
@@ -581,7 +568,6 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
                struct qat_crypto_instance *inst =
                                qat_crypto_get_instance_node(node);
                if (!inst) {
-                       spin_unlock(&ctx->lock);
                        return -EINVAL;
                }
 
@@ -591,19 +577,16 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
                                                  &ctx->enc_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->enc_cd) {
-                       spin_unlock(&ctx->lock);
                        return -ENOMEM;
                }
                ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
                                                  &ctx->dec_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->dec_cd) {
-                       spin_unlock(&ctx->lock);
                        goto out_free_enc;
                }
        }
-       spin_unlock(&ctx->lock);
-       if (qat_alg_aead_init_sessions(ctx, key, keylen))
+       if (qat_alg_aead_init_sessions(tfm, key, keylen))
                goto out_free_all;
 
        return 0;
@@ -654,22 +637,20 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
 }
 
 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
-                              struct scatterlist *assoc, int assoclen,
                               struct scatterlist *sgl,
-                              struct scatterlist *sglout, uint8_t *iv,
-                              uint8_t ivlen,
+                              struct scatterlist *sglout,
                               struct qat_crypto_request *qat_req)
 {
        struct device *dev = &GET_DEV(inst->accel_dev);
-       int i, bufs = 0, sg_nctr = 0;
-       int n = sg_nents(sgl), assoc_n = sg_nents(assoc);
+       int i, sg_nctr = 0;
+       int n = sg_nents(sgl);
        struct qat_alg_buf_list *bufl;
        struct qat_alg_buf_list *buflout = NULL;
        dma_addr_t blp;
        dma_addr_t bloutp = 0;
        struct scatterlist *sg;
        size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
-                       ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
+                       ((1 + n) * sizeof(struct qat_alg_buf));
 
        if (unlikely(!n))
                return -EINVAL;
@@ -683,35 +664,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
        if (unlikely(dma_mapping_error(dev, blp)))
                goto err;
 
-       for_each_sg(assoc, sg, assoc_n, i) {
-               if (!sg->length)
-                       continue;
-
-               if (!(assoclen > 0))
-                       break;
-
-               bufl->bufers[bufs].addr =
-                       dma_map_single(dev, sg_virt(sg),
-                                      min_t(int, assoclen, sg->length),
-                                      DMA_BIDIRECTIONAL);
-               bufl->bufers[bufs].len = min_t(int, assoclen, sg->length);
-               if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
-                       goto err;
-               bufs++;
-               assoclen -= sg->length;
-       }
-
-       if (ivlen) {
-               bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
-                                                        DMA_BIDIRECTIONAL);
-               bufl->bufers[bufs].len = ivlen;
-               if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
-                       goto err;
-               bufs++;
-       }
-
        for_each_sg(sgl, sg, n, i) {
-               int y = sg_nctr + bufs;
+               int y = sg_nctr;
 
                if (!sg->length)
                        continue;
@@ -724,7 +678,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                        goto err;
                sg_nctr++;
        }
-       bufl->num_bufs = sg_nctr + bufs;
+       bufl->num_bufs = sg_nctr;
        qat_req->buf.bl = bufl;
        qat_req->buf.blp = blp;
        qat_req->buf.sz = sz;
@@ -734,7 +688,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 
                n = sg_nents(sglout);
                sz_out = sizeof(struct qat_alg_buf_list) +
-                       ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
+                       ((1 + n) * sizeof(struct qat_alg_buf));
                sg_nctr = 0;
                buflout = kzalloc_node(sz_out, GFP_ATOMIC,
                                       dev_to_node(&GET_DEV(inst->accel_dev)));
@@ -744,14 +698,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                if (unlikely(dma_mapping_error(dev, bloutp)))
                        goto err;
                bufers = buflout->bufers;
-               /* For out of place operation dma map only data and
-                * reuse assoc mapping and iv */
-               for (i = 0; i < bufs; i++) {
-                       bufers[i].len = bufl->bufers[i].len;
-                       bufers[i].addr = bufl->bufers[i].addr;
-               }
                for_each_sg(sglout, sg, n, i) {
-                       int y = sg_nctr + bufs;
+                       int y = sg_nctr;
 
                        if (!sg->length)
                                continue;
@@ -764,7 +712,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                        bufers[y].len = sg->length;
                        sg_nctr++;
                }
-               buflout->num_bufs = sg_nctr + bufs;
+               buflout->num_bufs = sg_nctr;
                buflout->num_mapped_bufs = sg_nctr;
                qat_req->buf.blout = buflout;
                qat_req->buf.bloutp = bloutp;
@@ -778,7 +726,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 err:
        dev_err(dev, "Failed to map buf for dma\n");
        sg_nctr = 0;
-       for (i = 0; i < n + bufs; i++)
+       for (i = 0; i < n; i++)
                if (!dma_mapping_error(dev, bufl->bufers[i].addr))
                        dma_unmap_single(dev, bufl->bufers[i].addr,
                                         bufl->bufers[i].len,
@@ -789,7 +737,7 @@ err:
        kfree(bufl);
        if (sgl != sglout && buflout) {
                n = sg_nents(sglout);
-               for (i = bufs; i < n + bufs; i++)
+               for (i = 0; i < n; i++)
                        if (!dma_mapping_error(dev, buflout->bufers[i].addr))
                                dma_unmap_single(dev, buflout->bufers[i].addr,
                                                 buflout->bufers[i].len,
@@ -849,12 +797,10 @@ static int qat_alg_aead_dec(struct aead_request *areq)
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        struct icp_qat_fw_la_bulk_req *msg;
-       int digst_size = crypto_aead_crt(aead_tfm)->authsize;
+       int digst_size = crypto_aead_authsize(aead_tfm);
        int ret, ctr = 0;
 
-       ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen,
-                                 areq->src, areq->dst, areq->iv,
-                                 AES_BLOCK_SIZE, qat_req);
+       ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
        if (unlikely(ret))
                return ret;
 
@@ -868,12 +814,11 @@ static int qat_alg_aead_dec(struct aead_request *areq)
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = areq->cryptlen - digst_size;
-       cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
+       cipher_param->cipher_offset = areq->assoclen;
        memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
        auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
        auth_param->auth_off = 0;
-       auth_param->auth_len = areq->assoclen +
-                               cipher_param->cipher_length + AES_BLOCK_SIZE;
+       auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);
@@ -885,8 +830,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
        return -EINPROGRESS;
 }
 
-static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
-                                    int enc_iv)
+static int qat_alg_aead_enc(struct aead_request *areq)
 {
        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
@@ -895,11 +839,10 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        struct icp_qat_fw_la_bulk_req *msg;
+       uint8_t *iv = areq->iv;
        int ret, ctr = 0;
 
-       ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen,
-                                 areq->src, areq->dst, iv, AES_BLOCK_SIZE,
-                                 qat_req);
+       ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
        if (unlikely(ret))
                return ret;
 
@@ -914,16 +857,12 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
 
-       if (enc_iv) {
-               cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
-               cipher_param->cipher_offset = areq->assoclen;
-       } else {
-               memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
-               cipher_param->cipher_length = areq->cryptlen;
-               cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
-       }
+       memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
+       cipher_param->cipher_length = areq->cryptlen;
+       cipher_param->cipher_offset = areq->assoclen;
+
        auth_param->auth_off = 0;
-       auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;
+       auth_param->auth_len = areq->assoclen + areq->cryptlen;
 
        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
@@ -936,25 +875,6 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
        return -EINPROGRESS;
 }
 
-static int qat_alg_aead_enc(struct aead_request *areq)
-{
-       return qat_alg_aead_enc_internal(areq, areq->iv, 0);
-}
-
-static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req)
-{
-       struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
-       struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
-       struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-       __be64 seq;
-
-       memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
-       seq = cpu_to_be64(req->seq);
-       memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
-              &seq, sizeof(uint64_t));
-       return qat_alg_aead_enc_internal(&req->areq, req->giv, 1);
-}
-
 static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
                                     const uint8_t *key,
                                     unsigned int keylen)
@@ -1026,8 +946,7 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
        struct icp_qat_fw_la_bulk_req *msg;
        int ret, ctr = 0;
 
-       ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst,
-                                 NULL, 0, qat_req);
+       ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
        if (unlikely(ret))
                return ret;
 
@@ -1064,8 +983,7 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
        struct icp_qat_fw_la_bulk_req *msg;
        int ret, ctr = 0;
 
-       ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst,
-                                 NULL, 0, qat_req);
+       ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
        if (unlikely(ret))
                return ret;
 
@@ -1092,47 +1010,43 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
        return -EINPROGRESS;
 }
 
-static int qat_alg_aead_init(struct crypto_tfm *tfm,
+static int qat_alg_aead_init(struct crypto_aead *tfm,
                             enum icp_qat_hw_auth_algo hash,
                             const char *hash_name)
 {
-       struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
 
        ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
        if (IS_ERR(ctx->hash_tfm))
-               return -EFAULT;
-       spin_lock_init(&ctx->lock);
+               return PTR_ERR(ctx->hash_tfm);
        ctx->qat_hash_alg = hash;
-       crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
-               sizeof(struct aead_request) +
-               sizeof(struct qat_crypto_request));
-       ctx->tfm = tfm;
+       crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
+                                    sizeof(struct qat_crypto_request));
        return 0;
 }
 
-static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
 {
        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
 }
 
-static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
 {
        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
 }
 
-static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
 {
        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
 }
 
-static void qat_alg_aead_exit(struct crypto_tfm *tfm)
+static void qat_alg_aead_exit(struct crypto_aead *tfm)
 {
-       struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev;
 
-       if (!IS_ERR(ctx->hash_tfm))
-               crypto_free_shash(ctx->hash_tfm);
+       crypto_free_shash(ctx->hash_tfm);
 
        if (!inst)
                return;
@@ -1189,73 +1103,61 @@ static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
        qat_crypto_put_instance(inst);
 }
 
-static struct crypto_alg qat_algs[] = { {
-       .cra_name = "authenc(hmac(sha1),cbc(aes))",
-       .cra_driver_name = "qat_aes_cbc_hmac_sha1",
-       .cra_priority = 4001,
-       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-       .cra_blocksize = AES_BLOCK_SIZE,
-       .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
-       .cra_alignmask = 0,
-       .cra_type = &crypto_aead_type,
-       .cra_module = THIS_MODULE,
-       .cra_init = qat_alg_aead_sha1_init,
-       .cra_exit = qat_alg_aead_exit,
-       .cra_u = {
-               .aead = {
-                       .setkey = qat_alg_aead_setkey,
-                       .decrypt = qat_alg_aead_dec,
-                       .encrypt = qat_alg_aead_enc,
-                       .givencrypt = qat_alg_aead_genivenc,
-                       .ivsize = AES_BLOCK_SIZE,
-                       .maxauthsize = SHA1_DIGEST_SIZE,
-               },
+
+static struct aead_alg qat_aeads[] = { {
+       .base = {
+               .cra_name = "authenc(hmac(sha1),cbc(aes))",
+               .cra_driver_name = "qat_aes_cbc_hmac_sha1",
+               .cra_priority = 4001,
+               .cra_flags = CRYPTO_ALG_ASYNC,
+               .cra_blocksize = AES_BLOCK_SIZE,
+               .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+               .cra_module = THIS_MODULE,
        },
+       .init = qat_alg_aead_sha1_init,
+       .exit = qat_alg_aead_exit,
+       .setkey = qat_alg_aead_setkey,
+       .decrypt = qat_alg_aead_dec,
+       .encrypt = qat_alg_aead_enc,
+       .ivsize = AES_BLOCK_SIZE,
+       .maxauthsize = SHA1_DIGEST_SIZE,
 }, {
-       .cra_name = "authenc(hmac(sha256),cbc(aes))",
-       .cra_driver_name = "qat_aes_cbc_hmac_sha256",
-       .cra_priority = 4001,
-       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-       .cra_blocksize = AES_BLOCK_SIZE,
-       .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
-       .cra_alignmask = 0,
-       .cra_type = &crypto_aead_type,
-       .cra_module = THIS_MODULE,
-       .cra_init = qat_alg_aead_sha256_init,
-       .cra_exit = qat_alg_aead_exit,
-       .cra_u = {
-               .aead = {
-                       .setkey = qat_alg_aead_setkey,
-                       .decrypt = qat_alg_aead_dec,
-                       .encrypt = qat_alg_aead_enc,
-                       .givencrypt = qat_alg_aead_genivenc,
-                       .ivsize = AES_BLOCK_SIZE,
-                       .maxauthsize = SHA256_DIGEST_SIZE,
-               },
+       .base = {
+               .cra_name = "authenc(hmac(sha256),cbc(aes))",
+               .cra_driver_name = "qat_aes_cbc_hmac_sha256",
+               .cra_priority = 4001,
+               .cra_flags = CRYPTO_ALG_ASYNC,
+               .cra_blocksize = AES_BLOCK_SIZE,
+               .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+               .cra_module = THIS_MODULE,
        },
+       .init = qat_alg_aead_sha256_init,
+       .exit = qat_alg_aead_exit,
+       .setkey = qat_alg_aead_setkey,
+       .decrypt = qat_alg_aead_dec,
+       .encrypt = qat_alg_aead_enc,
+       .ivsize = AES_BLOCK_SIZE,
+       .maxauthsize = SHA256_DIGEST_SIZE,
 }, {
-       .cra_name = "authenc(hmac(sha512),cbc(aes))",
-       .cra_driver_name = "qat_aes_cbc_hmac_sha512",
-       .cra_priority = 4001,
-       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-       .cra_blocksize = AES_BLOCK_SIZE,
-       .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
-       .cra_alignmask = 0,
-       .cra_type = &crypto_aead_type,
-       .cra_module = THIS_MODULE,
-       .cra_init = qat_alg_aead_sha512_init,
-       .cra_exit = qat_alg_aead_exit,
-       .cra_u = {
-               .aead = {
-                       .setkey = qat_alg_aead_setkey,
-                       .decrypt = qat_alg_aead_dec,
-                       .encrypt = qat_alg_aead_enc,
-                       .givencrypt = qat_alg_aead_genivenc,
-                       .ivsize = AES_BLOCK_SIZE,
-                       .maxauthsize = SHA512_DIGEST_SIZE,
-               },
+       .base = {
+               .cra_name = "authenc(hmac(sha512),cbc(aes))",
+               .cra_driver_name = "qat_aes_cbc_hmac_sha512",
+               .cra_priority = 4001,
+               .cra_flags = CRYPTO_ALG_ASYNC,
+               .cra_blocksize = AES_BLOCK_SIZE,
+               .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+               .cra_module = THIS_MODULE,
        },
-}, {
+       .init = qat_alg_aead_sha512_init,
+       .exit = qat_alg_aead_exit,
+       .setkey = qat_alg_aead_setkey,
+       .decrypt = qat_alg_aead_dec,
+       .encrypt = qat_alg_aead_enc,
+       .ivsize = AES_BLOCK_SIZE,
+       .maxauthsize = SHA512_DIGEST_SIZE,
+} };
+
+static struct crypto_alg qat_algs[] = { {
        .cra_name = "cbc(aes)",
        .cra_driver_name = "qat_aes_cbc",
        .cra_priority = 4001,
@@ -1281,42 +1183,54 @@ static struct crypto_alg qat_algs[] = { {
 
 int qat_algs_register(void)
 {
-       int ret = 0;
+       int ret = 0, i;
 
        mutex_lock(&algs_lock);
-       if (++active_devs == 1) {
-               int i;
+       if (++active_devs != 1)
+               goto unlock;
 
-               for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
-                       qat_algs[i].cra_flags =
-                               (qat_algs[i].cra_type == &crypto_aead_type) ?
-                               CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
-                               CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+       for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
+               qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
 
-               ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
-       }
+       ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+       if (ret)
+               goto unlock;
+
+       for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
+               qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;
+
+       ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+       if (ret)
+               goto unreg_algs;
+
+unlock:
        mutex_unlock(&algs_lock);
        return ret;
+
+unreg_algs:
+       crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+       goto unlock;
 }
 
 int qat_algs_unregister(void)
 {
-       int ret = 0;
-
        mutex_lock(&algs_lock);
-       if (--active_devs == 0)
-               ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+       if (--active_devs != 0)
+               goto unlock;
+
+       crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+       crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+
+unlock:
        mutex_unlock(&algs_lock);
-       return ret;
+       return 0;
 }
 
 int qat_algs_init(void)
 {
-       crypto_get_default_rng();
        return 0;
 }
 
 void qat_algs_exit(void)
 {
-       crypto_put_default_rng();
 }
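
The register/unregister pair above guards global algorithm registration with a device refcount: only the first QAT device to come up registers the algorithms with the crypto API, and only the last one to go away unregisters them. A minimal standalone sketch of that guard (do_register()/do_unregister() are hypothetical stand-ins, not driver functions):

static DEFINE_MUTEX(refs_lock);
static unsigned int refs;

int sketch_register(void)
{
        int ret = 0;

        mutex_lock(&refs_lock);
        if (++refs == 1)
                ret = do_register();            /* hypothetical */
        mutex_unlock(&refs_lock);
        return ret;
}

void sketch_unregister(void)
{
        mutex_lock(&refs_lock);
        if (--refs == 0)
                do_unregister();                /* hypothetical */
        mutex_unlock(&refs_lock);
}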
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
new file mode 100644 (file)
index 0000000..e87f510
--- /dev/null
@@ -0,0 +1,652 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+       * Redistributions of source code must retain the above copyright
+         notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above copyright
+         notice, this list of conditions and the following disclaimer in
+         the documentation and/or other materials provided with the
+         distribution.
+       * Neither the name of Intel Corporation nor the names of its
+         contributors may be used to endorse or promote products derived
+         from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <linux/module.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+#include <linux/dma-mapping.h>
+#include <linux/fips.h>
+#include "qat_rsakey-asn1.h"
+#include "icp_qat_fw_pke.h"
+#include "adf_accel_devices.h"
+#include "adf_transport.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+struct qat_rsa_input_params {
+       union {
+               struct {
+                       dma_addr_t m;
+                       dma_addr_t e;
+                       dma_addr_t n;
+               } enc;
+               struct {
+                       dma_addr_t c;
+                       dma_addr_t d;
+                       dma_addr_t n;
+               } dec;
+               u64 in_tab[8];
+       };
+} __packed __aligned(64);
+
+struct qat_rsa_output_params {
+       union {
+               struct {
+                       dma_addr_t c;
+               } enc;
+               struct {
+                       dma_addr_t m;
+               } dec;
+               u64 out_tab[8];
+       };
+} __packed __aligned(64);
+
+struct qat_rsa_ctx {
+       char *n;
+       char *e;
+       char *d;
+       dma_addr_t dma_n;
+       dma_addr_t dma_e;
+       dma_addr_t dma_d;
+       unsigned int key_sz;
+       struct qat_crypto_instance *inst;
+} __packed __aligned(64);
+
+struct qat_rsa_request {
+       struct qat_rsa_input_params in;
+       struct qat_rsa_output_params out;
+       dma_addr_t phy_in;
+       dma_addr_t phy_out;
+       char *src_align;
+       struct icp_qat_fw_pke_request req;
+       struct qat_rsa_ctx *ctx;
+       int err;
+} __aligned(64);
+
+static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
+{
+       struct akcipher_request *areq = (void *)(__force long)resp->opaque;
+       struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64);
+       struct device *dev = &GET_DEV(req->ctx->inst->accel_dev);
+       int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+                               resp->pke_resp_hdr.comn_resp_flags);
+       char *ptr = areq->dst;
+
+       err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
+
+       if (req->src_align)
+               dma_free_coherent(dev, req->ctx->key_sz, req->src_align,
+                                 req->in.enc.m);
+       else
+               dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz,
+                                DMA_TO_DEVICE);
+
+       dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz,
+                        DMA_FROM_DEVICE);
+       dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
+                        DMA_TO_DEVICE);
+       dma_unmap_single(dev, req->phy_out,
+                        sizeof(struct qat_rsa_output_params),
+                        DMA_TO_DEVICE);
+
+       areq->dst_len = req->ctx->key_sz;
+       /* Need to set the correct length of the output */
+       while (areq->dst_len && !(*ptr)) {
+               areq->dst_len--;
+               ptr++;
+       }
+
+       if (areq->dst_len != req->ctx->key_sz)
+               memmove(areq->dst, ptr, areq->dst_len);
+
+       akcipher_request_complete(areq, err);
+}
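+
/*
 * Sketch, not part of the patch: the completion handler above reports
 * the minimal big-endian length of the PKE output (the HW always writes
 * a full key_sz-byte result) by stripping leading zero bytes and moving
 * the remainder to the front of dst. The same trim in isolation:
 */
static unsigned int rsa_min_len(const char *buf, unsigned int len)
{
        while (len && !*buf) {
                buf++;
                len--;
        }
        return len;
}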
+
+void qat_alg_asym_callback(void *_resp)
+{
+       struct icp_qat_fw_pke_resp *resp = _resp;
+
+       qat_rsa_cb(resp);
+}
+
+#define PKE_RSA_EP_512 0x1c161b21
+#define PKE_RSA_EP_1024 0x35111bf7
+#define PKE_RSA_EP_1536 0x4d111cdc
+#define PKE_RSA_EP_2048 0x6e111dba
+#define PKE_RSA_EP_3072 0x7d111ea3
+#define PKE_RSA_EP_4096 0xa5101f7e
+
+static unsigned long qat_rsa_enc_fn_id(unsigned int len)
+{
+       unsigned int bitslen = len << 3;
+
+       switch (bitslen) {
+       case 512:
+               return PKE_RSA_EP_512;
+       case 1024:
+               return PKE_RSA_EP_1024;
+       case 1536:
+               return PKE_RSA_EP_1536;
+       case 2048:
+               return PKE_RSA_EP_2048;
+       case 3072:
+               return PKE_RSA_EP_3072;
+       case 4096:
+               return PKE_RSA_EP_4096;
+       default:
+               return 0;
+       }
+}
+
+#define PKE_RSA_DP1_512 0x1c161b3c
+#define PKE_RSA_DP1_1024 0x35111c12
+#define PKE_RSA_DP1_1536 0x4d111cf7
+#define PKE_RSA_DP1_2048 0x6e111dda
+#define PKE_RSA_DP1_3072 0x7d111ebe
+#define PKE_RSA_DP1_4096 0xa5101f98
+
+static unsigned long qat_rsa_dec_fn_id(unsigned int len)
+{
+       unsigned int bitslen = len << 3;
+
+       switch (bitslen) {
+       case 512:
+               return PKE_RSA_DP1_512;
+       case 1024:
+               return PKE_RSA_DP1_1024;
+       case 1536:
+               return PKE_RSA_DP1_1536;
+       case 2048:
+               return PKE_RSA_DP1_2048;
+       case 3072:
+               return PKE_RSA_DP1_3072;
+       case 4096:
+               return PKE_RSA_DP1_4096;
+       default:
+               return 0;
+       }
+}
+
+static int qat_rsa_enc(struct akcipher_request *req)
+{
+       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev = &GET_DEV(inst->accel_dev);
+       struct qat_rsa_request *qat_req =
+                       PTR_ALIGN(akcipher_request_ctx(req), 64);
+       struct icp_qat_fw_pke_request *msg = &qat_req->req;
+       int ret, ctr = 0;
+
+       if (unlikely(!ctx->n || !ctx->e))
+               return -EINVAL;
+
+       if (req->dst_len < ctx->key_sz) {
+               req->dst_len = ctx->key_sz;
+               return -EOVERFLOW;
+       }
+       memset(msg, '\0', sizeof(*msg));
+       ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+                                         ICP_QAT_FW_COMN_REQ_FLAG_SET);
+       msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
+       if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+               return -EINVAL;
+
+       qat_req->ctx = ctx;
+       msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+       msg->pke_hdr.comn_req_flags =
+               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+                                           QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+       qat_req->in.enc.e = ctx->dma_e;
+       qat_req->in.enc.n = ctx->dma_n;
+       ret = -ENOMEM;
+
+       /*
+        * src can be of any size within the valid range, but the HW expects
+        * it to be the same size as the modulus n, so if it is shorter we
+        * need to allocate a new buffer and copy the src data into it,
+        * left-padded with zeros. Otherwise we just map the user-provided
+        * buffer directly.
+        */
+       if (req->src_len < ctx->key_sz) {
+               int shift = ctx->key_sz - req->src_len;
+
+               qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
+                                                        &qat_req->in.enc.m,
+                                                        GFP_KERNEL);
+               if (unlikely(!qat_req->src_align))
+                       return ret;
+
+               memcpy(qat_req->src_align + shift, req->src, req->src_len);
+       } else {
+               qat_req->src_align = NULL;
+               qat_req->in.enc.m = dma_map_single(dev, req->src, req->src_len,
+                                          DMA_TO_DEVICE);
+       }
+       qat_req->in.in_tab[3] = 0;
+       qat_req->out.enc.c = dma_map_single(dev, req->dst, req->dst_len,
+                                           DMA_FROM_DEVICE);
+       qat_req->out.out_tab[1] = 0;
+       qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m,
+                                        sizeof(struct qat_rsa_input_params),
+                                        DMA_TO_DEVICE);
+       qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c,
+                                         sizeof(struct qat_rsa_output_params),
+                                         DMA_TO_DEVICE);
+
+       if (unlikely((!qat_req->src_align &&
+                     dma_mapping_error(dev, qat_req->in.enc.m)) ||
+                    dma_mapping_error(dev, qat_req->out.enc.c) ||
+                    dma_mapping_error(dev, qat_req->phy_in) ||
+                    dma_mapping_error(dev, qat_req->phy_out)))
+               goto unmap;
+
+       msg->pke_mid.src_data_addr = qat_req->phy_in;
+       msg->pke_mid.dest_data_addr = qat_req->phy_out;
+       msg->pke_mid.opaque = (uint64_t)(__force long)req;
+       msg->input_param_count = 3;
+       msg->output_param_count = 1;
+       do {
+               ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+       } while (ret == -EBUSY && ctr++ < 100);
+
+       if (!ret)
+               return -EINPROGRESS;
+unmap:
+       if (qat_req->src_align)
+               dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+                                 qat_req->in.enc.m);
+       else
+               if (!dma_mapping_error(dev, qat_req->in.enc.m))
+                       dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
+                                        DMA_TO_DEVICE);
+       if (!dma_mapping_error(dev, qat_req->out.enc.c))
+               dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz,
+                                DMA_FROM_DEVICE);
+       if (!dma_mapping_error(dev, qat_req->phy_in))
+               dma_unmap_single(dev, qat_req->phy_in,
+                                sizeof(struct qat_rsa_input_params),
+                                DMA_TO_DEVICE);
+       if (!dma_mapping_error(dev, qat_req->phy_out))
+               dma_unmap_single(dev, qat_req->phy_out,
+                                sizeof(struct qat_rsa_output_params),
+                                DMA_TO_DEVICE);
+       return ret;
+}
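+
/*
 * Sketch, not part of the patch: both qat_rsa_enc() and qat_rsa_dec()
 * submit with a bounded busy-retry, since adf_send_message() returns
 * -EBUSY while the PKE ring is full; on success the request completes
 * asynchronously via qat_rsa_cb(). The pattern in isolation (the helper
 * name is hypothetical):
 */
static int qat_pke_submit(struct adf_etr_ring_data *ring, uint32_t *msg)
{
        int ret, ctr = 0;

        do {
                ret = adf_send_message(ring, msg);
        } while (ret == -EBUSY && ctr++ < 100);

        return ret ? ret : -EINPROGRESS;
}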
+
+static int qat_rsa_dec(struct akcipher_request *req)
+{
+       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev = &GET_DEV(inst->accel_dev);
+       struct qat_rsa_request *qat_req =
+                       PTR_ALIGN(akcipher_request_ctx(req), 64);
+       struct icp_qat_fw_pke_request *msg = &qat_req->req;
+       int ret, ctr = 0;
+
+       if (unlikely(!ctx->n || !ctx->d))
+               return -EINVAL;
+
+       if (req->dst_len < ctx->key_sz) {
+               req->dst_len = ctx->key_sz;
+               return -EOVERFLOW;
+       }
+       memset(msg, '\0', sizeof(*msg));
+       ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+                                         ICP_QAT_FW_COMN_REQ_FLAG_SET);
+       msg->pke_hdr.cd_pars.func_id = qat_rsa_dec_fn_id(ctx->key_sz);
+       if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+               return -EINVAL;
+
+       qat_req->ctx = ctx;
+       msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+       msg->pke_hdr.comn_req_flags =
+               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+                                           QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+       qat_req->in.dec.d = ctx->dma_d;
+       qat_req->in.dec.n = ctx->dma_n;
+       ret = -ENOMEM;
+
+       /*
+        * src can be of any size within the valid range, but the HW expects
+        * it to be the same size as the modulus n, so if it is shorter we
+        * need to allocate a new buffer and copy the src data into it,
+        * left-padded with zeros. Otherwise we just map the user-provided
+        * buffer directly.
+        */
+       if (req->src_len < ctx->key_sz) {
+               int shift = ctx->key_sz - req->src_len;
+
+               qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
+                                                        &qat_req->in.dec.c,
+                                                        GFP_KERNEL);
+               if (unlikely(!qat_req->src_align))
+                       return ret;
+
+               memcpy(qat_req->src_align + shift, req->src, req->src_len);
+       } else {
+               qat_req->src_align = NULL;
+               qat_req->in.dec.c = dma_map_single(dev, req->src, req->src_len,
+                                                  DMA_TO_DEVICE);
+       }
+       qat_req->in.in_tab[3] = 0;
+       qat_req->out.dec.m = dma_map_single(dev, req->dst, req->dst_len,
+                                           DMA_FROM_DEVICE);
+       qat_req->out.out_tab[1] = 0;
+       qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c,
+                                        sizeof(struct qat_rsa_input_params),
+                                        DMA_TO_DEVICE);
+       qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m,
+                                         sizeof(struct qat_rsa_output_params),
+                                         DMA_TO_DEVICE);
+
+       if (unlikely((!qat_req->src_align &&
+                     dma_mapping_error(dev, qat_req->in.dec.c)) ||
+                    dma_mapping_error(dev, qat_req->out.dec.m) ||
+                    dma_mapping_error(dev, qat_req->phy_in) ||
+                    dma_mapping_error(dev, qat_req->phy_out)))
+               goto unmap;
+
+       msg->pke_mid.src_data_addr = qat_req->phy_in;
+       msg->pke_mid.dest_data_addr = qat_req->phy_out;
+       msg->pke_mid.opaque = (uint64_t)(__force long)req;
+       msg->input_param_count = 3;
+       msg->output_param_count = 1;
+       do {
+               ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+       } while (ret == -EBUSY && ctr++ < 100);
+
+       if (!ret)
+               return -EINPROGRESS;
+unmap:
+       if (qat_req->src_align)
+               dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+                                 qat_req->in.dec.c);
+       else
+               if (!dma_mapping_error(dev, qat_req->in.dec.c))
+                       dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
+                                        DMA_TO_DEVICE);
+       if (!dma_mapping_error(dev, qat_req->out.dec.m))
+               dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz,
+                                DMA_FROM_DEVICE);
+       if (!dma_mapping_error(dev, qat_req->phy_in))
+               dma_unmap_single(dev, qat_req->phy_in,
+                                sizeof(struct qat_rsa_input_params),
+                                DMA_TO_DEVICE);
+       if (!dma_mapping_error(dev, qat_req->phy_out))
+               dma_unmap_single(dev, qat_req->phy_out,
+                                sizeof(struct qat_rsa_output_params),
+                                DMA_TO_DEVICE);
+       return ret;
+}
+
+int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
+                 const void *value, size_t vlen)
+{
+       struct qat_rsa_ctx *ctx = context;
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev = &GET_DEV(inst->accel_dev);
+       const char *ptr = value;
+       int ret;
+
+       while (vlen && !*ptr) {
+               ptr++;
+               vlen--;
+       }
+
+       ctx->key_sz = vlen;
+       ret = -EINVAL;
+       /* In FIPS mode only allow 2048- and 3072-bit keys */
+       if (fips_enabled && (ctx->key_sz != 256 && ctx->key_sz != 384)) {
+               pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
+               goto err;
+       }
+       /* invalid key size provided */
+       if (!qat_rsa_enc_fn_id(ctx->key_sz))
+               goto err;
+
+       ret = -ENOMEM;
+       ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
+       if (!ctx->n)
+               goto err;
+
+       memcpy(ctx->n, ptr, ctx->key_sz);
+       return 0;
+err:
+       ctx->key_sz = 0;
+       ctx->n = NULL;
+       return ret;
+}
+
+int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
+                 const void *value, size_t vlen)
+{
+       struct qat_rsa_ctx *ctx = context;
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev = &GET_DEV(inst->accel_dev);
+       const char *ptr = value;
+
+       while (vlen && !*ptr) {
+               ptr++;
+               vlen--;
+       }
+
+       if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
+               ctx->e = NULL;
+               return -EINVAL;
+       }
+
+       ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
+       if (!ctx->e) {
+               ctx->e = NULL;
+               return -ENOMEM;
+       }
+       memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
+       return 0;
+}
+
+int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
+                 const void *value, size_t vlen)
+{
+       struct qat_rsa_ctx *ctx = context;
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev = &GET_DEV(inst->accel_dev);
+       const char *ptr = value;
+       int ret;
+
+       while (vlen && !*ptr) {
+               ptr++;
+               vlen--;
+       }
+
+       ret = -EINVAL;
+       if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
+               goto err;
+
+       /* In FIPS mode only allow 2048- and 3072-bit keys */
+       if (fips_enabled && (vlen != 256 && vlen != 384)) {
+               pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
+               goto err;
+       }
+
+       ret = -ENOMEM;
+       ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
+       if (!ctx->d)
+               goto err;
+
+       memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
+       return 0;
+err:
+       ctx->d = NULL;
+       return ret;
+}
+
+static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+                         unsigned int keylen)
+{
+       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+       int ret;
+
+       /* Free the old key if any */
+       if (ctx->n)
+               dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+       if (ctx->e)
+               dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+       if (ctx->d) {
+               memset(ctx->d, '\0', ctx->key_sz);
+               dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+       }
+
+       ctx->n = NULL;
+       ctx->e = NULL;
+       ctx->d = NULL;
+       ret = asn1_ber_decoder(&qat_rsakey_decoder, ctx, key, keylen);
+       if (ret < 0)
+               goto free;
+
+       if (!ctx->n || !ctx->e) {
+               /* invalid key provided */
+               ret = -EINVAL;
+               goto free;
+       }
+
+       return 0;
+free:
+       if (ctx->d) {
+               memset(ctx->d, '\0', ctx->key_sz);
+               dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+               ctx->d = NULL;
+       }
+       if (ctx->e) {
+               dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+               ctx->e = NULL;
+       }
+       if (ctx->n) {
+               dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+               ctx->n = NULL;
+               ctx->key_sz = 0;
+       }
+       return ret;
+}
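+
/*
 * Sketch, not part of the patch: a kernel caller exercising the "rsa"
 * akcipher that setkey() above serves. This assumes the flat-buffer
 * request API of this kernel generation (the driver maps req->src and
 * req->dst with dma_map_single(), not scatterlists) and waits for the
 * async completion with a plain struct completion.
 */
#include <linux/completion.h>
#include <crypto/akcipher.h>

struct rsa_wait {
        struct completion done;
        int err;
};

static void rsa_done(struct crypto_async_request *areq, int err)
{
        struct rsa_wait *w = areq->data;

        if (err == -EINPROGRESS)        /* backlog notification, keep waiting */
                return;
        w->err = err;
        complete(&w->done);
}

static int rsa_encrypt_once(const void *key, unsigned int keylen,
                            void *src, unsigned int src_len,
                            void *dst, unsigned int dst_len)
{
        struct crypto_akcipher *tfm;
        struct akcipher_request *req;
        struct rsa_wait w = { .err = 0 };
        int ret;

        init_completion(&w.done);

        tfm = crypto_alloc_akcipher("rsa", 0, 0);   /* may bind to qat-rsa */
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_akcipher_setkey(tfm, key, keylen);
        if (ret)
                goto free_tfm;

        req = akcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto free_tfm;
        }

        akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      rsa_done, &w);
        akcipher_request_set_crypt(req, src, dst, src_len, dst_len);

        ret = crypto_akcipher_encrypt(req);
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                wait_for_completion(&w.done);
                ret = w.err;
        }

        akcipher_request_free(req);
free_tfm:
        crypto_free_akcipher(tfm);
        return ret;
}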
+
+static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct qat_crypto_instance *inst =
+                       qat_crypto_get_instance_node(get_current_node());
+
+       if (!inst)
+               return -EINVAL;
+
+       ctx->key_sz = 0;
+       ctx->inst = inst;
+       return 0;
+}
+
+static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+       if (ctx->n)
+               dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+       if (ctx->e)
+               dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+       if (ctx->d) {
+               memset(ctx->d, '\0', ctx->key_sz);
+               dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+       }
+       qat_crypto_put_instance(ctx->inst);
+       ctx->n = NULL;
+       ctx->e = NULL;
+       ctx->d = NULL;
+}
+
+static struct akcipher_alg rsa = {
+       .encrypt = qat_rsa_enc,
+       .decrypt = qat_rsa_dec,
+       .sign = qat_rsa_dec,
+       .verify = qat_rsa_enc,
+       .setkey = qat_rsa_setkey,
+       .init = qat_rsa_init_tfm,
+       .exit = qat_rsa_exit_tfm,
+       .reqsize = sizeof(struct qat_rsa_request) + 64,
+       .base = {
+               .cra_name = "rsa",
+               .cra_driver_name = "qat-rsa",
+               .cra_priority = 1000,
+               .cra_module = THIS_MODULE,
+               .cra_ctxsize = sizeof(struct qat_rsa_ctx),
+       },
+};
+
+int qat_asym_algs_register(void)
+{
+       int ret = 0;
+
+       mutex_lock(&algs_lock);
+       if (++active_devs == 1) {
+               rsa.base.cra_flags = 0;
+               ret = crypto_register_akcipher(&rsa);
+       }
+       mutex_unlock(&algs_lock);
+       return ret;
+}
+
+void qat_asym_algs_unregister(void)
+{
+       mutex_lock(&algs_lock);
+       if (--active_devs == 0)
+               crypto_unregister_akcipher(&rsa);
+       mutex_unlock(&algs_lock);
+}
index 3bd705ca5973c1d02598731d387afb9c12da5caf..07c2f9f9d1fcd8831938e4c10711dd2a6a736acb 100644 (file)
@@ -88,12 +88,6 @@ static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
                if (inst->pke_rx)
                        adf_remove_ring(inst->pke_rx);
 
-               if (inst->rnd_tx)
-                       adf_remove_ring(inst->rnd_tx);
-
-               if (inst->rnd_rx)
-                       adf_remove_ring(inst->rnd_rx);
-
                list_del(list_ptr);
                kfree(inst);
        }
@@ -109,9 +103,11 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
 
        list_for_each(itr, adf_devmgr_get_head()) {
                accel_dev = list_entry(itr, struct adf_accel_dev, list);
+
                if ((node == dev_to_node(&GET_DEV(accel_dev)) ||
                     dev_to_node(&GET_DEV(accel_dev)) < 0) &&
-                   adf_dev_started(accel_dev))
+                   adf_dev_started(accel_dev) &&
+                   !list_empty(&accel_dev->crypto_list))
                        break;
                accel_dev = NULL;
        }
@@ -158,7 +154,6 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
 
        INIT_LIST_HEAD(&accel_dev->crypto_list);
        strlcpy(key, ADF_NUM_CY, sizeof(key));
-
        if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
                return -EFAULT;
 
@@ -187,7 +182,9 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
 
                if (kstrtoul(val, 10, &num_msg_sym))
                        goto err;
+
                num_msg_sym = num_msg_sym >> 1;
+
                snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
                if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
                        goto err;
@@ -202,11 +199,6 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
                                    msg_size, key, NULL, 0, &inst->sym_tx))
                        goto err;
 
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
-               if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
-                                   msg_size, key, NULL, 0, &inst->rnd_tx))
-                       goto err;
-
                msg_size = msg_size >> 1;
                snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
                if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
@@ -220,15 +212,9 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
                                    &inst->sym_rx))
                        goto err;
 
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
-               if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
-                                   msg_size, key, qat_alg_callback, 0,
-                                   &inst->rnd_rx))
-                       goto err;
-
                snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
                if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
-                                   msg_size, key, qat_alg_callback, 0,
+                                   msg_size, key, qat_alg_asym_callback, 0,
                                    &inst->pke_rx))
                        goto err;
        }
index d503007b49e6ef3f7499a19836e5f09c1b7de9ae..dc0273fe36206e7faaa9cb0f901d580a4b5f037f 100644 (file)
@@ -57,8 +57,6 @@ struct qat_crypto_instance {
        struct adf_etr_ring_data *sym_rx;
        struct adf_etr_ring_data *pke_tx;
        struct adf_etr_ring_data *pke_rx;
-       struct adf_etr_ring_data *rnd_tx;
-       struct adf_etr_ring_data *rnd_rx;
        struct adf_accel_dev *accel_dev;
        struct list_head list;
        unsigned long state;
index 274ff7e9de6e9cfa165a8c99297cfada512bc794..8e711d1c308402ad35ce87822ed16f572d07fe5b 100644 (file)
@@ -671,7 +671,6 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
 #define ICP_DH895XCC_CAP_OFFSET     (ICP_DH895XCC_AE_OFFSET + 0x10000)
 #define LOCAL_TO_XFER_REG_OFFSET    0x800
 #define ICP_DH895XCC_EP_OFFSET      0x3a000
-#define ICP_DH895XCC_PMISC_BAR 1
 int qat_hal_init(struct adf_accel_dev *accel_dev)
 {
        unsigned char ae;
@@ -679,21 +678,24 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
        struct icp_qat_fw_loader_handle *handle;
        struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct adf_bar *bar =
+       struct adf_bar *misc_bar =
                        &pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)];
+       struct adf_bar *sram_bar =
+                       &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
 
        handle = kzalloc(sizeof(*handle), GFP_KERNEL);
        if (!handle)
                return -ENOMEM;
 
-       handle->hal_cap_g_ctl_csr_addr_v = bar->virt_addr +
+       handle->hal_cap_g_ctl_csr_addr_v = misc_bar->virt_addr +
                                                ICP_DH895XCC_CAP_OFFSET;
-       handle->hal_cap_ae_xfer_csr_addr_v = bar->virt_addr +
+       handle->hal_cap_ae_xfer_csr_addr_v = misc_bar->virt_addr +
                                                ICP_DH895XCC_AE_OFFSET;
-       handle->hal_ep_csr_addr_v = bar->virt_addr + ICP_DH895XCC_EP_OFFSET;
+       handle->hal_ep_csr_addr_v = misc_bar->virt_addr +
+                                   ICP_DH895XCC_EP_OFFSET;
        handle->hal_cap_ae_local_csr_addr_v =
                handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET;
-
+       handle->hal_sram_addr_v = sram_bar->virt_addr;
        handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
        if (!handle->hal_handle)
                goto out_hal_handle;
diff --git a/drivers/crypto/qat/qat_common/qat_rsakey.asn1 b/drivers/crypto/qat/qat_common/qat_rsakey.asn1
new file mode 100644 (file)
index 0000000..97b0e02
--- /dev/null
@@ -0,0 +1,5 @@
+RsaKey ::= SEQUENCE {
+       n INTEGER ({ qat_rsa_get_n }),
+       e INTEGER ({ qat_rsa_get_e }),
+       d INTEGER ({ qat_rsa_get_d })
+}
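
The build runs this grammar through the kernel's in-tree ASN.1 compiler, generating qat_rsakey-asn1.{c,h} with the qat_rsakey_decoder table that qat_rsa_setkey() passes to asn1_ber_decoder(); each ({ ... }) action is invoked with the raw bytes of its INTEGER, leading zero octets included, which is why the qat_rsa_get_* callbacks strip them first. A sketch of an action with the expected asn1_action_t signature (the function itself is hypothetical):

static int sketch_get_len(void *context, size_t hdrlen, unsigned char tag,
                          const void *value, size_t vlen)
{
        /* Record only the encoded length of this INTEGER. */
        *(size_t *)context = vlen;
        return 0;
}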
index 1e27f9f7fddf64dbb5ed959305dc93aa7c894fda..c48f181e894157a1c8767d64d4ecf00975bd1ef6 100644 (file)
@@ -359,28 +359,7 @@ static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
 static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
                                   struct icp_qat_uof_initmem *init_mem)
 {
-       unsigned int i;
-       struct icp_qat_uof_memvar_attr *mem_val_attr;
-
-       mem_val_attr =
-               (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
-               sizeof(struct icp_qat_uof_initmem));
-
        switch (init_mem->region) {
-       case ICP_QAT_UOF_SRAM_REGION:
-               if ((init_mem->addr + init_mem->num_in_bytes) >
-                   ICP_DH895XCC_PESRAM_BAR_SIZE) {
-                       pr_err("QAT: initmem on SRAM is out of range");
-                       return -EINVAL;
-               }
-               for (i = 0; i < init_mem->val_attr_num; i++) {
-                       qat_uclo_wr_sram_by_words(handle,
-                                                 init_mem->addr +
-                                                 mem_val_attr->offset_in_byte,
-                                                 &mem_val_attr->value, 4);
-                       mem_val_attr++;
-               }
-               break;
        case ICP_QAT_UOF_LMEM_REGION:
                if (qat_uclo_init_lmem_seg(handle, init_mem))
                        return -EINVAL;
@@ -990,6 +969,12 @@ out_err:
        return -EFAULT;
 }
 
+void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
+                       void *addr_ptr, int mem_size)
+{
+       qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, ALIGN(mem_size, 4));
+}
+
 int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
                         void *addr_ptr, int mem_size)
 {
index 25171c557043300d440d74f385bd8a8ad8beec51..8c79c543740f673be990ed5aa5099b5e1eb871cb 100644 (file)
@@ -2,7 +2,4 @@ ccflags-y := -I$(src)/../qat_common
 obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
 qat_dh895xcc-objs := adf_drv.o \
                adf_isr.o \
-               adf_dh895xcc_hw_data.o \
-               adf_hw_arbiter.o \
-               qat_admin.o \
-               adf_admin.o
+               adf_dh895xcc_hw_data.o
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
deleted file mode 100644 (file)
index e466606..0000000
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-#include <linux/types.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <adf_accel_devices.h>
-#include "adf_drv.h"
-#include "adf_dh895xcc_hw_data.h"
-
-#define ADF_ADMINMSG_LEN 32
-
-struct adf_admin_comms {
-       dma_addr_t phy_addr;
-       void *virt_addr;
-       void __iomem *mailbox_addr;
-       struct mutex lock;      /* protects adf_admin_comms struct */
-};
-
-int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev,
-                          uint32_t ae, void *in, void *out)
-{
-       struct adf_admin_comms *admin = accel_dev->admin;
-       int offset = ae * ADF_ADMINMSG_LEN * 2;
-       void __iomem *mailbox = admin->mailbox_addr;
-       int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
-       int times, received;
-
-       mutex_lock(&admin->lock);
-
-       if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
-               mutex_unlock(&admin->lock);
-               return -EAGAIN;
-       }
-
-       memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
-       ADF_CSR_WR(mailbox, mb_offset, 1);
-       received = 0;
-       for (times = 0; times < 50; times++) {
-               msleep(20);
-               if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
-                       received = 1;
-                       break;
-               }
-       }
-       if (received)
-               memcpy(out, admin->virt_addr + offset +
-                      ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
-       else
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to send admin msg to accelerator\n");
-
-       mutex_unlock(&admin->lock);
-       return received ? 0 : -EFAULT;
-}
-
-int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
-{
-       struct adf_admin_comms *admin;
-       struct adf_bar *pmisc = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
-       void __iomem *csr = pmisc->virt_addr;
-       void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
-       uint64_t reg_val;
-
-       admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
-                            dev_to_node(&GET_DEV(accel_dev)));
-       if (!admin)
-               return -ENOMEM;
-       admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
-                                              &admin->phy_addr, GFP_KERNEL);
-       if (!admin->virt_addr) {
-               dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
-               kfree(admin);
-               return -ENOMEM;
-       }
-       reg_val = (uint64_t)admin->phy_addr;
-       ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
-       ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
-       mutex_init(&admin->lock);
-       admin->mailbox_addr = mailbox;
-       accel_dev->admin = admin;
-       return 0;
-}
-
-void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
-{
-       struct adf_admin_comms *admin = accel_dev->admin;
-
-       if (!admin)
-               return;
-
-       if (admin->virt_addr)
-               dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
-                                 admin->virt_addr, admin->phy_addr);
-
-       mutex_destroy(&admin->lock);
-       kfree(admin);
-       accel_dev->admin = NULL;
-}
index b1386922d7a2a7c0aaf6bf06c52c1a860ffd12bf..ff54257eced47c619d14e3e4275383d3cf186d47 100644 (file)
@@ -45,8 +45,9 @@
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 #include <adf_accel_devices.h>
+#include <adf_pf2vf_msg.h>
+#include <adf_common_drv.h>
 #include "adf_dh895xcc_hw_data.h"
-#include "adf_common_drv.h"
 #include "adf_drv.h"
 
 /* Worker thread to service arbiter mappings based on dev SKUs */
@@ -117,6 +118,11 @@ static uint32_t get_etr_bar_id(struct adf_hw_device_data *self)
        return ADF_DH895XCC_ETR_BAR;
 }
 
+static uint32_t get_sram_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCC_SRAM_BAR;
+}
+
 static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
 {
        int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
@@ -156,6 +162,16 @@ void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
        }
 }
 
+static uint32_t get_pf2vf_offset(uint32_t i)
+{
+       return ADF_DH895XCC_PF2VF_OFFSET(i);
+}
+
+static uint32_t get_vintmsk_offset(uint32_t i)
+{
+       return ADF_DH895XCC_VINTMSK_OFFSET(i);
+}
+
 static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
 {
        struct adf_hw_device_data *hw_device = accel_dev->hw_device;
@@ -192,18 +208,23 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
 
        /* Enable bundle and misc interrupts */
        ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
-                  ADF_DH895XCC_SMIA0_MASK);
+                  accel_dev->pf.vf_info ? 0 :
+                       GENMASK_ULL(GET_MAX_BANKS(accel_dev) - 1, 0));
        ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
                   ADF_DH895XCC_SMIA1_MASK);
 }
 
+static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+       return 0;
+}
+
 void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
 {
        hw_data->dev_class = &dh895xcc_class;
        hw_data->instance_id = dh895xcc_class.instances++;
        hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
        hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
-       hw_data->pci_dev_id = ADF_DH895XCC_PCI_DEVICE_ID;
        hw_data->num_logical_accel = 1;
        hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
        hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET;
@@ -211,21 +232,28 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
        hw_data->alloc_irq = adf_isr_resource_alloc;
        hw_data->free_irq = adf_isr_resource_free;
        hw_data->enable_error_correction = adf_enable_error_correction;
-       hw_data->hw_arb_ring_enable = adf_update_ring_arb_enable;
-       hw_data->hw_arb_ring_disable = adf_update_ring_arb_enable;
        hw_data->get_accel_mask = get_accel_mask;
        hw_data->get_ae_mask = get_ae_mask;
        hw_data->get_num_accels = get_num_accels;
        hw_data->get_num_aes = get_num_aes;
        hw_data->get_etr_bar_id = get_etr_bar_id;
        hw_data->get_misc_bar_id = get_misc_bar_id;
+       hw_data->get_pf2vf_offset = get_pf2vf_offset;
+       hw_data->get_vintmsk_offset = get_vintmsk_offset;
+       hw_data->get_sram_bar_id = get_sram_bar_id;
        hw_data->get_sku = get_sku;
        hw_data->fw_name = ADF_DH895XCC_FW;
+       hw_data->fw_mmp_name = ADF_DH895XCC_MMP;
        hw_data->init_admin_comms = adf_init_admin_comms;
        hw_data->exit_admin_comms = adf_exit_admin_comms;
+       hw_data->disable_iov = adf_disable_sriov;
+       hw_data->send_admin_init = adf_send_admin_init;
        hw_data->init_arb = adf_init_arb;
        hw_data->exit_arb = adf_exit_arb;
+       hw_data->get_arb_mapping = adf_get_arbiter_mapping;
        hw_data->enable_ints = adf_enable_ints;
+       hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
+       hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
 }
 
 void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
index 25269a9f24a2bca0cb41e095668589b609a462ff..88dffb29734683efa437d618464e263dfb95ec00 100644 (file)
@@ -48,6 +48,7 @@
 #define ADF_DH895x_HW_DATA_H_
 
 /* PCIe configuration space */
+#define ADF_DH895XCC_SRAM_BAR 0
 #define ADF_DH895XCC_PMISC_BAR 1
 #define ADF_DH895XCC_ETR_BAR 2
 #define ADF_DH895XCC_RX_RINGS_OFFSET 8
 #define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10)
 #define ADF_DH895XCC_ERRSSMSH_EN BIT(3)
 
-/* Admin Messages Registers */
-#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574)
-#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578)
-#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970
-#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
+#define ADF_DH895XCC_ERRSOU3   (0x3A000 + 0x00C)
+#define ADF_DH895XCC_ERRSOU5   (0x3A000 + 0x0D8)
+#define ADF_DH895XCC_PF2VF_OFFSET(i)   (0x3A000 + 0x280 + ((i) * 0x04))
+#define ADF_DH895XCC_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04))
+/* FW names */
 #define ADF_DH895XCC_FW "qat_895xcc.bin"
+#define ADF_DH895XCC_MMP "qat_mmp.bin"
 #endif
index 1bde45b7a3c560f5e409c17f45c4bf6bb9eb730e..f8dd14f232c83bffee35a2f096cd13dd88cd15dd 100644 (file)
@@ -82,16 +82,21 @@ static struct pci_driver adf_driver = {
        .id_table = adf_pci_tbl,
        .name = adf_driver_name,
        .probe = adf_probe,
-       .remove = adf_remove
+       .remove = adf_remove,
+       .sriov_configure = adf_sriov_configure,
 };
 
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
 static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
 {
        struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
        int i;
 
-       adf_dev_shutdown(accel_dev);
-
        for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
                struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
 
@@ -100,7 +105,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
        }
 
        if (accel_dev->hw_device) {
-               switch (accel_dev->hw_device->pci_dev_id) {
+               switch (accel_pci_dev->pci_dev->device) {
                case ADF_DH895XCC_PCI_DEVICE_ID:
                        adf_clean_hw_data_dh895xcc(accel_dev->hw_device);
                        break;
@@ -108,13 +113,11 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
                        break;
                }
                kfree(accel_dev->hw_device);
+               accel_dev->hw_device = NULL;
        }
        adf_cfg_dev_remove(accel_dev);
        debugfs_remove(accel_dev->debugfs_dir);
-       adf_devmgr_rm_dev(accel_dev);
-       pci_release_regions(accel_pci_dev->pci_dev);
-       pci_disable_device(accel_pci_dev->pci_dev);
-       kfree(accel_dev);
+       adf_devmgr_rm_dev(accel_dev, NULL);
 }
 
 static int adf_dev_configure(struct adf_accel_dev *accel_dev)
@@ -167,12 +170,6 @@ static int adf_dev_configure(struct adf_accel_dev *accel_dev)
                                                key, (void *)&val, ADF_DEC))
                        goto err;
 
-               val = 4;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
-               if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                               key, (void *)&val, ADF_DEC))
-                       goto err;
-
                val = 8;
                snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
                if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
@@ -185,12 +182,6 @@ static int adf_dev_configure(struct adf_accel_dev *accel_dev)
                                                key, (void *)&val, ADF_DEC))
                        goto err;
 
-               val = 12;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
-               if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                               key, (void *)&val, ADF_DEC))
-                       goto err;
-
                val = ADF_COALESCING_DEF_TIME;
                snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
                if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
@@ -217,7 +208,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct adf_hw_device_data *hw_data;
        char name[ADF_DEVICE_NAME_LENGTH];
        unsigned int i, bar_nr;
-       int ret;
+       int ret, bar_mask;
 
        switch (ent->device) {
        case ADF_DH895XCC_PCI_DEVICE_ID:
@@ -241,10 +232,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                return -ENOMEM;
 
        INIT_LIST_HEAD(&accel_dev->crypto_list);
+       accel_pci_dev = &accel_dev->accel_pci_dev;
+       accel_pci_dev->pci_dev = pdev;
 
        /* Add accel device to accel table.
         * This should be called before adf_cleanup_accel is called */
-       if (adf_devmgr_add_dev(accel_dev)) {
+       if (adf_devmgr_add_dev(accel_dev, NULL)) {
                dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
                kfree(accel_dev);
                return -EFAULT;
@@ -267,7 +260,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        default:
                return -ENODEV;
        }
-       accel_pci_dev = &accel_dev->accel_pci_dev;
        pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
        pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET,
                              &hw_data->fuses);
@@ -276,7 +268,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
        hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
        accel_pci_dev->sku = hw_data->get_sku(hw_data);
-       accel_pci_dev->pci_dev = pdev;
        /* If the device has no acceleration engines then ignore it. */
        if (!hw_data->accel_mask || !hw_data->ae_mask ||
            ((~hw_data->ae_mask) & 0x01)) {
@@ -286,11 +277,14 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* Create dev top level debugfs entry */
-       snprintf(name, sizeof(name), "%s%s_dev%d", ADF_DEVICE_NAME_PREFIX,
-                hw_data->dev_class->name, hw_data->instance_id);
+       snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d",
+                ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+                pdev->bus->number, PCI_SLOT(pdev->devfn),
+                PCI_FUNC(pdev->devfn));
+
        accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
        if (!accel_dev->debugfs_dir) {
-               dev_err(&pdev->dev, "Could not create debugfs dir\n");
+               dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
                ret = -EINVAL;
                goto out_err;
        }
@@ -313,7 +307,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
                        dev_err(&pdev->dev, "No usable DMA configuration\n");
                        ret = -EFAULT;
-                       goto out_err;
+                       goto out_err_disable;
                } else {
                        pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                }
@@ -324,7 +318,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        if (pci_request_regions(pdev, adf_driver_name)) {
                ret = -EFAULT;
-               goto out_err;
+               goto out_err_disable;
        }
 
        /* Read accelerator capabilities mask */
@@ -332,19 +326,21 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                              &hw_data->accel_capabilities_mask);
 
        /* Find and map all the device's BARS */
-       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+       i = 0;
+       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+       for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
+                        ADF_PCI_MAX_BARS * 2) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
-               bar_nr = i * 2;
                bar->base_addr = pci_resource_start(pdev, bar_nr);
                if (!bar->base_addr)
                        break;
                bar->size = pci_resource_len(pdev, bar_nr);
                bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
                if (!bar->virt_addr) {
-                       dev_err(&pdev->dev, "Failed to map BAR %d\n", i);
+                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
                        ret = -EFAULT;
-                       goto out_err;
+                       goto out_err_free_reg;
                }
        }
        pci_set_master(pdev);
@@ -352,32 +348,40 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (adf_enable_aer(accel_dev, &adf_driver)) {
                dev_err(&pdev->dev, "Failed to enable aer\n");
                ret = -EFAULT;
-               goto out_err;
+               goto out_err_free_reg;
        }
 
        if (pci_save_state(pdev)) {
                dev_err(&pdev->dev, "Failed to save pci state\n");
                ret = -ENOMEM;
-               goto out_err;
+               goto out_err_free_reg;
        }
 
        ret = adf_dev_configure(accel_dev);
        if (ret)
-               goto out_err;
+               goto out_err_free_reg;
 
        ret = adf_dev_init(accel_dev);
        if (ret)
-               goto out_err;
+               goto out_err_dev_shutdown;
 
        ret = adf_dev_start(accel_dev);
-       if (ret) {
-               adf_dev_stop(accel_dev);
-               goto out_err;
-       }
+       if (ret)
+               goto out_err_dev_stop;
 
-       return 0;
+       return ret;
+
+out_err_dev_stop:
+       adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+       adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+       pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+       pci_disable_device(accel_pci_dev->pci_dev);
 out_err:
        adf_cleanup_accel(accel_dev);
+       kfree(accel_dev);
        return ret;
 }
 
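The hunk above replaces the old single catch-all out_err with a layered unwind: each failure jumps to a label that undoes only the steps that had already succeeded, in reverse order (stop, then shutdown, then release regions, then disable the PCI device). A minimal userspace sketch of the idiom, with hypothetical step_*/undo_* pairs standing in for adf_dev_start(), pci_request_regions() and friends:

#include <stdio.h>

/* Hypothetical setup/teardown pairs standing in for adf_dev_init(),
 * adf_dev_start(), pci_request_regions(), etc. */
static int step_a(void) { puts("a up"); return 0; }
static void undo_a(void) { puts("a down"); }
static int step_b(void) { puts("b up"); return 0; }
static void undo_b(void) { puts("b down"); }
static int step_c(void) { puts("c up"); return -1; }	/* simulate failure */

static int probe(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto out_err;
	ret = step_b();
	if (ret)
		goto out_undo_a;
	ret = step_c();
	if (ret)
		goto out_undo_b;
	return 0;

out_undo_b:	/* unwinds exactly what succeeded, newest first */
	undo_b();
out_undo_a:
	undo_a();
out_err:
	return ret;
}

int main(void) { return probe() ? 1 : 0; }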
@@ -391,15 +395,17 @@ static void adf_remove(struct pci_dev *pdev)
        }
        if (adf_dev_stop(accel_dev))
                dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
+
+       adf_dev_shutdown(accel_dev);
        adf_disable_aer(accel_dev);
        adf_cleanup_accel(accel_dev);
+       adf_cleanup_pci_dev(accel_dev);
+       kfree(accel_dev);
 }
 
 static int __init adfdrv_init(void)
 {
        request_module("intel_qat");
-       if (qat_admin_register())
-               return -EFAULT;
 
        if (pci_register_driver(&adf_driver)) {
                pr_err("QAT: Driver initialization failed\n");
@@ -411,7 +417,6 @@ static int __init adfdrv_init(void)
 static void __exit adfdrv_release(void)
 {
        pci_unregister_driver(&adf_driver);
-       qat_admin_unregister();
 }
 
 module_init(adfdrv_init);
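The debugfs node is now named after the PCI bus address instead of a per-class instance counter, so the path stays stable regardless of probe order. PCI_SLOT() and PCI_FUNC() unpack the 8-bit devfn value (a 5-bit slot above a 3-bit function number). A runnable sketch of the same formatting; the "qat_" prefix and class name here are assumed values for illustration:

#include <stdio.h>

/* Same bit layout as the kernel's PCI_SLOT()/PCI_FUNC() helpers. */
#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn) ((devfn) & 0x07)

int main(void)
{
	int bus = 0x3d, devfn = (2 << 3) | 1;	/* hypothetical device 3d:02.1 */
	char name[64];

	snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d",
		 "qat_", "dh895xcc", bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	puts(name);	/* prints qat_dh895xcc_3d:02.01 */
	return 0;
}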
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h
index a2fbb6ce75cd8093f49ea22520cf2bca6cd9450e..85ff245bd1d8d982133516d90557fbe4cb28441b 100644 (file)
@@ -53,15 +53,6 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
 void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
 int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
 void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
-void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring);
 void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
                             uint32_t const **arb_map_config);
-int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
-void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
-int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev,
-                          uint32_t ae, void *in, void *out);
-int qat_admin_register(void);
-int qat_admin_unregister(void);
-int adf_init_arb(struct adf_accel_dev *accel_dev);
-void adf_exit_arb(struct adf_accel_dev *accel_dev);
 #endif
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c b/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c
deleted file mode 100644 (file)
index 1864bdb..0000000
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-#include <adf_accel_devices.h>
-#include <adf_transport_internal.h>
-#include "adf_drv.h"
-
-#define ADF_ARB_NUM 4
-#define ADF_ARB_REQ_RING_NUM 8
-#define ADF_ARB_REG_SIZE 0x4
-#define ADF_ARB_WTR_SIZE 0x20
-#define ADF_ARB_OFFSET 0x30000
-#define ADF_ARB_REG_SLOT 0x1000
-#define ADF_ARB_WTR_OFFSET 0x010
-#define ADF_ARB_RO_EN_OFFSET 0x090
-#define ADF_ARB_WQCFG_OFFSET 0x100
-#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
-#define ADF_ARB_WRK_2_SER_MAP 10
-#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
-
-#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
-       ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
-       (ADF_ARB_REG_SLOT * index), value)
-
-#define WRITE_CSR_ARB_RESPORDERING(csr_addr, index, value) \
-       ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
-       ADF_ARB_RO_EN_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
-
-#define WRITE_CSR_ARB_WEIGHT(csr_addr, arb, index, value) \
-       ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
-       ADF_ARB_WTR_OFFSET) + (ADF_ARB_WTR_SIZE * arb) + \
-       (ADF_ARB_REG_SIZE * index), value)
-
-#define WRITE_CSR_ARB_SARCONFIG(csr_addr, index, value) \
-       ADF_CSR_WR(csr_addr, ADF_ARB_OFFSET + \
-       (ADF_ARB_REG_SIZE * index), value)
-
-#define WRITE_CSR_ARB_WRK_2_SER_MAP(csr_addr, index, value) \
-       ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
-       ADF_ARB_WRK_2_SER_MAP_OFFSET) + \
-       (ADF_ARB_REG_SIZE * index), value)
-
-#define WRITE_CSR_ARB_WQCFG(csr_addr, index, value) \
-       ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
-       ADF_ARB_WQCFG_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
-
-int adf_init_arb(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
-       uint32_t arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
-       uint32_t arb, i;
-       const uint32_t *thd_2_arb_cfg;
-
-       /* Service arb configured for 32 bytes responses and
-        * ring flow control check enabled. */
-       for (arb = 0; arb < ADF_ARB_NUM; arb++)
-               WRITE_CSR_ARB_SARCONFIG(csr, arb, arb_cfg);
-
-       /* Setup service weighting */
-       for (arb = 0; arb < ADF_ARB_NUM; arb++)
-               for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
-                       WRITE_CSR_ARB_WEIGHT(csr, arb, i, 0xFFFFFFFF);
-
-       /* Setup ring response ordering */
-       for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
-               WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF);
-
-       /* Setup worker queue registers */
-       for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
-               WRITE_CSR_ARB_WQCFG(csr, i, i);
-
-       /* Map worker threads to service arbiters */
-       adf_get_arbiter_mapping(accel_dev, &thd_2_arb_cfg);
-
-       if (!thd_2_arb_cfg)
-               return -EFAULT;
-
-       for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
-               WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, *(thd_2_arb_cfg + i));
-
-       return 0;
-}
-
-void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring)
-{
-       WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr,
-                                  ring->bank->bank_number,
-                                  ring->bank->ring_mask & 0xFF);
-}
-
-void adf_exit_arb(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *csr;
-       unsigned int i;
-
-       if (!accel_dev->transport)
-               return;
-
-       csr = accel_dev->transport->banks[0].csr_addr;
-
-       /* Reset arbiter configuration */
-       for (i = 0; i < ADF_ARB_NUM; i++)
-               WRITE_CSR_ARB_SARCONFIG(csr, i, 0);
-
-       /* Shutdown work queue */
-       for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
-               WRITE_CSR_ARB_WQCFG(csr, i, 0);
-
-       /* Unmap worker threads to service arbiters */
-       for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
-               WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0);
-
-       /* Disable arbitration on all rings */
-       for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
-               WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0);
-}
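The deleted file is the per-device copy of the arbiter setup; equivalent code now lives in qat_common and is reached through the new init_arb/exit_arb hw_data ops. The WRITE_CSR_ARB_* macros above are plain base-plus-stride address arithmetic, which a small host-side sketch can reproduce using the constants from the deleted file:

#include <stdio.h>

/* Constants copied from the deleted adf_hw_arbiter.c; the CSR macros
 * there reduce to base + stride * index address computation. */
#define ADF_ARB_OFFSET		0x30000
#define ADF_ARB_REG_SIZE	0x4
#define ADF_ARB_WTR_SIZE	0x20
#define ADF_ARB_WTR_OFFSET	0x010

static unsigned long arb_weight_addr(unsigned int arb, unsigned int index)
{
	return (ADF_ARB_OFFSET + ADF_ARB_WTR_OFFSET) +
	       (ADF_ARB_WTR_SIZE * arb) + (ADF_ARB_REG_SIZE * index);
}

int main(void)
{
	/* The offset WRITE_CSR_ARB_WEIGHT(csr, 1, 3, ...) would target:
	 * 0x30000 + 0x10 + 0x20 * 1 + 0x4 * 3 = 0x3003c. */
	printf("0x%lx\n", arb_weight_addr(1, 3));
	return 0;
}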
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
index 0d03c109c2d32f56c80e1d5d3e7f490fe5a0bd55..5570f78795c1840e029f7c560cc4bd6c5b59cc45 100644 (file)
 #include <adf_transport_access_macros.h>
 #include <adf_transport_internal.h>
 #include "adf_drv.h"
+#include "adf_dh895xcc_hw_data.h"
 
 static int adf_enable_msix(struct adf_accel_dev *accel_dev)
 {
        struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       uint32_t msix_num_entries = hw_data->num_banks + 1;
-       int i;
-
-       for (i = 0; i < msix_num_entries; i++)
-               pci_dev_info->msix_entries.entries[i].entry = i;
+       u32 msix_num_entries = 1;
+
+       /* If SR-IOV is disabled, add entries for each bank */
+       if (!accel_dev->pf.vf_info) {
+               int i;
+
+               msix_num_entries += hw_data->num_banks;
+               for (i = 0; i < msix_num_entries; i++)
+                       pci_dev_info->msix_entries.entries[i].entry = i;
+       } else {
+               pci_dev_info->msix_entries.entries[0].entry =
+                       hw_data->num_banks;
+       }
 
        if (pci_enable_msix_exact(pci_dev_info->pci_dev,
                                  pci_dev_info->msix_entries.entries,
                                  msix_num_entries)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to enable MSIX IRQ\n");
+               dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n");
                return -EFAULT;
        }
        return 0;
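The vector layout now depends on the SR-IOV state: without VFs the PF requests one vector per ring bank plus a final one for the acceleration engines, while with SR-IOV enabled the banks belong to the VFs and only the AE vector (entry index num_banks) is requested. A minimal sketch mirroring that entry-table setup:

#include <stdio.h>

/* Mirrors the layout in adf_enable_msix(): without SR-IOV, entries
 * 0..num_banks (one per bank, then the AE vector); with SR-IOV, a
 * single AE entry whose index is num_banks. Returns the vector count. */
static int fill_entries(int entries[], int num_banks, int sriov_enabled)
{
	int n = 1, i;

	if (!sriov_enabled) {
		n += num_banks;
		for (i = 0; i < n; i++)
			entries[i] = i;
	} else {
		entries[0] = num_banks;
	}
	return n;
}

static void dump(const int entries[], int n)
{
	int i;

	for (i = 0; i < n; i++)
		printf("entry[%d] = %d\n", i, entries[i]);
}

int main(void)
{
	int e[5];

	dump(e, fill_entries(e, 4, 0));	/* PF owns its banks: 0 1 2 3 4 */
	dump(e, fill_entries(e, 4, 1));	/* SR-IOV on: one entry, index 4 */
	return 0;
}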
@@ -97,9 +106,58 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
 {
        struct adf_accel_dev *accel_dev = dev_ptr;
 
-       dev_info(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
-                accel_dev->accel_id);
-       return IRQ_HANDLED;
+#ifdef CONFIG_PCI_IOV
+       /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
+       if (accel_dev->pf.vf_info) {
+               void __iomem *pmisc_bar_addr =
+                   (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr;
+               u32 vf_mask;
+
+               /* Get the interrupt sources triggered by VFs */
+               vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU5) &
+                           0x0000FFFF) << 16) |
+                         ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU3) &
+                           0x01FFFE00) >> 9);
+
+               if (vf_mask) {
+                       struct adf_accel_vf_info *vf_info;
+                       bool irq_handled = false;
+                       int i;
+
+                       /* Disable VF2PF interrupts for VFs with pending ints */
+                       adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+
+                       /*
+                        * Schedule tasklets to handle VF2PF interrupt BHs
+                        * unless the VF is malicious and is attempting to
+                        * flood the host OS with VF2PF interrupts.
+                        */
+                       for_each_set_bit(i, (const unsigned long *)&vf_mask,
+                                        (sizeof(vf_mask) * BITS_PER_BYTE)) {
+                               vf_info = accel_dev->pf.vf_info + i;
+
+                               if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
+                                       dev_info(&GET_DEV(accel_dev),
+                                                "Too many ints from VF%d\n",
+                                                 vf_info->vf_nr + 1);
+                                       continue;
+                               }
+
+                               /* Tasklet will re-enable ints from this VF */
+                               tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet);
+                               irq_handled = true;
+                       }
+
+                       if (irq_handled)
+                               return IRQ_HANDLED;
+               }
+       }
+#endif /* CONFIG_PCI_IOV */
+
+       dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
+               accel_dev->accel_id);
+
+       return IRQ_NONE;
 }
 
 static int adf_request_irqs(struct adf_accel_dev *accel_dev)
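The AE vector's handler above doubles as the VF-to-PF doorbell when SR-IOV is on. It folds two error-source registers into one 32-bit mask, one bit per VF: ERRSOU3 carries VFs 0-15 in bits 9-24 and ERRSOU5 carries VFs 16-31 in bits 0-15. A runnable sketch of the mask assembly with hypothetical register values:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the mask assembly in adf_msix_isr_ae(). */
static uint32_t build_vf_mask(uint32_t errsou3, uint32_t errsou5)
{
	return ((errsou5 & 0x0000FFFF) << 16) |
	       ((errsou3 & 0x01FFFE00) >> 9);
}

int main(void)
{
	/* Hypothetical sample: VF0 (ERRSOU3 bit 9) and VF16 (ERRSOU5
	 * bit 0) have pending VF2PF interrupts. */
	uint32_t errsou3 = 1u << 9, errsou5 = 1u << 0;

	printf("vf_mask = 0x%08x\n", build_vf_mask(errsou3, errsou5));
	/* prints vf_mask = 0x00010001 */
	return 0;
}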
@@ -108,28 +166,32 @@ static int adf_request_irqs(struct adf_accel_dev *accel_dev)
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
        struct adf_etr_data *etr_data = accel_dev->transport;
-       int ret, i;
+       int ret, i = 0;
        char *name;
 
-       /* Request msix irq for all banks */
-       for (i = 0; i < hw_data->num_banks; i++) {
-               struct adf_etr_bank_data *bank = &etr_data->banks[i];
-               unsigned int cpu, cpus = num_online_cpus();
-
-               name = *(pci_dev_info->msix_entries.names + i);
-               snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
-                        "qat%d-bundle%d", accel_dev->accel_id, i);
-               ret = request_irq(msixe[i].vector,
-                                 adf_msix_isr_bundle, 0, name, bank);
-               if (ret) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "failed to enable irq %d for %s\n",
-                               msixe[i].vector, name);
-                       return ret;
+       /* Request msix irq for all banks unless SR-IOV enabled */
+       if (!accel_dev->pf.vf_info) {
+               for (i = 0; i < hw_data->num_banks; i++) {
+                       struct adf_etr_bank_data *bank = &etr_data->banks[i];
+                       unsigned int cpu, cpus = num_online_cpus();
+
+                       name = *(pci_dev_info->msix_entries.names + i);
+                       snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
+                                "qat%d-bundle%d", accel_dev->accel_id, i);
+                       ret = request_irq(msixe[i].vector,
+                                         adf_msix_isr_bundle, 0, name, bank);
+                       if (ret) {
+                               dev_err(&GET_DEV(accel_dev),
+                                       "failed to enable irq %d for %s\n",
+                                       msixe[i].vector, name);
+                               return ret;
+                       }
+
+                       cpu = ((accel_dev->accel_id * hw_data->num_banks) +
+                              i) % cpus;
+                       irq_set_affinity_hint(msixe[i].vector,
+                                             get_cpu_mask(cpu));
                }
-
-               cpu = ((accel_dev->accel_id * hw_data->num_banks) + i) % cpus;
-               irq_set_affinity_hint(msixe[i].vector, get_cpu_mask(cpu));
        }
 
        /* Request msix irq for AE */
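The affinity hint above spreads bundle vectors across CPUs with (accel_id * num_banks + i) % num_online_cpus(), so consecutive accelerators land on consecutive CPU ranges instead of all hashing to CPU 0. A sketch of the resulting mapping for a hypothetical 8-CPU host with three devices:

#include <stdio.h>

/* The affinity formula from adf_request_irqs(). */
int main(void)
{
	unsigned int cpus = 8, num_banks = 4;	/* hypothetical topology */
	unsigned int accel_id, i;

	for (accel_id = 0; accel_id < 3; accel_id++)
		for (i = 0; i < num_banks; i++)
			printf("qat%u-bundle%u -> cpu %u\n", accel_id, i,
			       ((accel_id * num_banks) + i) % cpus);
	return 0;
}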
@@ -152,11 +214,13 @@ static void adf_free_irqs(struct adf_accel_dev *accel_dev)
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
        struct adf_etr_data *etr_data = accel_dev->transport;
-       int i;
+       int i = 0;
 
-       for (i = 0; i < hw_data->num_banks; i++) {
-               irq_set_affinity_hint(msixe[i].vector, NULL);
-               free_irq(msixe[i].vector, &etr_data->banks[i]);
+       if (pci_dev_info->msix_entries.num_entries > 1) {
+               for (i = 0; i < hw_data->num_banks; i++) {
+                       irq_set_affinity_hint(msixe[i].vector, NULL);
+                       free_irq(msixe[i].vector, &etr_data->banks[i]);
+               }
        }
        irq_set_affinity_hint(msixe[i].vector, NULL);
        free_irq(msixe[i].vector, accel_dev);
@@ -168,7 +232,11 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
        char **names;
        struct msix_entry *entries;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       uint32_t msix_num_entries = hw_data->num_banks + 1;
+       u32 msix_num_entries = 1;
+
+       /* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
+       if (!accel_dev->pf.vf_info)
+               msix_num_entries += hw_data->num_banks;
 
        entries = kzalloc_node(msix_num_entries * sizeof(*entries),
                               GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
@@ -185,6 +253,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
                if (!(*(names + i)))
                        goto err;
        }
+       accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
        accel_dev->accel_pci_dev.msix_entries.entries = entries;
        accel_dev->accel_pci_dev.msix_entries.names = names;
        return 0;
@@ -198,13 +267,11 @@ err:
 
 static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
 {
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       uint32_t msix_num_entries = hw_data->num_banks + 1;
        char **names = accel_dev->accel_pci_dev.msix_entries.names;
        int i;
 
        kfree(accel_dev->accel_pci_dev.msix_entries.entries);
-       for (i = 0; i < msix_num_entries; i++)
+       for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++)
                kfree(*(names + i));
        kfree(names);
 }
diff --git a/drivers/crypto/qat/qat_dh895xcc/qat_admin.c b/drivers/crypto/qat/qat_dh895xcc/qat_admin.c
deleted file mode 100644 (file)
index 55b7a8e..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-#include <icp_qat_fw_init_admin.h>
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include "adf_drv.h"
-
-static struct service_hndl qat_admin;
-
-static int qat_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
-{
-       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-       struct icp_qat_fw_init_admin_req req;
-       struct icp_qat_fw_init_admin_resp resp;
-       int i;
-
-       memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req));
-       req.init_admin_cmd_id = cmd;
-       for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
-               memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp));
-               if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) ||
-                   resp.init_resp_hdr.status)
-                       return -EFAULT;
-       }
-       return 0;
-}
-
-static int qat_admin_start(struct adf_accel_dev *accel_dev)
-{
-       return qat_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME);
-}
-
-static int qat_admin_event_handler(struct adf_accel_dev *accel_dev,
-                                  enum adf_event event)
-{
-       int ret;
-
-       switch (event) {
-       case ADF_EVENT_START:
-               ret = qat_admin_start(accel_dev);
-               break;
-       case ADF_EVENT_STOP:
-       case ADF_EVENT_INIT:
-       case ADF_EVENT_SHUTDOWN:
-       default:
-               ret = 0;
-       }
-       return ret;
-}
-
-int qat_admin_register(void)
-{
-       memset(&qat_admin, 0, sizeof(struct service_hndl));
-       qat_admin.event_hld = qat_admin_event_handler;
-       qat_admin.name = "qat_admin";
-       qat_admin.admin = 1;
-       return adf_service_register(&qat_admin);
-}
-
-int qat_admin_unregister(void)
-{
-       return adf_service_unregister(&qat_admin);
-}
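qat_admin.c disappears because admin communication moved into qat_common behind the init_admin_comms/send_admin_init hw_data ops, making the per-device service registration unnecessary. The core of the deleted file was a fail-fast loop sending one synchronous command per acceleration engine; a minimal sketch of that shape, with send_sync() as a hypothetical stand-in for adf_put_admin_msg_sync():

#include <stdio.h>

static int send_sync(int ae, int cmd)
{
	(void)cmd;
	return ae == 2 ? -1 : 0;	/* simulate AE2 rejecting the command */
}

/* Shape of the deleted qat_send_admin_cmd(): stop at the first engine
 * that fails to answer or reports a bad status. */
static int send_admin_cmd(int num_aes, int cmd)
{
	int i;

	for (i = 0; i < num_aes; i++)
		if (send_sync(i, cmd))
			return -1;	/* -EFAULT in the original */
	return 0;
}

int main(void)
{
	printf("rc = %d\n", send_admin_cmd(4, 0));	/* fails at AE2 */
	return 0;
}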
diff --git a/drivers/crypto/qat/qat_dh895xccvf/Makefile b/drivers/crypto/qat/qat_dh895xccvf/Makefile
new file mode 100644 (file)
index 0000000..85399fc
--- /dev/null
@@ -0,0 +1,5 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
+qat_dh895xccvf-objs := adf_drv.o \
+               adf_isr.o \
+               adf_dh895xccvf_hw_data.o
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
new file mode 100644 (file)
index 0000000..a9a27ef
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_pf2vf_msg.h>
+#include <adf_common_drv.h>
+#include "adf_dh895xccvf_hw_data.h"
+#include "adf_drv.h"
+
+static struct adf_hw_device_class dh895xcciov_class = {
+       .name = ADF_DH895XCCVF_DEVICE_NAME,
+       .type = DEV_DH895XCCVF,
+       .instances = 0
+};
+
+static u32 get_accel_mask(u32 fuse)
+{
+       return ADF_DH895XCCIOV_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(u32 fuse)
+{
+       return ADF_DH895XCCIOV_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCCIOV_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCCIOV_MAX_ACCELENGINES;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCCIOV_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCCIOV_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+       return DEV_SKU_VF;
+}
+
+static u32 get_pf2vf_offset(u32 i)
+{
+       return ADF_DH895XCCIOV_PF2VF_OFFSET;
+}
+
+static u32 get_vintmsk_offset(u32 i)
+{
+       return ADF_DH895XCCIOV_VINTMSK_OFFSET;
+}
+
+static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
+{
+       return 0;
+}
+
+static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
+{
+}
+
+static int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+{
+       u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+               (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
+
+       if (adf_iov_putmsg(accel_dev, msg, 0)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to send Init event to PF\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+{
+       u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+           (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
+
+       if (adf_iov_putmsg(accel_dev, msg, 0))
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to send Shutdown event to PF\n");
+}
+
+void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class = &dh895xcciov_class;
+       hw_data->instance_id = dh895xcciov_class.instances++;
+       hw_data->num_banks = ADF_DH895XCCIOV_ETR_MAX_BANKS;
+       hw_data->num_accel = ADF_DH895XCCIOV_MAX_ACCELERATORS;
+       hw_data->num_logical_accel = 1;
+       hw_data->num_engines = ADF_DH895XCCIOV_MAX_ACCELENGINES;
+       hw_data->tx_rx_gap = ADF_DH895XCCIOV_RX_RINGS_OFFSET;
+       hw_data->tx_rings_mask = ADF_DH895XCCIOV_TX_RINGS_MASK;
+       hw_data->alloc_irq = adf_vf_isr_resource_alloc;
+       hw_data->free_irq = adf_vf_isr_resource_free;
+       hw_data->enable_error_correction = adf_vf_void_noop;
+       hw_data->init_admin_comms = adf_vf_int_noop;
+       hw_data->exit_admin_comms = adf_vf_void_noop;
+       hw_data->send_admin_init = adf_vf2pf_init;
+       hw_data->init_arb = adf_vf_int_noop;
+       hw_data->exit_arb = adf_vf_void_noop;
+       hw_data->disable_iov = adf_vf2pf_shutdown;
+       hw_data->get_accel_mask = get_accel_mask;
+       hw_data->get_ae_mask = get_ae_mask;
+       hw_data->get_num_accels = get_num_accels;
+       hw_data->get_num_aes = get_num_aes;
+       hw_data->get_etr_bar_id = get_etr_bar_id;
+       hw_data->get_misc_bar_id = get_misc_bar_id;
+       hw_data->get_pf2vf_offset = get_pf2vf_offset;
+       hw_data->get_vintmsk_offset = get_vintmsk_offset;
+       hw_data->get_sku = get_sku;
+       hw_data->enable_ints = adf_vf_void_noop;
+       hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
+       hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+}
+
+void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class->instances--;
+}
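The VF reuses the common driver core by filling the same hw_data ops table the PF does: operations that only make sense on the physical function (error correction, arbiter setup, admin comms) become no-ops, and firmware initialization turns into a VF2PF mailbox message. A reduced sketch of that pattern, with a hypothetical two-member ops struct:

#include <stdio.h>

/* Hypothetical reduction of the hw_data ops table: common code calls
 * through the pointers and never needs to know PF from VF. */
struct hw_ops {
	int (*init_admin_comms)(void);
	int (*send_admin_init)(void);
};

static int int_noop(void) { return 0; }	/* VF does not own this hardware */
static int vf2pf_init(void) { puts("VF2PF INIT sent"); return 0; }

int main(void)
{
	struct hw_ops vf_ops = {
		.init_admin_comms = int_noop,
		.send_admin_init = vf2pf_init,
	};

	if (vf_ops.init_admin_comms() || vf_ops.send_admin_init())
		return 1;
	return 0;
}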
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
new file mode 100644 (file)
index 0000000..8f6babf
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DH895XVF_HW_DATA_H_
+#define ADF_DH895XVF_HW_DATA_H_
+
+#define ADF_DH895XCCIOV_PMISC_BAR 1
+#define ADF_DH895XCCIOV_ACCELERATORS_MASK 0x1
+#define ADF_DH895XCCIOV_ACCELENGINES_MASK 0x1
+#define ADF_DH895XCCIOV_MAX_ACCELERATORS 1
+#define ADF_DH895XCCIOV_MAX_ACCELENGINES 1
+#define ADF_DH895XCCIOV_RX_RINGS_OFFSET 8
+#define ADF_DH895XCCIOV_TX_RINGS_MASK 0xFF
+#define ADF_DH895XCCIOV_ETR_BAR 0
+#define ADF_DH895XCCIOV_ETR_MAX_BANKS 1
+
+#define ADF_DH895XCCIOV_PF2VF_OFFSET   0x200
+#define ADF_DH895XCC_PF2VF_PF2VFINT    BIT(0)
+
+#define ADF_DH895XCCIOV_VINTSOU_OFFSET 0x204
+#define ADF_DH895XCC_VINTSOU_BUN       BIT(0)
+#define ADF_DH895XCC_VINTSOU_PF2VF     BIT(1)
+
+#define ADF_DH895XCCIOV_VINTMSK_OFFSET 0x208
+#endif
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
new file mode 100644 (file)
index 0000000..789426f
--- /dev/null
@@ -0,0 +1,393 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_transport_access_macros.h>
+#include "adf_dh895xccvf_hw_data.h"
+#include "adf_drv.h"
+
+static const char adf_driver_name[] = ADF_DH895XCCVF_DEVICE_NAME;
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+       ADF_SYSTEM_DEVICE(ADF_DH895XCCIOV_PCI_DEVICE_ID),
+       {0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+       .id_table = adf_pci_tbl,
+       .name = adf_driver_name,
+       .probe = adf_probe,
+       .remove = adf_remove,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+       struct adf_accel_dev *pf;
+       int i;
+
+       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+               if (bar->virt_addr)
+                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+       }
+
+       if (accel_dev->hw_device) {
+               switch (accel_pci_dev->pci_dev->device) {
+               case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+                       adf_clean_hw_data_dh895xcciov(accel_dev->hw_device);
+                       break;
+               default:
+                       break;
+               }
+               kfree(accel_dev->hw_device);
+               accel_dev->hw_device = NULL;
+       }
+       adf_cfg_dev_remove(accel_dev);
+       debugfs_remove(accel_dev->debugfs_dir);
+       pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
+       adf_devmgr_rm_dev(accel_dev, pf);
+}
+
+static int adf_dev_configure(struct adf_accel_dev *accel_dev)
+{
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       unsigned long val, bank = 0;
+
+       if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+               goto err;
+       if (adf_cfg_section_add(accel_dev, "Accelerator0"))
+               goto err;
+
+       snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, 0);
+       if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
+                                       (void *)&bank, ADF_DEC))
+               goto err;
+
+       val = bank;
+       snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, 0);
+       if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
+                                       (void *)&val, ADF_DEC))
+               goto err;
+
+       snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, 0);
+
+       val = 128;
+       if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
+                                       (void *)&val, ADF_DEC))
+               goto err;
+
+       val = 512;
+       snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, 0);
+       if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                       key, (void *)&val, ADF_DEC))
+               goto err;
+
+       val = 0;
+       snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, 0);
+       if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                       key, (void *)&val, ADF_DEC))
+               goto err;
+
+       val = 2;
+       snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, 0);
+       if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                       key, (void *)&val, ADF_DEC))
+               goto err;
+
+       val = 8;
+       snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, 0);
+       if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                       key, (void *)&val, ADF_DEC))
+               goto err;
+
+       val = 10;
+       snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, 0);
+       if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                       key, (void *)&val, ADF_DEC))
+               goto err;
+
+       val = ADF_COALESCING_DEF_TIME;
+       snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT,
+                (int)bank);
+       if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+                                       key, (void *)&val, ADF_DEC))
+               goto err;
+
+       val = 1;
+       if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                       ADF_NUM_CY, (void *)&val, ADF_DEC))
+               goto err;
+
+       set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+       return 0;
+err:
+       dev_err(&GET_DEV(accel_dev), "Failed to configure QAT accel dev\n");
+       return -EINVAL;
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct adf_accel_dev *accel_dev;
+       struct adf_accel_dev *pf;
+       struct adf_accel_pci *accel_pci_dev;
+       struct adf_hw_device_data *hw_data;
+       char name[ADF_DEVICE_NAME_LENGTH];
+       unsigned int i, bar_nr;
+       int ret, bar_mask;
+
+       switch (ent->device) {
+       case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+               break;
+       default:
+               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+               return -ENODEV;
+       }
+
+       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+                                dev_to_node(&pdev->dev));
+       if (!accel_dev)
+               return -ENOMEM;
+
+       accel_dev->is_vf = true;
+       pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
+       accel_pci_dev = &accel_dev->accel_pci_dev;
+       accel_pci_dev->pci_dev = pdev;
+
+       /* Add accel device to accel table */
+       if (adf_devmgr_add_dev(accel_dev, pf)) {
+               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+               kfree(accel_dev);
+               return -EFAULT;
+       }
+       INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+       accel_dev->owner = THIS_MODULE;
+       /* Allocate and configure device configuration structure */
+       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+                              dev_to_node(&pdev->dev));
+       if (!hw_data) {
+               ret = -ENOMEM;
+               goto out_err;
+       }
+       accel_dev->hw_device = hw_data;
+       switch (ent->device) {
+       case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+               adf_init_hw_data_dh895xcciov(accel_dev->hw_device);
+               break;
+       default:
+               ret = -ENODEV;
+               goto out_err;
+       }
+
+       /* Get Accelerators and Accelerators Engines masks */
+       hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+       hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+       accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+       /* Create dev top level debugfs entry */
+       snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d",
+                ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+                pdev->bus->number, PCI_SLOT(pdev->devfn),
+                PCI_FUNC(pdev->devfn));
+
+       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+       if (!accel_dev->debugfs_dir) {
+               dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+               ret = -EINVAL;
+               goto out_err;
+       }
+
+       /* Create device configuration table */
+       ret = adf_cfg_dev_add(accel_dev);
+       if (ret)
+               goto out_err;
+
+       /* enable PCI device */
+       if (pci_enable_device(pdev)) {
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* set dma identifier */
+       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+                       dev_err(&pdev->dev, "No usable DMA configuration\n");
+                       ret = -EFAULT;
+                       goto out_err_disable;
+               } else {
+                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+               }
+
+       } else {
+               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       }
+
+       if (pci_request_regions(pdev, adf_driver_name)) {
+               ret = -EFAULT;
+               goto out_err_disable;
+       }
+
+       /* Find and map all the device's BARS */
+       i = 0;
+       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+       for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
+                        ADF_PCI_MAX_BARS * 2) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+               bar->base_addr = pci_resource_start(pdev, bar_nr);
+               if (!bar->base_addr)
+                       break;
+               bar->size = pci_resource_len(pdev, bar_nr);
+               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+               if (!bar->virt_addr) {
+                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+                       ret = -EFAULT;
+                       goto out_err_free_reg;
+               }
+       }
+       pci_set_master(pdev);
+       /* Completion for VF2PF request/response message exchange */
+       init_completion(&accel_dev->vf.iov_msg_completion);
+
+       ret = adf_dev_configure(accel_dev);
+       if (ret)
+               goto out_err_free_reg;
+
+       ret = adf_dev_init(accel_dev);
+       if (ret)
+               goto out_err_dev_shutdown;
+
+       ret = adf_dev_start(accel_dev);
+       if (ret)
+               goto out_err_dev_stop;
+
+       return ret;
+
+out_err_dev_stop:
+       adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+       adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+       pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+       pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+       adf_cleanup_accel(accel_dev);
+       kfree(accel_dev);
+       return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       if (!accel_dev) {
+               pr_err("QAT: Driver removal failed\n");
+               return;
+       }
+       if (adf_dev_stop(accel_dev))
+               dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
+
+       adf_dev_shutdown(accel_dev);
+       adf_cleanup_accel(accel_dev);
+       adf_cleanup_pci_dev(accel_dev);
+       kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+       request_module("intel_qat");
+
+       if (pci_register_driver(&adf_driver)) {
+               pr_err("QAT: Driver initialization failed\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+       pci_unregister_driver(&adf_driver);
+       adf_clean_vf_map(true);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
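The VF probe path mirrors the PF driver, including the DMA mask negotiation: try a 64-bit mask first and fall back to 32-bit, failing the probe only when neither works. A self-contained sketch of that fallback, where set_dma_mask() is a hypothetical stand-in for pci_set_dma_mask() on a host limited to 32-bit DMA:

#include <stdio.h>
#include <stdint.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Pretend the platform only supports 32-bit DMA. */
static int set_dma_mask(uint64_t mask)
{
	return mask > DMA_BIT_MASK(32) ? -1 : 0;
}

int main(void)
{
	/* Same shape as the probe code: prefer 64-bit, fall back to 32. */
	if (set_dma_mask(DMA_BIT_MASK(64))) {
		if (set_dma_mask(DMA_BIT_MASK(32))) {
			puts("No usable DMA configuration");
			return 1;
		}
		puts("using 32-bit DMA mask");
	} else {
		puts("using 64-bit DMA mask");
	}
	return 0;
}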
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h
new file mode 100644 (file)
index 0000000..e270e4a
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DH895xVF_DRV_H_
+#define ADF_DH895xVF_DRV_H_
+#include <adf_accel_devices.h>
+#include <adf_transport.h>
+
+void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
+int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
+void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev);
+void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring);
+#endif
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c b/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c
new file mode 100644 (file)
index 0000000..87c5d8a
--- /dev/null
@@ -0,0 +1,258 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_cfg_strings.h>
+#include <adf_cfg_common.h>
+#include <adf_transport_access_macros.h>
+#include <adf_transport_internal.h>
+#include <adf_pf2vf_msg.h>
+#include "adf_drv.h"
+#include "adf_dh895xccvf_hw_data.h"
+
+static int adf_enable_msi(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+       int stat = pci_enable_msi(pci_dev_info->pci_dev);
+
+       if (stat) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to enable MSI interrupts\n");
+               return stat;
+       }
+
+       accel_dev->vf.irq_name = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
+       if (!accel_dev->vf.irq_name)
+               return -ENOMEM;
+
+       return stat;
+}
+
+static void adf_disable_msi(struct adf_accel_dev *accel_dev)
+{
+       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+       kfree(accel_dev->vf.irq_name);
+       pci_disable_msi(pdev);
+}
+
+static void adf_pf2vf_bh_handler(void *data)
+{
+       struct adf_accel_dev *accel_dev = data;
+       void __iomem *pmisc_bar_addr =
+               (&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr;
+       u32 msg;
+
+       /* Read the message from PF */
+       msg = ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET);
+
+       if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM))
+               /* Ignore legacy non-system (non-kernel) PF2VF messages */
+               goto err;
+
+       switch ((msg & ADF_PF2VF_MSGTYPE_MASK) >> ADF_PF2VF_MSGTYPE_SHIFT) {
+       case ADF_PF2VF_MSGTYPE_RESTARTING:
+               dev_dbg(&GET_DEV(accel_dev),
+                       "Restarting msg received from PF 0x%x\n", msg);
+               adf_dev_stop(accel_dev);
+               break;
+       case ADF_PF2VF_MSGTYPE_VERSION_RESP:
+               dev_dbg(&GET_DEV(accel_dev),
+                       "Version resp received from PF 0x%x\n", msg);
+               accel_dev->vf.pf_version =
+                       (msg & ADF_PF2VF_VERSION_RESP_VERS_MASK) >>
+                       ADF_PF2VF_VERSION_RESP_VERS_SHIFT;
+               accel_dev->vf.compatible =
+                       (msg & ADF_PF2VF_VERSION_RESP_RESULT_MASK) >>
+                       ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+               complete(&accel_dev->vf.iov_msg_completion);
+               break;
+       default:
+               goto err;
+       }
+
+       /* To ack, clear the PF2VFINT bit */
+       msg &= ~ADF_DH895XCC_PF2VF_PF2VFINT;
+       ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET, msg);
+
+       /* Re-enable PF2VF interrupts */
+       adf_enable_pf2vf_interrupts(accel_dev);
+       return;
+err:
+       dev_err(&GET_DEV(accel_dev),
+               "Unknown message from PF (0x%x); leaving PF2VF ints disabled\n",
+               msg);
+}
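
The bottom half above decodes the doorbell as one 32-bit word, pulling each field out with a mask/shift pair (the real layout lives in adf_pf2vf_msg.h). A minimal sketch of the same decoding pattern, using a purely hypothetical field layout:

    #include <linux/types.h>

    /* Hypothetical layout, for illustration only (not the real ADF one). */
    #define MSG_ORIGIN_SYSTEM   0x00000001u /* bit 0: kernel-originated */
    #define MSG_TYPE_MASK       0x0000003cu /* bits 2..5: message type */
    #define MSG_TYPE_SHIFT      2
    #define MSG_PAYLOAD_MASK    0xffffff00u /* bits 8..31: payload */
    #define MSG_PAYLOAD_SHIFT   8

    static inline bool msg_from_kernel(u32 msg)
    {
            return msg & MSG_ORIGIN_SYSTEM;
    }

    static inline u32 msg_type(u32 msg)
    {
            return (msg & MSG_TYPE_MASK) >> MSG_TYPE_SHIFT;
    }

    static inline u32 msg_payload(u32 msg)
    {
            return (msg & MSG_PAYLOAD_MASK) >> MSG_PAYLOAD_SHIFT;
    }
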
+
+static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
+{
+       tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet,
+                    (void *)adf_pf2vf_bh_handler, (unsigned long)accel_dev);
+
+       mutex_init(&accel_dev->vf.vf2pf_lock);
+       return 0;
+}
+
+static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev)
+{
+       tasklet_disable(&accel_dev->vf.pf2vf_bh_tasklet);
+       tasklet_kill(&accel_dev->vf.pf2vf_bh_tasklet);
+       mutex_destroy(&accel_dev->vf.vf2pf_lock);
+}
+
+static irqreturn_t adf_isr(int irq, void *privdata)
+{
+       struct adf_accel_dev *accel_dev = privdata;
+       void __iomem *pmisc_bar_addr =
+               (&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr;
+       u32 v_int;
+
+       /* Read VF INT source CSR to determine the source of VF interrupt */
+       v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_VINTSOU_OFFSET);
+
+       /* Check for PF2VF interrupt */
+       if (v_int & ADF_DH895XCC_VINTSOU_PF2VF) {
+               /* Disable PF to VF interrupt */
+               adf_disable_pf2vf_interrupts(accel_dev);
+
+               /* Schedule tasklet to handle interrupt BH */
+               tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
+               return IRQ_HANDLED;
+       }
+
+       /* Check bundle interrupt */
+       if (v_int & ADF_DH895XCC_VINTSOU_BUN) {
+               struct adf_etr_data *etr_data = accel_dev->transport;
+               struct adf_etr_bank_data *bank = &etr_data->banks[0];
+
+               /* Disable Flag and Coalesce Ring Interrupts */
+               WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
+                                          0);
+               tasklet_hi_schedule(&bank->resp_handler);
+               return IRQ_HANDLED;
+       }
+
+       return IRQ_NONE;
+}
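
adf_isr() is a classic top half: it only classifies the interrupt source, masks it, and defers the real work to a tasklet (PF2VF messages to pf2vf_bh_tasklet, ring responses to the bank's resp_handler). The generic shape of that split, as a sketch with hypothetical helper names:

    /* Sketch only: example_dev, example_irq_pending() and
     * example_mask_irq() are hypothetical stand-ins.
     */
    static irqreturn_t example_isr(int irq, void *privdata)
    {
            struct example_dev *dev = privdata;

            if (!example_irq_pending(dev))
                    return IRQ_NONE;        /* not ours; let others try */

            example_mask_irq(dev);          /* keep it quiet until the BH runs */
            tasklet_hi_schedule(&dev->bh);  /* heavy lifting happens later */
            return IRQ_HANDLED;
    }
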
+
+static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
+{
+       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+       unsigned int cpu;
+       int ret;
+
+       snprintf(accel_dev->vf.irq_name, ADF_MAX_MSIX_VECTOR_NAME,
+                "qat_%02x:%02d.%02d", pdev->bus->number, PCI_SLOT(pdev->devfn),
+                PCI_FUNC(pdev->devfn));
+       ret = request_irq(pdev->irq, adf_isr, 0, accel_dev->vf.irq_name,
+                         (void *)accel_dev);
+       if (ret) {
+               dev_err(&GET_DEV(accel_dev), "failed to enable irq for %s\n",
+                       accel_dev->vf.irq_name);
+               return ret;
+       }
+       cpu = accel_dev->accel_id % num_online_cpus();
+       irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));
+
+       return ret;
+}
+
+static int adf_setup_bh(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *priv_data = accel_dev->transport;
+
+       tasklet_init(&priv_data->banks[0].resp_handler, adf_response_handler,
+                    (unsigned long)priv_data->banks);
+       return 0;
+}
+
+static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *priv_data = accel_dev->transport;
+
+       tasklet_disable(&priv_data->banks[0].resp_handler);
+       tasklet_kill(&priv_data->banks[0].resp_handler);
+}
+
+void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
+{
+       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+       irq_set_affinity_hint(pdev->irq, NULL);
+       free_irq(pdev->irq, (void *)accel_dev);
+       adf_cleanup_bh(accel_dev);
+       adf_cleanup_pf2vf_bh(accel_dev);
+       adf_disable_msi(accel_dev);
+}
+
+int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
+{
+       if (adf_enable_msi(accel_dev))
+               goto err_out;
+
+       if (adf_setup_pf2vf_bh(accel_dev))
+               goto err_disable_msi;
+
+       if (adf_setup_bh(accel_dev))
+               goto err_cleanup_pf2vf_bh;
+
+       if (adf_request_msi_irq(accel_dev))
+               goto err_cleanup_bh;
+
+       return 0;
+
+       /* Unwind only the steps that succeeded; calling the full
+        * adf_vf_isr_resource_free() here would free_irq() a vector that
+        * was never requested and disable tasklets that were never set up.
+        */
+err_cleanup_bh:
+       adf_cleanup_bh(accel_dev);
+err_cleanup_pf2vf_bh:
+       adf_cleanup_pf2vf_bh(accel_dev);
+err_disable_msi:
+       adf_disable_msi(accel_dev);
+err_out:
+       return -EFAULT;
+}
index 397a500b3d8a9edbaf214ae28a8cc88e64582eb0..1c19e44c3146ffbc1d69179c2440f6e26c1aa6ea 100644 (file)
@@ -1516,7 +1516,7 @@ static int sahara_probe(struct platform_device *pdev)
        }
 
        /* Allocate HW descriptors */
-       dev->hw_desc[0] = dma_alloc_coherent(&pdev->dev,
+       dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
                        SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
                        &dev->hw_phys_desc[0], GFP_KERNEL);
        if (!dev->hw_desc[0]) {
@@ -1528,34 +1528,31 @@ static int sahara_probe(struct platform_device *pdev)
                                sizeof(struct sahara_hw_desc);
 
        /* Allocate space for iv and key */
-       dev->key_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
+       dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
                                &dev->key_phys_base, GFP_KERNEL);
        if (!dev->key_base) {
                dev_err(&pdev->dev, "Could not allocate memory for key\n");
-               err = -ENOMEM;
-               goto err_key;
+               return -ENOMEM;
        }
        dev->iv_base = dev->key_base + AES_KEYSIZE_128;
        dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
 
        /* Allocate space for context: largest digest + message length field */
-       dev->context_base = dma_alloc_coherent(&pdev->dev,
+       dev->context_base = dmam_alloc_coherent(&pdev->dev,
                                        SHA256_DIGEST_SIZE + 4,
                                        &dev->context_phys_base, GFP_KERNEL);
        if (!dev->context_base) {
                dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
-               err = -ENOMEM;
-               goto err_key;
+               return -ENOMEM;
        }
 
        /* Allocate space for HW links */
-       dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
+       dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
                        SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
                        &dev->hw_phys_link[0], GFP_KERNEL);
        if (!dev->hw_link[0]) {
                dev_err(&pdev->dev, "Could not allocate hw links\n");
-               err = -ENOMEM;
-               goto err_link;
+               return -ENOMEM;
        }
        for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
                dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
@@ -1572,15 +1569,14 @@ static int sahara_probe(struct platform_device *pdev)
 
        dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
        if (IS_ERR(dev->kthread)) {
-               err = PTR_ERR(dev->kthread);
-               goto err_link;
+               return PTR_ERR(dev->kthread);
        }
 
        init_completion(&dev->dma_completion);
 
        err = clk_prepare_enable(dev->clk_ipg);
        if (err)
-               goto err_link;
+               return err;
        err = clk_prepare_enable(dev->clk_ahb);
        if (err)
                goto clk_ipg_disable;
@@ -1620,25 +1616,11 @@ static int sahara_probe(struct platform_device *pdev)
        return 0;
 
 err_algs:
-       dma_free_coherent(&pdev->dev,
-                         SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
-                         dev->hw_link[0], dev->hw_phys_link[0]);
        kthread_stop(dev->kthread);
        dev_ptr = NULL;
        clk_disable_unprepare(dev->clk_ahb);
 clk_ipg_disable:
        clk_disable_unprepare(dev->clk_ipg);
-err_link:
-       dma_free_coherent(&pdev->dev,
-                         2 * AES_KEYSIZE_128,
-                         dev->key_base, dev->key_phys_base);
-       dma_free_coherent(&pdev->dev,
-                         SHA256_DIGEST_SIZE,
-                         dev->context_base, dev->context_phys_base);
-err_key:
-       dma_free_coherent(&pdev->dev,
-                         SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
-                         dev->hw_desc[0], dev->hw_phys_desc[0]);
 
        return err;
 }
@@ -1647,16 +1629,6 @@ static int sahara_remove(struct platform_device *pdev)
 {
        struct sahara_dev *dev = platform_get_drvdata(pdev);
 
-       dma_free_coherent(&pdev->dev,
-                         SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
-                         dev->hw_link[0], dev->hw_phys_link[0]);
-       dma_free_coherent(&pdev->dev,
-                         2 * AES_KEYSIZE_128,
-                         dev->key_base, dev->key_phys_base);
-       dma_free_coherent(&pdev->dev,
-                         SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
-                         dev->hw_desc[0], dev->hw_phys_desc[0]);
-
        kthread_stop(dev->kthread);
 
        sahara_unregister_algs(dev);
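
The whole point of the hunk above is the switch from dma_alloc_coherent() to its managed (devres) variant: dmam_alloc_coherent() ties the buffer's lifetime to the device, so it is released automatically when probe fails or the device is removed, and every explicit dma_free_coherent() error label can go. A minimal sketch of the pattern; the function and the size are illustrative, not sahara code:

    #include <linux/dma-mapping.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            dma_addr_t phys;
            void *buf;

            /* Owned by devres: freed automatically on probe failure or
             * device removal, so no error-path dma_free_coherent().
             */
            buf = dmam_alloc_coherent(&pdev->dev, PAGE_SIZE, &phys,
                                      GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            /* later failures can simply return an error code */
            return 0;
    }
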
diff --git a/drivers/crypto/sunxi-ss/Makefile b/drivers/crypto/sunxi-ss/Makefile
new file mode 100644 (file)
index 0000000..8f4c7a2
--- /dev/null
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sun4i-ss.o
+sun4i-ss-y += sun4i-ss-core.o sun4i-ss-hash.o sun4i-ss-cipher.o
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
new file mode 100644 (file)
index 0000000..e070c31
--- /dev/null
@@ -0,0 +1,542 @@
+/*
+ * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
+ *
+ * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * This file adds support for the AES cipher with 128-, 192- and 256-bit
+ * keys in CBC and ECB mode, as well as DES and 3DES in CBC and ECB mode.
+ *
+ * You can find the datasheet in Documentation/arm/sunxi/README
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include "sun4i-ss.h"
+
+static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
+{
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+       struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+       struct sun4i_ss_ctx *ss = op->ss;
+       unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
+       struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
+       u32 mode = ctx->mode;
+       /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
+       u32 rx_cnt = SS_RX_DEFAULT;
+       u32 tx_cnt = 0;
+       u32 spaces;
+       u32 v;
+       int i, err = 0;
+       unsigned int ileft = areq->nbytes;
+       unsigned int oleft = areq->nbytes;
+       unsigned int todo;
+       struct sg_mapping_iter mi, mo;
+       unsigned int oi, oo; /* offset for in and out */
+
+       if (areq->nbytes == 0)
+               return 0;
+
+       if (!areq->info) {
+               dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
+               return -EINVAL;
+       }
+
+       if (!areq->src || !areq->dst) {
+               dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
+               return -EINVAL;
+       }
+
+       spin_lock_bh(&ss->slock);
+
+       for (i = 0; i < op->keylen; i += 4)
+               writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
+
+       if (areq->info) {
+               for (i = 0; i < 4 && i < ivsize / 4; i++) {
+                       v = *(u32 *)(areq->info + i * 4);
+                       writel(v, ss->base + SS_IV0 + i * 4);
+               }
+       }
+       writel(mode, ss->base + SS_CTL);
+
+       sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+                      SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+       sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
+                      SG_MITER_TO_SG | SG_MITER_ATOMIC);
+       sg_miter_next(&mi);
+       sg_miter_next(&mo);
+       if (!mi.addr || !mo.addr) {
+               dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+               err = -EINVAL;
+               goto release_ss;
+       }
+
+       ileft = areq->nbytes / 4;
+       oleft = areq->nbytes / 4;
+       oi = 0;
+       oo = 0;
+       do {
+               todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
+               if (todo > 0) {
+                       ileft -= todo;
+                       writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
+                       oi += todo * 4;
+               }
+               if (oi == mi.length) {
+                       sg_miter_next(&mi);
+                       oi = 0;
+               }
+
+               spaces = readl(ss->base + SS_FCSR);
+               rx_cnt = SS_RXFIFO_SPACES(spaces);
+               tx_cnt = SS_TXFIFO_SPACES(spaces);
+
+               todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
+               if (todo > 0) {
+                       oleft -= todo;
+                       readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
+                       oo += todo * 4;
+               }
+               if (oo == mo.length) {
+                       sg_miter_next(&mo);
+                       oo = 0;
+               }
+       } while (mo.length > 0);
+
+       if (areq->info) {
+               for (i = 0; i < 4 && i < ivsize / 4; i++) {
+                       v = readl(ss->base + SS_IV0 + i * 4);
+                       *(u32 *)(areq->info + i * 4) = v;
+               }
+       }
+
+release_ss:
+       sg_miter_stop(&mi);
+       sg_miter_stop(&mo);
+       writel(0, ss->base + SS_CTL);
+       spin_unlock_bh(&ss->slock);
+       return err;
+}
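
Each pass of the loop above pushes at most rx_cnt 32-bit words into the RX FIFO, re-reads both free-space counters from SS_FCSR, and then drains up to tx_cnt words from the TX FIFO. A worked pass under assumed conditions:

    /*
     * Worked pass (illustrative): a 16-byte request in a single SG.
     *   ileft = oleft = 16 / 4 = 4 words, rx_cnt starts at 32
     *   - todo = min3(32, 4, 16/4) = 4 -> write 4 words, ileft = 0
     *   - re-read SS_FCSR: assume tx_cnt now reports 4 result words
     *   - todo = min3(4, 4, 16/4) = 4 -> read 4 words, oleft = 0
     *   - oo == mo.length and no SG follows, so mo.length becomes 0
     *     and the do/while terminates
     */
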
+
+/* Generic function that supports SGs whose lengths are not a multiple of 4 */
+static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
+{
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+       struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+       struct sun4i_ss_ctx *ss = op->ss;
+       int no_chunk = 1;
+       struct scatterlist *in_sg = areq->src;
+       struct scatterlist *out_sg = areq->dst;
+       unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
+       struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
+       u32 mode = ctx->mode;
+       /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
+       u32 rx_cnt = SS_RX_DEFAULT;
+       u32 tx_cnt = 0;
+       u32 v;
+       u32 spaces;
+       int i, err = 0;
+       unsigned int ileft = areq->nbytes;
+       unsigned int oleft = areq->nbytes;
+       unsigned int todo;
+       struct sg_mapping_iter mi, mo;
+       unsigned int oi, oo;    /* offset for in and out */
+       char buf[4 * SS_RX_MAX];  /* bounce buffer for linearizing src SGs */
+       char bufo[4 * SS_TX_MAX]; /* bounce buffer for linearizing dst SGs */
+       unsigned int ob = 0;    /* offset in buf */
+       unsigned int obo = 0;   /* offset in bufo */
+       unsigned int obl = 0;   /* length of data in bufo */
+
+       if (areq->nbytes == 0)
+               return 0;
+
+       if (!areq->info) {
+               dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
+               return -EINVAL;
+       }
+
+       if (!areq->src || !areq->dst) {
+               dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
+               return -EINVAL;
+       }
+
+       /*
+        * If all SG lengths are a multiple of 4,
+        * we can use the optimized SS function.
+        */
+       while (in_sg && no_chunk == 1) {
+               if ((in_sg->length % 4) != 0)
+                       no_chunk = 0;
+               in_sg = sg_next(in_sg);
+       }
+       while (out_sg && no_chunk == 1) {
+               if ((out_sg->length % 4) != 0)
+                       no_chunk = 0;
+               out_sg = sg_next(out_sg);
+       }
+
+       if (no_chunk == 1)
+               return sun4i_ss_opti_poll(areq);
+
+       spin_lock_bh(&ss->slock);
+
+       for (i = 0; i < op->keylen; i += 4)
+               writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
+
+       if (areq->info) {
+               for (i = 0; i < 4 && i < ivsize / 4; i++) {
+                       v = *(u32 *)(areq->info + i * 4);
+                       writel(v, ss->base + SS_IV0 + i * 4);
+               }
+       }
+       writel(mode, ss->base + SS_CTL);
+
+       sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+                      SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+       sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
+                      SG_MITER_TO_SG | SG_MITER_ATOMIC);
+       sg_miter_next(&mi);
+       sg_miter_next(&mo);
+       if (!mi.addr || !mo.addr) {
+               dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+               err = -EINVAL;
+               goto release_ss;
+       }
+       ileft = areq->nbytes;
+       oleft = areq->nbytes;
+       oi = 0;
+       oo = 0;
+
+       while (oleft > 0) {
+               if (ileft > 0) {
+                       /*
+                        * todo is the number of consecutive 4-byte words
+                        * that we can read from the current SG
+                        */
+                       todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
+                       if (todo > 0 && ob == 0) {
+                               writesl(ss->base + SS_RXFIFO, mi.addr + oi,
+                                       todo);
+                               ileft -= todo * 4;
+                               oi += todo * 4;
+                       } else {
+                               /*
+                                * Not enough consecutive bytes, so linearize
+                                * into buf (todo is in bytes here). Once buf
+                                * holds a multiple of 4 bytes it must be
+                                * writable in a single pass, which is why we
+                                * also min() against rx_cnt.
+                                */
+                               todo = min3(rx_cnt * 4 - ob, ileft,
+                                           mi.length - oi);
+                               memcpy(buf + ob, mi.addr + oi, todo);
+                               ileft -= todo;
+                               oi += todo;
+                               ob += todo;
+                               if (ob % 4 == 0) {
+                                       writesl(ss->base + SS_RXFIFO, buf,
+                                               ob / 4);
+                                       ob = 0;
+                               }
+                       }
+                       if (oi == mi.length) {
+                               sg_miter_next(&mi);
+                               oi = 0;
+                       }
+               }
+
+               spaces = readl(ss->base + SS_FCSR);
+               rx_cnt = SS_RXFIFO_SPACES(spaces);
+               tx_cnt = SS_TXFIFO_SPACES(spaces);
+               dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u %u\n",
+                       mode,
+                       oi, mi.length, ileft, areq->nbytes, rx_cnt,
+                       oo, mo.length, oleft, areq->nbytes, tx_cnt,
+                       todo, ob);
+
+               if (tx_cnt == 0)
+                       continue;
+               /* todo is in 4-byte words */
+               todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
+               if (todo > 0) {
+                       readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
+                       oleft -= todo * 4;
+                       oo += todo * 4;
+                       if (oo == mo.length) {
+                               sg_miter_next(&mo);
+                               oo = 0;
+                       }
+               } else {
+                       /*
+                        * Read obl bytes into bufo; read as much as
+                        * possible in order to drain the device.
+                        */
+                       readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
+                       obl = tx_cnt * 4;
+                       obo = 0;
+                       do {
+                               /*
+                                * How many bytes can we copy? No more than
+                                * what remains in the SG, and no more than
+                                * what remains in bufo; there is no need to
+                                * test against oleft.
+                                */
+                               todo = min(mo.length - oo, obl - obo);
+                               memcpy(mo.addr + oo, bufo + obo, todo);
+                               oleft -= todo;
+                               obo += todo;
+                               oo += todo;
+                               if (oo == mo.length) {
+                                       sg_miter_next(&mo);
+                                       oo = 0;
+                               }
+                       } while (obo < obl);
+                       /* bufo must be fully used here */
+               }
+       }
+       if (areq->info) {
+               for (i = 0; i < 4 && i < ivsize / 4; i++) {
+                       v = readl(ss->base + SS_IV0 + i * 4);
+                       *(u32 *)(areq->info + i * 4) = v;
+               }
+       }
+
+release_ss:
+       sg_miter_stop(&mi);
+       sg_miter_stop(&mo);
+       writel(0, ss->base + SS_CTL);
+       spin_unlock_bh(&ss->slock);
+
+       return err;
+}
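
When an SG boundary falls in the middle of a 32-bit word, the generic path accumulates the stray bytes in buf (input) or bufo (output) so the FIFOs only ever see whole words. A worked input split under assumed SG sizes:

    /*
     * Illustrative input: two SGs of 6 and 10 bytes (16 bytes total).
     *   - SG0: one whole word (4 bytes) goes straight to the RX FIFO;
     *     the 2 trailing bytes are copied into buf (ob = 2)
     *   - SG1: since ob != 0, all 10 bytes are appended to buf
     *     (ob = 12); 12 is a multiple of 4, so buf is flushed as
     *     3 words and ob returns to 0
     * The output side plays the same game in reverse through bufo.
     */
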
+
+/* CBC AES */
+int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq)
+{
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+       struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+       struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+       rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
+               op->keymode;
+       return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq)
+{
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+       struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+       struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+       rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
+               op->keymode;
+       return sun4i_ss_cipher_poll(areq);
+}
+
+/* ECB AES */
+int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq)
+{
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+       struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+       struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+       rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
+               op->keymode;
+       return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq)
+{
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+       struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+       struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+       rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
+               op->keymode;
+       return sun4i_ss_cipher_poll(areq);
+}
+
+/* CBC DES */
+int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq)
+{
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+       struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+       struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+       rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
+               op->keymode;
+       return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq)
+{
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+       struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+       struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+       rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
+               op->keymode;
+       return sun4i_ss_cipher_poll(areq);
+}
+
+/* ECB DES */
+int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq)
+{
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+       struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+       struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+       rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
+               op->keymode;
+       return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq)
+{
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+       struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+       struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+       rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
+               op->keymode;
+       return sun4i_ss_cipher_poll(areq);
+}
+
+/* CBC 3DES */
+int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq)
+{
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+       struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+       struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+       rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
+               op->keymode;
+       return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq)
+{
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+       struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+       struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+       rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
+               op->keymode;
+       return sun4i_ss_cipher_poll(areq);
+}
+
+/* ECB 3DES */
+int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq)
+{
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+       struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+       struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+       rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
+               op->keymode;
+       return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq)
+{
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+       struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+       struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+       rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
+               op->keymode;
+       return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
+{
+       struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
+       struct crypto_alg *alg = tfm->__crt_alg;
+       struct sun4i_ss_alg_template *algt;
+
+       memset(op, 0, sizeof(struct sun4i_tfm_ctx));
+
+       algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
+       op->ss = algt->ss;
+
+       tfm->crt_ablkcipher.reqsize = sizeof(struct sun4i_cipher_req_ctx);
+
+       return 0;
+}
+
+/* check and set the AES key, prepare the mode to be used */
+int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+                       unsigned int keylen)
+{
+       struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+       struct sun4i_ss_ctx *ss = op->ss;
+
+       switch (keylen) {
+       case 128 / 8:
+               op->keymode = SS_AES_128BITS;
+               break;
+       case 192 / 8:
+               op->keymode = SS_AES_192BITS;
+               break;
+       case 256 / 8:
+               op->keymode = SS_AES_256BITS;
+               break;
+       default:
+               dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
+               crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+               return -EINVAL;
+       }
+       op->keylen = keylen;
+       memcpy(op->key, key, keylen);
+       return 0;
+}
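
A user of the tfm reaches this path through the ablkcipher API, and the key length alone selects the keymode that is later OR-ed into the control word. A minimal sketch, with tfm handling trimmed down ("cbc(aes)" is the cra_name registered below in sun4i-ss-core.c):

    #include <linux/crypto.h>
    #include <linux/err.h>

    static int example_set_aes_key(void)
    {
            struct crypto_ablkcipher *tfm;
            u8 key[16] = { 0 };     /* 16 bytes -> op->keymode = SS_AES_128BITS */
            int err;

            tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
            crypto_free_ablkcipher(tfm);
            return err;
    }
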
+
+/* check and set the DES key, prepare the mode to be used */
+int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+                       unsigned int keylen)
+{
+       struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+       struct sun4i_ss_ctx *ss = op->ss;
+       u32 flags;
+       u32 tmp[DES_EXPKEY_WORDS];
+       int ret;
+
+       if (unlikely(keylen != DES_KEY_SIZE)) {
+               dev_err(ss->dev, "Invalid keylen %u\n", keylen);
+               crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+               return -EINVAL;
+       }
+
+       flags = crypto_ablkcipher_get_flags(tfm);
+
+       ret = des_ekey(tmp, key);
+       if (unlikely(ret == 0) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+               crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
+               dev_dbg(ss->dev, "Weak key %u\n", keylen);
+               return -EINVAL;
+       }
+
+       op->keylen = keylen;
+       memcpy(op->key, key, keylen);
+       return 0;
+}
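
des_ekey() returns 0 when the key is one of the known weak DES keys, but the driver only rejects it if the user opted in via CRYPTO_TFM_REQ_WEAK_KEY. A sketch of a strict caller (tfm allocated as in the previous example):

    #include <crypto/des.h>

    static int example_strict_des_setkey(struct crypto_ablkcipher *tfm,
                                         const u8 *des_key)
    {
            /* Ask for weak-key rejection before setting the key. */
            crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);

            /* Returns -EINVAL, with CRYPTO_TFM_RES_WEAK_KEY set in the
             * tfm flags, if des_key is one of the known weak keys.
             */
            return crypto_ablkcipher_setkey(tfm, des_key, DES_KEY_SIZE);
    }
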
+
+/* check and set the 3DES key, prepare the mode to be used */
+int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+                        unsigned int keylen)
+{
+       struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+       struct sun4i_ss_ctx *ss = op->ss;
+
+       if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
+               dev_err(ss->dev, "Invalid keylen %u\n", keylen);
+               crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+               return -EINVAL;
+       }
+       op->keylen = keylen;
+       memcpy(op->key, key, keylen);
+       return 0;
+}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
new file mode 100644 (file)
index 0000000..eab6fe2
--- /dev/null
@@ -0,0 +1,425 @@
+/*
+ * sun4i-ss-core.c - hardware cryptographic accelerator for Allwinner A20 SoC
+ *
+ * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * Core file which registers crypto algorithms supported by the SS.
+ *
+ * You could find a link for the datasheet in Documentation/arm/sunxi/README
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <crypto/scatterwalk.h>
+#include <linux/scatterlist.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/reset.h>
+
+#include "sun4i-ss.h"
+
+static struct sun4i_ss_alg_template ss_algs[] = {
+{       .type = CRYPTO_ALG_TYPE_AHASH,
+       .mode = SS_OP_MD5,
+       .alg.hash = {
+               .init = sun4i_hash_init,
+               .update = sun4i_hash_update,
+               .final = sun4i_hash_final,
+               .finup = sun4i_hash_finup,
+               .digest = sun4i_hash_digest,
+               .export = sun4i_hash_export_md5,
+               .import = sun4i_hash_import_md5,
+               .halg = {
+                       .digestsize = MD5_DIGEST_SIZE,
+                       .base = {
+                               .cra_name = "md5",
+                               .cra_driver_name = "md5-sun4i-ss",
+                               .cra_priority = 300,
+                               .cra_alignmask = 3,
+                               .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+                               .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct sun4i_req_ctx),
+                               .cra_module = THIS_MODULE,
+                               .cra_type = &crypto_ahash_type,
+                               .cra_init = sun4i_hash_crainit
+                       }
+               }
+       }
+},
+{       .type = CRYPTO_ALG_TYPE_AHASH,
+       .mode = SS_OP_SHA1,
+       .alg.hash = {
+               .init = sun4i_hash_init,
+               .update = sun4i_hash_update,
+               .final = sun4i_hash_final,
+               .finup = sun4i_hash_finup,
+               .digest = sun4i_hash_digest,
+               .export = sun4i_hash_export_sha1,
+               .import = sun4i_hash_import_sha1,
+               .halg = {
+                       .digestsize = SHA1_DIGEST_SIZE,
+                       .base = {
+                               .cra_name = "sha1",
+                               .cra_driver_name = "sha1-sun4i-ss",
+                               .cra_priority = 300,
+                               .cra_alignmask = 3,
+                               .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+                               .cra_blocksize = SHA1_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct sun4i_req_ctx),
+                               .cra_module = THIS_MODULE,
+                               .cra_type = &crypto_ahash_type,
+                               .cra_init = sun4i_hash_crainit
+                       }
+               }
+       }
+},
+{       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+       .alg.crypto = {
+               .cra_name = "cbc(aes)",
+               .cra_driver_name = "cbc-aes-sun4i-ss",
+               .cra_priority = 300,
+               .cra_blocksize = AES_BLOCK_SIZE,
+               .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+               .cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
+               .cra_module = THIS_MODULE,
+               .cra_alignmask = 3,
+               .cra_type = &crypto_ablkcipher_type,
+               .cra_init = sun4i_ss_cipher_init,
+               .cra_ablkcipher = {
+                       .min_keysize    = AES_MIN_KEY_SIZE,
+                       .max_keysize    = AES_MAX_KEY_SIZE,
+                       .ivsize         = AES_BLOCK_SIZE,
+                       .setkey         = sun4i_ss_aes_setkey,
+                       .encrypt        = sun4i_ss_cbc_aes_encrypt,
+                       .decrypt        = sun4i_ss_cbc_aes_decrypt,
+               }
+       }
+},
+{       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+       .alg.crypto = {
+               .cra_name = "ecb(aes)",
+               .cra_driver_name = "ecb-aes-sun4i-ss",
+               .cra_priority = 300,
+               .cra_blocksize = AES_BLOCK_SIZE,
+               .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+               .cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
+               .cra_module = THIS_MODULE,
+               .cra_alignmask = 3,
+               .cra_type = &crypto_ablkcipher_type,
+               .cra_init = sun4i_ss_cipher_init,
+               .cra_ablkcipher = {
+                       .min_keysize    = AES_MIN_KEY_SIZE,
+                       .max_keysize    = AES_MAX_KEY_SIZE,
+                       .ivsize         = AES_BLOCK_SIZE,
+                       .setkey         = sun4i_ss_aes_setkey,
+                       .encrypt        = sun4i_ss_ecb_aes_encrypt,
+                       .decrypt        = sun4i_ss_ecb_aes_decrypt,
+               }
+       }
+},
+{       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+       .alg.crypto = {
+               .cra_name = "cbc(des)",
+               .cra_driver_name = "cbc-des-sun4i-ss",
+               .cra_priority = 300,
+               .cra_blocksize = DES_BLOCK_SIZE,
+               .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+               .cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
+               .cra_module = THIS_MODULE,
+               .cra_alignmask = 3,
+               .cra_type = &crypto_ablkcipher_type,
+               .cra_init = sun4i_ss_cipher_init,
+               .cra_u.ablkcipher = {
+                       .min_keysize    = DES_KEY_SIZE,
+                       .max_keysize    = DES_KEY_SIZE,
+                       .ivsize         = DES_BLOCK_SIZE,
+                       .setkey         = sun4i_ss_des_setkey,
+                       .encrypt        = sun4i_ss_cbc_des_encrypt,
+                       .decrypt        = sun4i_ss_cbc_des_decrypt,
+               }
+       }
+},
+{       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+       .alg.crypto = {
+               .cra_name = "ecb(des)",
+               .cra_driver_name = "ecb-des-sun4i-ss",
+               .cra_priority = 300,
+               .cra_blocksize = DES_BLOCK_SIZE,
+               .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+               .cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
+               .cra_module = THIS_MODULE,
+               .cra_alignmask = 3,
+               .cra_type = &crypto_ablkcipher_type,
+               .cra_init = sun4i_ss_cipher_init,
+               .cra_u.ablkcipher = {
+                       .min_keysize    = DES_KEY_SIZE,
+                       .max_keysize    = DES_KEY_SIZE,
+                       .setkey         = sun4i_ss_des_setkey,
+                       .encrypt        = sun4i_ss_ecb_des_encrypt,
+                       .decrypt        = sun4i_ss_ecb_des_decrypt,
+               }
+       }
+},
+{       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+       .alg.crypto = {
+                       .cra_name = "cbc(des3_ede)",
+                       .cra_driver_name = "cbc-des3-sun4i-ss",
+                       .cra_priority = 300,
+                       .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                       .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+                       .cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
+                       .cra_module = THIS_MODULE,
+                       .cra_alignmask = 3,
+                       .cra_type = &crypto_ablkcipher_type,
+                       .cra_init = sun4i_ss_cipher_init,
+                       .cra_u.ablkcipher = {
+                               .min_keysize    = DES3_EDE_KEY_SIZE,
+                               .max_keysize    = DES3_EDE_KEY_SIZE,
+                               .ivsize         = DES3_EDE_BLOCK_SIZE,
+                               .setkey         = sun4i_ss_des3_setkey,
+                               .encrypt        = sun4i_ss_cbc_des3_encrypt,
+                               .decrypt        = sun4i_ss_cbc_des3_decrypt,
+               }
+       }
+},
+{       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+       .alg.crypto = {
+                       .cra_name = "ecb(des3_ede)",
+                       .cra_driver_name = "ecb-des3-sun4i-ss",
+                       .cra_priority = 300,
+                       .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                       .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+                       .cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
+                       .cra_module = THIS_MODULE,
+                       .cra_alignmask = 3,
+                       .cra_type = &crypto_ablkcipher_type,
+                       .cra_init = sun4i_ss_cipher_init,
+                       .cra_u.ablkcipher = {
+                               .min_keysize    = DES3_EDE_KEY_SIZE,
+                               .max_keysize    = DES3_EDE_KEY_SIZE,
+                               .ivsize         = DES3_EDE_BLOCK_SIZE,
+                               .setkey         = sun4i_ss_des3_setkey,
+                               .encrypt        = sun4i_ss_ecb_des3_encrypt,
+                               .decrypt        = sun4i_ss_ecb_des3_decrypt,
+               }
+       }
+},
+};
+
+static int sun4i_ss_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+       u32 v;
+       int err, i;
+       unsigned long cr;
+       const unsigned long cr_ahb = 24 * 1000 * 1000;
+       const unsigned long cr_mod = 150 * 1000 * 1000;
+       struct sun4i_ss_ctx *ss;
+
+       if (!pdev->dev.of_node)
+               return -ENODEV;
+
+       ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL);
+       if (!ss)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       ss->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(ss->base)) {
+               dev_err(&pdev->dev, "Cannot request MMIO\n");
+               return PTR_ERR(ss->base);
+       }
+
+       ss->ssclk = devm_clk_get(&pdev->dev, "mod");
+       if (IS_ERR(ss->ssclk)) {
+               err = PTR_ERR(ss->ssclk);
+               dev_err(&pdev->dev, "Cannot get SS clock err=%d\n", err);
+               return err;
+       }
+       dev_dbg(&pdev->dev, "clock ss acquired\n");
+
+       ss->busclk = devm_clk_get(&pdev->dev, "ahb");
+       if (IS_ERR(ss->busclk)) {
+               err = PTR_ERR(ss->busclk);
+               dev_err(&pdev->dev, "Cannot get AHB SS clock err=%d\n", err);
+               return err;
+       }
+       dev_dbg(&pdev->dev, "clock ahb_ss acquired\n");
+
+       ss->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
+       if (IS_ERR(ss->reset)) {
+               if (PTR_ERR(ss->reset) == -EPROBE_DEFER)
+                       return PTR_ERR(ss->reset);
+               dev_info(&pdev->dev, "no reset control found\n");
+               ss->reset = NULL;
+       }
+
+       /* Enable both clocks */
+       err = clk_prepare_enable(ss->busclk);
+       if (err != 0) {
+               dev_err(&pdev->dev, "Cannot prepare_enable busclk\n");
+               return err;
+       }
+       err = clk_prepare_enable(ss->ssclk);
+       if (err != 0) {
+               dev_err(&pdev->dev, "Cannot prepare_enable ssclk\n");
+               goto error_ssclk;
+       }
+
+       /*
+        * Check that the clocks have the rates given in the datasheet,
+        * and try to set the SS clock to the maximum allowed rate.
+        */
+       err = clk_set_rate(ss->ssclk, cr_mod);
+       if (err != 0) {
+               dev_err(&pdev->dev, "Cannot set clock rate to ssclk\n");
+               goto error_clk;
+       }
+
+       /* Deassert reset if we have a reset control */
+       if (ss->reset) {
+               err = reset_control_deassert(ss->reset);
+               if (err) {
+                       dev_err(&pdev->dev, "Cannot deassert reset control\n");
+                       goto error_clk;
+               }
+       }
+
+       /*
+        * The only impact of clocks below the required rate is bad
+        * performance, so do not print an "error" for that; only warn
+        * when a clock is overclocked.
+        */
+       cr = clk_get_rate(ss->busclk);
+       if (cr >= cr_ahb)
+               dev_dbg(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n",
+                       cr, cr / 1000000, cr_ahb);
+       else
+               dev_warn(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n",
+                        cr, cr / 1000000, cr_ahb);
+
+       cr = clk_get_rate(ss->ssclk);
+       if (cr < cr_mod)
+               dev_warn(&pdev->dev, "Clock ss %lu (%lu MHz) (must be <= %lu)\n",
+                        cr, cr / 1000000, cr_mod);
+       else if (cr == cr_mod)
+               dev_dbg(&pdev->dev, "Clock ss %lu (%lu MHz) (must be <= %lu)\n",
+                       cr, cr / 1000000, cr_mod);
+       else
+               dev_warn(&pdev->dev, "Clock ss is at %lu (%lu MHz) (must be <= %lu)\n",
+                        cr, cr / 1000000, cr_mod);
+
+       /*
+        * The datasheet names this the "Die Bonding ID"; it appears to be
+        * a sort of Security System revision number. Since the A80 seems
+        * to have another version of the SS, this information could be
+        * useful.
+        */
+       writel(SS_ENABLED, ss->base + SS_CTL);
+       v = readl(ss->base + SS_CTL);
+       v >>= 16;
+       v &= 0x07;
+       dev_info(&pdev->dev, "Die ID %d\n", v);
+       writel(0, ss->base + SS_CTL);
+
+       ss->dev = &pdev->dev;
+
+       spin_lock_init(&ss->slock);
+
+       for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
+               ss_algs[i].ss = ss;
+               switch (ss_algs[i].type) {
+               case CRYPTO_ALG_TYPE_ABLKCIPHER:
+                       err = crypto_register_alg(&ss_algs[i].alg.crypto);
+                       if (err != 0) {
+                               dev_err(ss->dev, "Fail to register %s\n",
+                                       ss_algs[i].alg.crypto.cra_name);
+                               goto error_alg;
+                       }
+                       break;
+               case CRYPTO_ALG_TYPE_AHASH:
+                       err = crypto_register_ahash(&ss_algs[i].alg.hash);
+                       if (err != 0) {
+                               dev_err(ss->dev, "Fail to register %s\n",
+                                       ss_algs[i].alg.hash.halg.base.cra_name);
+                               goto error_alg;
+                       }
+                       break;
+               }
+       }
+       platform_set_drvdata(pdev, ss);
+       return 0;
+error_alg:
+       i--;
+       for (; i >= 0; i--) {
+               switch (ss_algs[i].type) {
+               case CRYPTO_ALG_TYPE_ABLKCIPHER:
+                       crypto_unregister_alg(&ss_algs[i].alg.crypto);
+                       break;
+               case CRYPTO_ALG_TYPE_AHASH:
+                       crypto_unregister_ahash(&ss_algs[i].alg.hash);
+                       break;
+               }
+       }
+       if (ss->reset)
+               reset_control_assert(ss->reset);
+error_clk:
+       clk_disable_unprepare(ss->ssclk);
+error_ssclk:
+       clk_disable_unprepare(ss->busclk);
+       return err;
+}
+
+static int sun4i_ss_remove(struct platform_device *pdev)
+{
+       int i;
+       struct sun4i_ss_ctx *ss = platform_get_drvdata(pdev);
+
+       for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
+               switch (ss_algs[i].type) {
+               case CRYPTO_ALG_TYPE_ABLKCIPHER:
+                       crypto_unregister_alg(&ss_algs[i].alg.crypto);
+                       break;
+               case CRYPTO_ALG_TYPE_AHASH:
+                       crypto_unregister_ahash(&ss_algs[i].alg.hash);
+                       break;
+               }
+       }
+
+       writel(0, ss->base + SS_CTL);
+       if (ss->reset)
+               reset_control_assert(ss->reset);
+       clk_disable_unprepare(ss->busclk);
+       clk_disable_unprepare(ss->ssclk);
+       return 0;
+}
+
+static const struct of_device_id a20ss_crypto_of_match_table[] = {
+       { .compatible = "allwinner,sun4i-a10-crypto" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, a20ss_crypto_of_match_table);
+
+static struct platform_driver sun4i_ss_driver = {
+       .probe          = sun4i_ss_probe,
+       .remove         = sun4i_ss_remove,
+       .driver         = {
+               .name           = "sun4i-ss",
+               .of_match_table = a20ss_crypto_of_match_table,
+       },
+};
+
+module_platform_driver(sun4i_ss_driver);
+
+MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corentin LABBE <clabbe.montjoie@gmail.com>");
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
new file mode 100644 (file)
index 0000000..ff80314
--- /dev/null
@@ -0,0 +1,492 @@
+/*
+ * sun4i-ss-hash.c - hardware cryptographic accelerator for Allwinner A20 SoC
+ *
+ * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * This file adds support for MD5 and SHA1.
+ *
+ * You can find the datasheet in Documentation/arm/sunxi/README
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include "sun4i-ss.h"
+#include <linux/scatterlist.h>
+
+/* This is a totally arbitrary value */
+#define SS_TIMEOUT 100
+
+int sun4i_hash_crainit(struct crypto_tfm *tfm)
+{
+       crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+                                sizeof(struct sun4i_req_ctx));
+       return 0;
+}
+
+/* sun4i_hash_init: initialize request context */
+int sun4i_hash_init(struct ahash_request *areq)
+{
+       struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+       struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+       struct sun4i_ss_alg_template *algt;
+
+       memset(op, 0, sizeof(struct sun4i_req_ctx));
+
+       algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
+       op->ss = algt->ss;
+       op->mode = algt->mode;
+
+       return 0;
+}
+
+int sun4i_hash_export_md5(struct ahash_request *areq, void *out)
+{
+       struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+       struct md5_state *octx = out;
+       int i;
+
+       octx->byte_count = op->byte_count + op->len;
+
+       memcpy(octx->block, op->buf, op->len);
+
+       if (op->byte_count > 0) {
+               for (i = 0; i < 4; i++)
+                       octx->hash[i] = op->hash[i];
+       } else {
+               /* nothing was hashed yet: export the MD5 initial state,
+                * whose words happen to be identical to SHA1_H0..SHA1_H3
+                */
+               octx->hash[0] = MD5_H0;
+               octx->hash[1] = MD5_H1;
+               octx->hash[2] = MD5_H2;
+               octx->hash[3] = MD5_H3;
+       }
+
+       return 0;
+}
+
+int sun4i_hash_import_md5(struct ahash_request *areq, const void *in)
+{
+       struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+       const struct md5_state *ictx = in;
+       int i;
+
+       sun4i_hash_init(areq);
+
+       op->byte_count = ictx->byte_count & ~0x3F;
+       op->len = ictx->byte_count & 0x3F;
+
+       memcpy(op->buf, ictx->block, op->len);
+
+       for (i = 0; i < 4; i++)
+               op->hash[i] = ictx->hash[i];
+
+       return 0;
+}
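
Import splits the saved byte count into the part already pushed through the engine (a whole number of 64-byte blocks, since 0x3F masks the low six bits) and the tail that goes back into op->buf. A worked example with an assumed count:

    /*
     * Worked example: ictx->byte_count = 150
     *   op->byte_count = 150 & ~0x3F = 128  (two full 64-byte blocks)
     *   op->len        = 150 &  0x3F = 22   (22 bytes restored to op->buf)
     */
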
+
+int sun4i_hash_export_sha1(struct ahash_request *areq, void *out)
+{
+       struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+       struct sha1_state *octx = out;
+       int i;
+
+       octx->count = op->byte_count + op->len;
+
+       memcpy(octx->buffer, op->buf, op->len);
+
+       if (op->byte_count > 0) {
+               for (i = 0; i < 5; i++)
+                       octx->state[i] = op->hash[i];
+       } else {
+               octx->state[0] = SHA1_H0;
+               octx->state[1] = SHA1_H1;
+               octx->state[2] = SHA1_H2;
+               octx->state[3] = SHA1_H3;
+               octx->state[4] = SHA1_H4;
+       }
+
+       return 0;
+}
+
+int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in)
+{
+       struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+       const struct sha1_state *ictx = in;
+       int i;
+
+       sun4i_hash_init(areq);
+
+       op->byte_count = ictx->count & ~0x3F;
+       op->len = ictx->count & 0x3F;
+
+       memcpy(op->buf, ictx->buffer, op->len);
+
+       for (i = 0; i < 5; i++)
+               op->hash[i] = ictx->state[i];
+
+       return 0;
+}
+
+/*
+ * sun4i_hash_update: update the hash engine
+ *
+ * Used for both SHA1 and MD5.
+ * Data is written to the SS in 32-bit steps.
+ *
+ * Since we cannot leave partial data and hash state in the engine,
+ * we need to read the hash state back at the end of this function,
+ * and the hash state can only be read out every 64 bytes.
+ *
+ * So the first job is to round the number of bytes written to the SS
+ * down to a multiple of 64; the extra bytes go to the temporary buffer
+ * op->buf, which holds op->len bytes.
+ *
+ * At the beginning of update():
+ * if op->len + areq->nbytes < 64
+ * => all data is stored in the wait buffer (op->buf) and end = 0
+ * otherwise, write all data from op->buf to the device and set end so
+ * that the total written is a multiple of 64 bytes.
+ *
+ * Example:
+ * update1 with 60 bytes => op->len = 60
+ * update2 with 60 bytes => one more word is needed to reach 64 bytes,
+ * so end = 4: write all of op->buf plus one word from the SGs,
+ * then store the remaining data in op->buf.
+ * Final state: op->len = 56
+ */
+int sun4i_hash_update(struct ahash_request *areq)
+{
+       u32 v, ivmode = 0;
+       unsigned int i = 0;
+       /*
+        * i is the total number of bytes read from the SGs, to be compared
+        * against areq->nbytes. We track it because we cannot rely on the
+        * SG lengths alone: their sum may be greater than areq->nbytes.
+        */
+
+       struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+       struct sun4i_ss_ctx *ss = op->ss;
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+       unsigned int in_i = 0; /* advancement in the current SG */
+       unsigned int end;
+       /*
+        * end is the position at which we must stop writing to the
+        * device, to be compared against i
+        */
+       int in_r, err = 0;
+       unsigned int todo;
+       u32 spaces, rx_cnt = SS_RX_DEFAULT;
+       size_t copied = 0;
+       struct sg_mapping_iter mi;
+
+       dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x",
+               __func__, crypto_tfm_alg_name(areq->base.tfm),
+               op->byte_count, areq->nbytes, op->mode,
+               op->len, op->hash[0]);
+
+       if (areq->nbytes == 0)
+               return 0;
+
+       /* protect against overflow */
+       if (areq->nbytes > UINT_MAX - op->len) {
+               dev_err(ss->dev, "Cannot process too large request\n");
+               return -EINVAL;
+       }
+
+       if (op->len + areq->nbytes < 64) {
+               /* linearize data to op->buf */
+               copied = sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+                                           op->buf + op->len, areq->nbytes, 0);
+               op->len += copied;
+               return 0;
+       }
+
+       end = ((areq->nbytes + op->len) / 64) * 64 - op->len;
+
+       if (end > areq->nbytes || areq->nbytes - end > 63) {
+               dev_err(ss->dev, "ERROR: Bound error %u %u\n",
+                       end, areq->nbytes);
+               return -EINVAL;
+       }
+
+       spin_lock_bh(&ss->slock);
+
+       /*
+        * If some data has been processed before,
+        * we need to restore the partial hash state.
+        */
+       if (op->byte_count > 0) {
+               ivmode = SS_IV_ARBITRARY;
+               for (i = 0; i < 5; i++)
+                       writel(op->hash[i], ss->base + SS_IV0 + i * 4);
+       }
+       /* Enable the device */
+       writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL);
+
+       i = 0;
+       sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+                      SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+       sg_miter_next(&mi);
+       in_i = 0;
+
+       do {
+               /*
+                * We need to linearize in two cases:
+                * - the buffer is already partially used
+                * - the SG has fewer than 4 bytes remaining
+                */
+               if (op->len > 0 || (mi.length - in_i) < 4) {
+                       /*
+                        * If we get here, there are two reasons to stop:
+                        * - the buffer is full
+                        * - we have reached the end
+                        */
+                       while (op->len < 64 && i < end) {
+                               /* how many bytes we can read from current SG */
+                               in_r = min3(mi.length - in_i, end - i,
+                                           64 - op->len);
+                               memcpy(op->buf + op->len, mi.addr + in_i, in_r);
+                               op->len += in_r;
+                               i += in_r;
+                               in_i += in_r;
+                               if (in_i == mi.length) {
+                                       sg_miter_next(&mi);
+                                       in_i = 0;
+                               }
+                       }
+                       if (op->len > 3 && (op->len % 4) == 0) {
+                               /* write buf to the device */
+                               writesl(ss->base + SS_RXFIFO, op->buf,
+                                       op->len / 4);
+                               op->byte_count += op->len;
+                               op->len = 0;
+                       }
+               }
+               if (mi.length - in_i > 3 && i < end) {
+                       /* how many bytes we can read from current SG */
+                       in_r = min3(mi.length - in_i, areq->nbytes - i,
+                                   ((mi.length - in_i) / 4) * 4);
+                       /* how many 4-byte words we can write to the device */
+                       todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4);
+                       writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo);
+                       op->byte_count += todo * 4;
+                       i += todo * 4;
+                       in_i += todo * 4;
+                       rx_cnt -= todo;
+                       if (rx_cnt == 0) {
+                               spaces = readl(ss->base + SS_FCSR);
+                               rx_cnt = SS_RXFIFO_SPACES(spaces);
+                       }
+                       if (in_i == mi.length) {
+                               sg_miter_next(&mi);
+                               in_i = 0;
+                       }
+               }
+       } while (i < end);
+       /* finally, linearize the remaining (< 64) bytes into the buffer */
+       if ((areq->nbytes - i) < 64) {
+               while (i < areq->nbytes && in_i < mi.length && op->len < 64) {
+                       /* how many bytes we can read from current SG */
+                       in_r = min3(mi.length - in_i, areq->nbytes - i,
+                                   64 - op->len);
+                       memcpy(op->buf + op->len, mi.addr + in_i, in_r);
+                       op->len += in_r;
+                       i += in_r;
+                       in_i += in_r;
+                       if (in_i == mi.length) {
+                               sg_miter_next(&mi);
+                               in_i = 0;
+                       }
+               }
+       }
+
+       sg_miter_stop(&mi);
+
+       writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);
+       i = 0;
+       do {
+               v = readl(ss->base + SS_CTL);
+               i++;
+       } while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0);
+       if (i >= SS_TIMEOUT) {
+               dev_err_ratelimited(ss->dev,
+                                   "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
+                                   i, SS_TIMEOUT, v, areq->nbytes);
+               err = -EIO;
+               goto release_ss;
+       }
+
+       /* retrieve the partial hash state from the device */
+       for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
+               op->hash[i] = readl(ss->base + SS_MD0 + i * 4);
+
+release_ss:
+       writel(0, ss->base + SS_CTL);
+       spin_unlock_bh(&ss->slock);
+       return err;
+}
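+
+/*
+ * Summary (illustrative note, not part of the original code): update()
+ * pushes as many full 64-byte blocks as possible to the RX FIFO and
+ * leaves the sub-block tail in op->buf, which final() below pads and
+ * flushes.
+ */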
+
+/*
+ * sun4i_hash_final: finalize the hashing operation
+ *
+ * If some bytes remain in the buffer, write them,
+ * then ask the SS to finalize the hashing operation.
+ *
+ * The RX FIFO size is not checked here since it is 32 words after each
+ * enable and this function never writes more than 32 words.
+ */
+int sun4i_hash_final(struct ahash_request *areq)
+{
+       u32 v, ivmode = 0;
+       unsigned int i;
+       unsigned int j = 0;
+       int zeros, err = 0;
+       unsigned int index, padlen;
+       __be64 bits;
+       struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+       struct sun4i_ss_ctx *ss = op->ss;
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+       u32 bf[32];
+       u32 wb = 0;
+       unsigned int nwait, nbw = 0;
+
+       dev_dbg(ss->dev, "%s: byte=%llu len=%u mode=%x wl=%u h=%x",
+               __func__, op->byte_count, areq->nbytes, op->mode,
+               op->len, op->hash[0]);
+
+       spin_lock_bh(&ss->slock);
+
+       /*
+        * if we have already written something,
+        * restore the partial hash state
+        */
+       if (op->byte_count > 0) {
+               ivmode = SS_IV_ARBITRARY;
+               for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
+                       writel(op->hash[i], ss->base + SS_IV0 + i * 4);
+       }
+       writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL);
+
+       /* write the remaining words of the wait buffer */
+       if (op->len > 0) {
+               nwait = op->len / 4;
+               if (nwait > 0) {
+                       writesl(ss->base + SS_RXFIFO, op->buf, nwait);
+                       op->byte_count += 4 * nwait;
+               }
+               nbw = op->len - 4 * nwait;
+               if (nbw > 0) {
+                       wb = *(u32 *)(op->buf + nwait * 4);
+                       /*
+                        * keep only the nbw valid bytes; the guard above
+                        * avoids an undefined 32-bit shift when nbw == 0
+                        */
+                       wb &= (0xFFFFFFFF >> (4 - nbw) * 8);
+               }
+       }
+
+       /* append the '1' padding bit (0x80) right after the data bytes */
+       if (nbw > 0) {
+               wb |= ((1 << 7) << (nbw * 8));
+               bf[j++] = wb;
+       } else {
+               bf[j++] = 1 << 7;
+       }
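+
+       /*
+        * Illustrative example (not from the original code): with nbw == 2,
+        * wb keeps its two valid little-endian bytes (masked earlier) and
+        * the OR above places the 0x80 padding byte immediately after them,
+        * in bits 16-23, matching byte-wise MD5/SHA1 padding.
+        */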
+
+       /*
+        * number of zero words needed to pad to 64 bytes, minus 8 for the
+        * size field and minus 4 for the word carrying the final '1' bit;
+        * the computation is taken from other MD5/SHA1 implementations
+        */
+
+       /* we have already sent 4 more bytes, of which nbw are data */
+       if (op->mode == SS_OP_MD5) {
+               index = (op->byte_count + 4) & 0x3f;
+               op->byte_count += nbw;
+               if (index > 56)
+                       zeros = (120 - index) / 4;
+               else
+                       zeros = (56 - index) / 4;
+       } else {
+               op->byte_count += nbw;
+               index = op->byte_count & 0x3f;
+               padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
+               zeros = (padlen - 1) / 4;
+       }
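+
+       /*
+        * Worked example (illustrative, not from the original code): with
+        * op->byte_count == 64 and nbw == 0, both branches give zeros == 13,
+        * so 1 word (the 0x80 marker) + 13 zero words + 2 length words are
+        * written below: exactly one more 64-byte block, as MD5/SHA1
+        * padding requires.
+        */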
+
+       memset(bf + j, 0, 4 * zeros);
+       j += zeros;
+
+       /* write the message length, in bits */
+       if (op->mode == SS_OP_SHA1) {
+               bits = cpu_to_be64(op->byte_count << 3);
+               bf[j++] = bits & 0xffffffff;
+               bf[j++] = (bits >> 32) & 0xffffffff;
+       } else {
+               bf[j++] = (op->byte_count << 3) & 0xffffffff;
+               bf[j++] = (op->byte_count >> 29) & 0xffffffff;
+       }
+       writesl(ss->base + SS_RXFIFO, bf, j);
+
+       /* Tell the SS to stop the hashing */
+       writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);
+
+       /*
+        * Wait for the SS to finish the hash.
+        * A timeout can only happen in case of bad overclocking
+        * or a driver bug.
+        */
+       i = 0;
+       do {
+               v = readl(ss->base + SS_CTL);
+               i++;
+       } while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0);
+       if (i >= SS_TIMEOUT) {
+               dev_err_ratelimited(ss->dev,
+                                   "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
+                                   i, SS_TIMEOUT, v, areq->nbytes);
+               err = -EIO;
+               goto release_ss;
+       }
+
+       /* Get the hash from the device */
+       if (op->mode == SS_OP_SHA1) {
+               for (i = 0; i < 5; i++) {
+                       v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4));
+                       memcpy(areq->result + i * 4, &v, 4);
+               }
+       } else {
+               for (i = 0; i < 4; i++) {
+                       v = readl(ss->base + SS_MD0 + i * 4);
+                       memcpy(areq->result + i * 4, &v, 4);
+               }
+       }
+
+release_ss:
+       writel(0, ss->base + SS_CTL);
+       spin_unlock_bh(&ss->slock);
+       return err;
+}
+
+/* sun4i_hash_finup: finalize hashing operation after an update */
+int sun4i_hash_finup(struct ahash_request *areq)
+{
+       int err;
+
+       err = sun4i_hash_update(areq);
+       if (err != 0)
+               return err;
+
+       return sun4i_hash_final(areq);
+}
+
+/* combo of init/update/final functions */
+int sun4i_hash_digest(struct ahash_request *areq)
+{
+       int err;
+
+       err = sun4i_hash_init(areq);
+       if (err != 0)
+               return err;
+
+       err = sun4i_hash_update(areq);
+       if (err != 0)
+               return err;
+
+       return sun4i_hash_final(areq);
+}
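+
+/*
+ * Minimal registration sketch (an illustrative assumption, not part of the
+ * original code; the actual template tables live in the driver core file
+ * and may differ):
+ *
+ *     static struct ahash_alg md5_alg_example = {
+ *             .init   = sun4i_hash_init,
+ *             .update = sun4i_hash_update,
+ *             .final  = sun4i_hash_final,
+ *             .finup  = sun4i_hash_finup,
+ *             .digest = sun4i_hash_digest,
+ *             .halg = {
+ *                     .digestsize = MD5_DIGEST_SIZE,
+ *                     .base = {
+ *                             .cra_name = "md5",
+ *                             .cra_driver_name = "md5-sun4i-ss",
+ *                             .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ *                             .cra_flags = CRYPTO_ALG_ASYNC,
+ *                             .cra_init = sun4i_hash_crainit,
+ *                     },
+ *             },
+ *     };
+ */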
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h
new file mode 100644 (file)
index 0000000..8e9c05f
--- /dev/null
@@ -0,0 +1,201 @@
+/*
+ * sun4i-ss.h - hardware cryptographic accelerator for Allwinner A20 SoC
+ *
+ * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * Supports the AES cipher with 128-, 192- and 256-bit key sizes.
+ * Supports the MD5 and SHA1 hash algorithms.
+ * Supports DES and 3DES.
+ *
+ * The datasheet can be found via Documentation/arm/sunxi/README.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <crypto/scatterwalk.h>
+#include <linux/scatterlist.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/internal/rng.h>
+
+#define SS_CTL            0x00
+#define SS_KEY0           0x04
+#define SS_KEY1           0x08
+#define SS_KEY2           0x0C
+#define SS_KEY3           0x10
+#define SS_KEY4           0x14
+#define SS_KEY5           0x18
+#define SS_KEY6           0x1C
+#define SS_KEY7           0x20
+
+#define SS_IV0            0x24
+#define SS_IV1            0x28
+#define SS_IV2            0x2C
+#define SS_IV3            0x30
+
+#define SS_FCSR           0x44
+
+#define SS_MD0            0x4C
+#define SS_MD1            0x50
+#define SS_MD2            0x54
+#define SS_MD3            0x58
+#define SS_MD4            0x5C
+
+#define SS_RXFIFO         0x200
+#define SS_TXFIFO         0x204
+
+/* SS_CTL configuration values */
+
+/* PRNG generator mode - bit 15 */
+#define SS_PRNG_ONESHOT                (0 << 15)
+#define SS_PRNG_CONTINUE       (1 << 15)
+
+/* IV mode for hash */
+#define SS_IV_ARBITRARY                (1 << 14)
+
+/* SS operation mode - bits 12-13 */
+#define SS_ECB                 (0 << 12)
+#define SS_CBC                 (1 << 12)
+#define SS_CTS                 (3 << 12)
+
+/* Counter width for CNT mode - bits 10-11 */
+#define SS_CNT_16BITS          (0 << 10)
+#define SS_CNT_32BITS          (1 << 10)
+#define SS_CNT_64BITS          (2 << 10)
+
+/* Key size for AES - bits 8-9 */
+#define SS_AES_128BITS         (0 << 8)
+#define SS_AES_192BITS         (1 << 8)
+#define SS_AES_256BITS         (2 << 8)
+
+/* Operation direction - bit 7 */
+#define SS_ENCRYPTION          (0 << 7)
+#define SS_DECRYPTION          (1 << 7)
+
+/* SS Method - bits 4-6 */
+#define SS_OP_AES              (0 << 4)
+#define SS_OP_DES              (1 << 4)
+#define SS_OP_3DES             (2 << 4)
+#define SS_OP_SHA1             (3 << 4)
+#define SS_OP_MD5              (4 << 4)
+#define SS_OP_PRNG             (5 << 4)
+
+/* Data end bit - bit 2 */
+#define SS_DATA_END            (1 << 2)
+
+/* PRNG start bit - bit 1 */
+#define SS_PRNG_START          (1 << 1)
+
+/* SS Enable bit - bit 0 */
+#define SS_DISABLED            (0 << 0)
+#define SS_ENABLED             (1 << 0)
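+
+/*
+ * Illustrative example (not part of the original code): an AES-128 CBC
+ * encryption would program SS_CTL with
+ *     SS_ENCRYPTION | SS_AES_128BITS | SS_OP_AES | SS_CBC | SS_ENABLED
+ * while the hash code uses SS_OP_MD5 or SS_OP_SHA1 plus SS_ENABLED.
+ */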
+
+/* SS_FCSR configuration values */
+/* RX FIFO status - bit 30 */
+#define SS_RXFIFO_FREE         (1 << 30)
+
+/* RX FIFO empty spaces - bits 24-29 */
+#define SS_RXFIFO_SPACES(val)  (((val) >> 24) & 0x3f)
+
+/* TX FIFO status - bit 22 */
+#define SS_TXFIFO_AVAILABLE    (1 << 22)
+
+/* TX FIFO available spaces - bits 16-21 */
+#define SS_TXFIFO_SPACES(val)  (((val) >> 16) & 0x3f)
+
+#define SS_RX_MAX      32
+#define SS_RX_DEFAULT  SS_RX_MAX
+#define SS_TX_MAX      33
+
+#define SS_RXFIFO_EMP_INT_PENDING      (1 << 10)
+#define SS_TXFIFO_AVA_INT_PENDING      (1 << 8)
+#define SS_RXFIFO_EMP_INT_ENABLE       (1 << 2)
+#define SS_TXFIFO_AVA_INT_ENABLE       (1 << 0)
+
+struct sun4i_ss_ctx {
+       void __iomem *base;
+       int irq;
+       struct clk *busclk;
+       struct clk *ssclk;
+       struct reset_control *reset;
+       struct device *dev;
+       struct resource *res;
+       spinlock_t slock; /* control the use of the device */
+};
+
+struct sun4i_ss_alg_template {
+       u32 type;
+       u32 mode;
+       union {
+               struct crypto_alg crypto;
+               struct ahash_alg hash;
+       } alg;
+       struct sun4i_ss_ctx *ss;
+};
+
+struct sun4i_tfm_ctx {
+       u32 key[AES_MAX_KEY_SIZE / 4]; /* divided by sizeof(u32) */
+       u32 keylen;
+       u32 keymode;
+       struct sun4i_ss_ctx *ss;
+};
+
+struct sun4i_cipher_req_ctx {
+       u32 mode;
+};
+
+struct sun4i_req_ctx {
+       u32 mode;
+       u64 byte_count; /* number of bytes "uploaded" to the device */
+       u32 hash[5]; /* for storing SS_IVx register */
+       char buf[64];
+       unsigned int len;
+       struct sun4i_ss_ctx *ss;
+};
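+
+/*
+ * Note (illustrative, not part of the original code): between update calls,
+ * buf/len hold the sub-64-byte tail that has not been pushed to the FIFO
+ * yet, while hash[] snapshots the SS_MDx registers so that a later request
+ * can resume through SS_IV_ARBITRARY.
+ */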
+
+int sun4i_hash_crainit(struct crypto_tfm *tfm);
+int sun4i_hash_init(struct ahash_request *areq);
+int sun4i_hash_update(struct ahash_request *areq);
+int sun4i_hash_final(struct ahash_request *areq);
+int sun4i_hash_finup(struct ahash_request *areq);
+int sun4i_hash_digest(struct ahash_request *areq);
+int sun4i_hash_export_md5(struct ahash_request *areq, void *out);
+int sun4i_hash_import_md5(struct ahash_request *areq, const void *in);
+int sun4i_hash_export_sha1(struct ahash_request *areq, void *out);
+int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in);
+
+int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq);
+                       crypto_unregister_aead(&t_alg->algt.alg.aead);
+                       break;
+int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq);
+int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq);
+
+int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq);
+int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq);
+int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq);
+int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq);
+
+int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq);
+int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq);
+int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq);
+int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq);
+
+int sun4i_ss_cipher_init(struct crypto_tfm *tfm);
+int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+                       unsigned int keylen);
+int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+                       unsigned int keylen);
+int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+                        unsigned int keylen);
index 83aca95a95bc226e6b3d1b083c7ba5373660cc6b..cd774534d987756f57fd79f9d6441a51d74af609 100644 (file)
@@ -766,6 +766,7 @@ static int talitos_rng_init(struct hwrng *rng)
 static int talitos_register_rng(struct device *dev)
 {
        struct talitos_private *priv = dev_get_drvdata(dev);
+       int err;
 
        priv->rng.name          = dev_driver_string(dev),
        priv->rng.init          = talitos_rng_init,
@@ -773,14 +774,22 @@ static int talitos_register_rng(struct device *dev)
        priv->rng.data_read     = talitos_rng_data_read,
        priv->rng.priv          = (unsigned long)dev;
 
-       return hwrng_register(&priv->rng);
+       err = hwrng_register(&priv->rng);
+       if (!err)
+               priv->rng_registered = true;
+
+       return err;
 }
 
 static void talitos_unregister_rng(struct device *dev)
 {
        struct talitos_private *priv = dev_get_drvdata(dev);
 
+       if (!priv->rng_registered)
+               return;
+
        hwrng_unregister(&priv->rng);
+       priv->rng_registered = false;
 }
 
 /*
@@ -799,7 +808,6 @@ struct talitos_ctx {
        unsigned int keylen;
        unsigned int enckeylen;
        unsigned int authkeylen;
-       unsigned int authsize;
 };
 
 #define HASH_MAX_BLOCK_SIZE            SHA512_BLOCK_SIZE
@@ -819,16 +827,6 @@ struct talitos_ahash_req_ctx {
        struct scatterlist *psrc;
 };
 
-static int aead_setauthsize(struct crypto_aead *authenc,
-                           unsigned int authsize)
-{
-       struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
-
-       ctx->authsize = authsize;
-
-       return 0;
-}
-
 static int aead_setkey(struct crypto_aead *authenc,
                       const u8 *key, unsigned int keylen)
 {
@@ -857,12 +855,11 @@ badkey:
 
 /*
  * talitos_edesc - s/w-extended descriptor
- * @assoc_nents: number of segments in associated data scatterlist
  * @src_nents: number of segments in input scatterlist
  * @dst_nents: number of segments in output scatterlist
- * @assoc_chained: whether assoc is chained or not
  * @src_chained: whether src is chained or not
  * @dst_chained: whether dst is chained or not
+ * @icv_ool: whether ICV is out-of-line
  * @iv_dma: dma address of iv for checking continuity and link table
  * @dma_len: length of dma mapped link_tbl space
  * @dma_link_tbl: bus physical address of link_tbl/buf
@@ -875,12 +872,11 @@ badkey:
  * of link_tbl data
  */
 struct talitos_edesc {
-       int assoc_nents;
        int src_nents;
        int dst_nents;
-       bool assoc_chained;
        bool src_chained;
        bool dst_chained;
+       bool icv_ool;
        dma_addr_t iv_dma;
        int dma_len;
        dma_addr_t dma_link_tbl;
@@ -952,14 +948,6 @@ static void ipsec_esp_unmap(struct device *dev,
        unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
        unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
 
-       if (edesc->assoc_chained)
-               talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
-       else if (areq->assoclen)
-               /* assoc_nents counts also for IV in non-contiguous cases */
-               dma_unmap_sg(dev, areq->assoc,
-                            edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
-                            DMA_TO_DEVICE);
-
        talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
 
        if (edesc->dma_len)
@@ -976,7 +964,7 @@ static void ipsec_esp_encrypt_done(struct device *dev,
 {
        struct aead_request *areq = context;
        struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
-       struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+       unsigned int authsize = crypto_aead_authsize(authenc);
        struct talitos_edesc *edesc;
        struct scatterlist *sg;
        void *icvdata;
@@ -986,13 +974,12 @@ static void ipsec_esp_encrypt_done(struct device *dev,
        ipsec_esp_unmap(dev, edesc, areq);
 
        /* copy the generated ICV to dst */
-       if (edesc->dst_nents) {
+       if (edesc->icv_ool) {
                icvdata = &edesc->link_tbl[edesc->src_nents +
-                                          edesc->dst_nents + 2 +
-                                          edesc->assoc_nents];
+                                          edesc->dst_nents + 2];
                sg = sg_last(areq->dst, edesc->dst_nents);
-               memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
-                      icvdata, ctx->authsize);
+               memcpy((char *)sg_virt(sg) + sg->length - authsize,
+                      icvdata, authsize);
        }
 
        kfree(edesc);
@@ -1006,10 +993,10 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
 {
        struct aead_request *req = context;
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-       struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+       unsigned int authsize = crypto_aead_authsize(authenc);
        struct talitos_edesc *edesc;
        struct scatterlist *sg;
-       void *icvdata;
+       char *oicv, *icv;
 
        edesc = container_of(desc, struct talitos_edesc, desc);
 
@@ -1017,16 +1004,18 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
 
        if (!err) {
                /* auth check */
-               if (edesc->dma_len)
-                       icvdata = &edesc->link_tbl[edesc->src_nents +
-                                                  edesc->dst_nents + 2 +
-                                                  edesc->assoc_nents];
-               else
-                       icvdata = &edesc->link_tbl[0];
-
                sg = sg_last(req->dst, edesc->dst_nents ? : 1);
-               err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
-                            ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
+               icv = (char *)sg_virt(sg) + sg->length - authsize;
+
+               if (edesc->dma_len) {
+                       oicv = (char *)&edesc->link_tbl[edesc->src_nents +
+                                                       edesc->dst_nents + 2];
+                       if (edesc->icv_ool)
+                               icv = oicv + authsize;
+               } else
+                       oicv = (char *)&edesc->link_tbl[0];
+
+               err = memcmp(oicv, icv, authsize) ? -EBADMSG : 0;
        }
 
        kfree(edesc);
@@ -1059,53 +1048,69 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
  * convert scatterlist to SEC h/w link table format
  * stop at cryptlen bytes
  */
-static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
-                          int cryptlen, struct talitos_ptr *link_tbl_ptr)
+static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
+                                unsigned int offset, int cryptlen,
+                                struct talitos_ptr *link_tbl_ptr)
 {
        int n_sg = sg_count;
+       int count = 0;
 
-       while (sg && n_sg--) {
-               to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg), 0);
-               link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
-               link_tbl_ptr->j_extent = 0;
-               link_tbl_ptr++;
-               cryptlen -= sg_dma_len(sg);
-               sg = sg_next(sg);
-       }
+       while (cryptlen && sg && n_sg--) {
+               unsigned int len = sg_dma_len(sg);
+
+               if (offset >= len) {
+                       offset -= len;
+                       goto next;
+               }
+
+               len -= offset;
+
+               if (len > cryptlen)
+                       len = cryptlen;
 
-       /* adjust (decrease) last one (or two) entry's len to cryptlen */
-       link_tbl_ptr--;
-       while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
-               /* Empty this entry, and move to previous one */
-               cryptlen += be16_to_cpu(link_tbl_ptr->len);
-               link_tbl_ptr->len = 0;
-               sg_count--;
-               link_tbl_ptr--;
+               to_talitos_ptr(link_tbl_ptr + count,
+                              sg_dma_address(sg) + offset, 0);
+               link_tbl_ptr[count].len = cpu_to_be16(len);
+               link_tbl_ptr[count].j_extent = 0;
+               count++;
+               cryptlen -= len;
+               offset = 0;
+
+next:
+               sg = sg_next(sg);
        }
-       link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
-                                       + cryptlen);
 
        /* tag end of link table */
-       link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
+       if (count > 0)
+               link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;
 
-       return sg_count;
+       return count;
+}
+
+static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
+                                int cryptlen,
+                                struct talitos_ptr *link_tbl_ptr)
+{
+       return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
+                                    link_tbl_ptr);
 }
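+
+/*
+ * Illustrative example (not from the original code): for SG segment
+ * lengths {16, 48, 64} with offset 24 and cryptlen 80, the first segment
+ * is skipped entirely (offset becomes 8), two entries are emitted,
+ * (sg[1] + 8, len 40) and (sg[2], len 40), the second one is tagged
+ * DESC_PTR_LNKTBL_RETURN, and the function returns 2.
+ */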
 
 /*
  * fill in and submit ipsec_esp descriptor
  */
 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
-                    u64 seq, void (*callback) (struct device *dev,
-                                               struct talitos_desc *desc,
-                                               void *context, int error))
+                    void (*callback)(struct device *dev,
+                                     struct talitos_desc *desc,
+                                     void *context, int error))
 {
        struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+       unsigned int authsize = crypto_aead_authsize(aead);
        struct talitos_ctx *ctx = crypto_aead_ctx(aead);
        struct device *dev = ctx->dev;
        struct talitos_desc *desc = &edesc->desc;
        unsigned int cryptlen = areq->cryptlen;
-       unsigned int authsize = ctx->authsize;
        unsigned int ivsize = crypto_aead_ivsize(aead);
+       int tbl_off = 0;
        int sg_count, ret;
        int sg_link_tbl_len;
 
@@ -1113,36 +1118,27 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
        map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
                               DMA_TO_DEVICE);
 
+       sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ?: 1,
+                                 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
+                                                          : DMA_TO_DEVICE,
+                                 edesc->src_chained);
+
        /* hmac data */
-       desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize);
-       if (edesc->assoc_nents) {
-               int tbl_off = edesc->src_nents + edesc->dst_nents + 2;
-               struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
+       desc->ptr[1].len = cpu_to_be16(areq->assoclen);
+       if (sg_count > 1 &&
+           (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
+                                        areq->assoclen,
+                                        &edesc->link_tbl[tbl_off])) > 1) {
+               tbl_off += ret;
 
                to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
                               sizeof(struct talitos_ptr), 0);
                desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
 
-               /* assoc_nents - 1 entries for assoc, 1 for IV */
-               sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1,
-                                         areq->assoclen, tbl_ptr);
-
-               /* add IV to link table */
-               tbl_ptr += sg_count - 1;
-               tbl_ptr->j_extent = 0;
-               tbl_ptr++;
-               to_talitos_ptr(tbl_ptr, edesc->iv_dma, 0);
-               tbl_ptr->len = cpu_to_be16(ivsize);
-               tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
-
                dma_sync_single_for_device(dev, edesc->dma_link_tbl,
                                           edesc->dma_len, DMA_BIDIRECTIONAL);
        } else {
-               if (areq->assoclen)
-                       to_talitos_ptr(&desc->ptr[1],
-                                      sg_dma_address(areq->assoc), 0);
-               else
-                       to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, 0);
+               to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
                desc->ptr[1].j_extent = 0;
        }
 
@@ -1150,8 +1146,6 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
        to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
        desc->ptr[2].len = cpu_to_be16(ivsize);
        desc->ptr[2].j_extent = 0;
-       /* Sync needed for the aead_givencrypt case */
-       dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
 
        /* cipher key */
        map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
@@ -1167,33 +1161,24 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
        desc->ptr[4].len = cpu_to_be16(cryptlen);
        desc->ptr[4].j_extent = authsize;
 
-       sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
-                                 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
-                                                          : DMA_TO_DEVICE,
-                                 edesc->src_chained);
-
-       if (sg_count == 1) {
+       sg_link_tbl_len = cryptlen;
+       if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
+               sg_link_tbl_len += authsize;
+
+       if (sg_count > 1 &&
+           (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
+                                        sg_link_tbl_len,
+                                        &edesc->link_tbl[tbl_off])) > 1) {
+               tbl_off += ret;
+               desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
+               to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
+                                             tbl_off *
+                                             sizeof(struct talitos_ptr), 0);
+               dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+                                          edesc->dma_len,
+                                          DMA_BIDIRECTIONAL);
+       } else
                to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
-       } else {
-               sg_link_tbl_len = cryptlen;
-
-               if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
-                       sg_link_tbl_len = cryptlen + authsize;
-
-               sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
-                                         &edesc->link_tbl[0]);
-               if (sg_count > 1) {
-                       desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
-                       to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl, 0);
-                       dma_sync_single_for_device(dev, edesc->dma_link_tbl,
-                                                  edesc->dma_len,
-                                                  DMA_BIDIRECTIONAL);
-               } else {
-                       /* Only one segment now, so no link tbl needed */
-                       to_talitos_ptr(&desc->ptr[4],
-                                      sg_dma_address(areq->src), 0);
-               }
-       }
 
        /* cipher out */
        desc->ptr[5].len = cpu_to_be16(cryptlen);
@@ -1204,16 +1189,17 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
                                          edesc->dst_nents ? : 1,
                                          DMA_FROM_DEVICE, edesc->dst_chained);
 
-       if (sg_count == 1) {
-               to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
-       } else {
-               int tbl_off = edesc->src_nents + 1;
+       edesc->icv_ool = false;
+
+       if (sg_count > 1 &&
+           (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
+                                             areq->assoclen, cryptlen,
+                                             &edesc->link_tbl[tbl_off])) >
+           1) {
                struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
 
                to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
                               tbl_off * sizeof(struct talitos_ptr), 0);
-               sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
-                                         tbl_ptr);
 
                /* Add an entry to the link table for ICV data */
                tbl_ptr += sg_count - 1;
@@ -1224,13 +1210,16 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 
                /* icv data follows link tables */
                to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
-                              (tbl_off + edesc->dst_nents + 1 +
-                               edesc->assoc_nents) *
-                              sizeof(struct talitos_ptr), 0);
+                                       (edesc->src_nents + edesc->dst_nents +
+                                        2) * sizeof(struct talitos_ptr) +
+                                       authsize, 0);
                desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
                dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
                                           edesc->dma_len, DMA_BIDIRECTIONAL);
-       }
+
+               edesc->icv_ool = true;
+       } else
+               to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
 
        /* iv out */
        map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
@@ -1268,7 +1257,6 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
  * allocate and map the extended descriptor
  */
 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
-                                                struct scatterlist *assoc,
                                                 struct scatterlist *src,
                                                 struct scatterlist *dst,
                                                 u8 *iv,
@@ -1281,8 +1269,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
                                                 bool encrypt)
 {
        struct talitos_edesc *edesc;
-       int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
-       bool assoc_chained = false, src_chained = false, dst_chained = false;
+       int src_nents, dst_nents, alloc_len, dma_len;
+       bool src_chained = false, dst_chained = false;
        dma_addr_t iv_dma = 0;
        gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
                      GFP_ATOMIC;
@@ -1298,48 +1286,35 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
        if (ivsize)
                iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
 
-       if (assoclen) {
-               /*
-                * Currently it is assumed that iv is provided whenever assoc
-                * is.
-                */
-               BUG_ON(!iv);
-
-               assoc_nents = sg_count(assoc, assoclen, &assoc_chained);
-               talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE,
-                              assoc_chained);
-               assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;
-
-               if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma)
-                       assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
-       }
-
        if (!dst || dst == src) {
-               src_nents = sg_count(src, cryptlen + authsize, &src_chained);
+               src_nents = sg_count(src, assoclen + cryptlen + authsize,
+                                    &src_chained);
                src_nents = (src_nents == 1) ? 0 : src_nents;
                dst_nents = dst ? src_nents : 0;
        } else { /* dst && dst != src*/
-               src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
+               src_nents = sg_count(src, assoclen + cryptlen +
+                                         (encrypt ? 0 : authsize),
                                     &src_chained);
                src_nents = (src_nents == 1) ? 0 : src_nents;
-               dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
+               dst_nents = sg_count(dst, assoclen + cryptlen +
+                                         (encrypt ? authsize : 0),
                                     &dst_chained);
                dst_nents = (dst_nents == 1) ? 0 : dst_nents;
        }
 
        /*
         * allocate space for base edesc plus the link tables,
-        * allowing for two separate entries for ICV and generated ICV (+ 2),
-        * and the ICV data itself
+        * allowing for two separate entries for AD and generated ICV (+ 2),
+        * and space for two sets of ICVs (stashed and generated)
         */
        alloc_len = sizeof(struct talitos_edesc);
-       if (assoc_nents || src_nents || dst_nents) {
+       if (src_nents || dst_nents) {
                if (is_sec1)
                        dma_len = (src_nents ? cryptlen : 0) +
                                  (dst_nents ? cryptlen : 0);
                else
-                       dma_len = (src_nents + dst_nents + 2 + assoc_nents) *
-                                 sizeof(struct talitos_ptr) + authsize;
+                       dma_len = (src_nents + dst_nents + 2) *
+                                 sizeof(struct talitos_ptr) + authsize * 2;
                alloc_len += dma_len;
        } else {
                dma_len = 0;
@@ -1348,13 +1323,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 
        edesc = kmalloc(alloc_len, GFP_DMA | flags);
        if (!edesc) {
-               if (assoc_chained)
-                       talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
-               else if (assoclen)
-                       dma_unmap_sg(dev, assoc,
-                                    assoc_nents ? assoc_nents - 1 : 1,
-                                    DMA_TO_DEVICE);
-
                if (iv_dma)
                        dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
 
@@ -1362,10 +1330,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
                return ERR_PTR(-ENOMEM);
        }
 
-       edesc->assoc_nents = assoc_nents;
        edesc->src_nents = src_nents;
        edesc->dst_nents = dst_nents;
-       edesc->assoc_chained = assoc_chained;
        edesc->src_chained = src_chained;
        edesc->dst_chained = dst_chained;
        edesc->iv_dma = iv_dma;
@@ -1382,12 +1348,13 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
                                              int icv_stashing, bool encrypt)
 {
        struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+       unsigned int authsize = crypto_aead_authsize(authenc);
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
        unsigned int ivsize = crypto_aead_ivsize(authenc);
 
-       return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
+       return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
                                   iv, areq->assoclen, areq->cryptlen,
-                                  ctx->authsize, ivsize, icv_stashing,
+                                  authsize, ivsize, icv_stashing,
                                   areq->base.flags, encrypt);
 }
 
@@ -1405,14 +1372,14 @@ static int aead_encrypt(struct aead_request *req)
        /* set encrypt */
        edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
 
-       return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done);
+       return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
 }
 
 static int aead_decrypt(struct aead_request *req)
 {
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+       unsigned int authsize = crypto_aead_authsize(authenc);
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
-       unsigned int authsize = ctx->authsize;
        struct talitos_private *priv = dev_get_drvdata(ctx->dev);
        struct talitos_edesc *edesc;
        struct scatterlist *sg;
@@ -1437,7 +1404,7 @@ static int aead_decrypt(struct aead_request *req)
                /* reset integrity check result bits */
                edesc->desc.hdr_lo = 0;
 
-               return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done);
+               return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
        }
 
        /* Have to check the ICV with software */
@@ -1445,40 +1412,16 @@ static int aead_decrypt(struct aead_request *req)
 
        /* stash incoming ICV for later cmp with ICV generated by the h/w */
        if (edesc->dma_len)
-               icvdata = &edesc->link_tbl[edesc->src_nents +
-                                          edesc->dst_nents + 2 +
-                                          edesc->assoc_nents];
+               icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
+                                                  edesc->dst_nents + 2];
        else
                icvdata = &edesc->link_tbl[0];
 
        sg = sg_last(req->src, edesc->src_nents ? : 1);
 
-       memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
-              ctx->authsize);
+       memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
 
-       return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done);
-}
-
-static int aead_givencrypt(struct aead_givcrypt_request *req)
-{
-       struct aead_request *areq = &req->areq;
-       struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
-       struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
-       struct talitos_edesc *edesc;
-
-       /* allocate extended descriptor */
-       edesc = aead_edesc_alloc(areq, req->giv, 0, true);
-       if (IS_ERR(edesc))
-               return PTR_ERR(edesc);
-
-       /* set encrypt */
-       edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
-
-       memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
-       /* avoid consecutive packets going out with same IV */
-       *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
-
-       return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done);
+       return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
 }
 
 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
@@ -1710,7 +1653,7 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
        unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
 
-       return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
+       return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
                                   areq->info, 0, areq->nbytes, 0, ivsize, 0,
                                   areq->base.flags, encrypt);
 }
@@ -1895,7 +1838,7 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
        struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
        struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
 
-       return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
+       return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
                                   nbytes, 0, 0, 0, areq->base.flags, false);
 }
 
@@ -2161,6 +2104,7 @@ struct talitos_alg_template {
        union {
                struct crypto_alg crypto;
                struct ahash_alg hash;
+               struct aead_alg aead;
        } alg;
        __be32 desc_hdr_template;
 };
@@ -2168,15 +2112,16 @@ struct talitos_alg_template {
 static struct talitos_alg_template driver_algs[] = {
        /* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
        {       .type = CRYPTO_ALG_TYPE_AEAD,
-               .alg.crypto = {
-                       .cra_name = "authenc(hmac(sha1),cbc(aes))",
-                       .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
-                       .cra_blocksize = AES_BLOCK_SIZE,
-                       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-                       .cra_aead = {
-                               .ivsize = AES_BLOCK_SIZE,
-                               .maxauthsize = SHA1_DIGEST_SIZE,
-                       }
+               .alg.aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha1),cbc(aes))",
+                               .cra_driver_name = "authenc-hmac-sha1-"
+                                                  "cbc-aes-talitos",
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_ASYNC,
+                       },
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA1_DIGEST_SIZE,
                },
                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
                                     DESC_HDR_SEL0_AESU |
@@ -2187,15 +2132,17 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
        },
        {       .type = CRYPTO_ALG_TYPE_AEAD,
-               .alg.crypto = {
-                       .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
-                       .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
-                       .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-                       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-                       .cra_aead = {
-                               .ivsize = DES3_EDE_BLOCK_SIZE,
-                               .maxauthsize = SHA1_DIGEST_SIZE,
-                       }
+               .alg.aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha1),"
+                                           "cbc(des3_ede))",
+                               .cra_driver_name = "authenc-hmac-sha1-"
+                                                  "cbc-3des-talitos",
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_ASYNC,
+                       },
+                       .ivsize = DES3_EDE_BLOCK_SIZE,
+                       .maxauthsize = SHA1_DIGEST_SIZE,
                },
                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
                                     DESC_HDR_SEL0_DEU |
@@ -2207,15 +2154,16 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
        },
        {       .type = CRYPTO_ALG_TYPE_AEAD,
-               .alg.crypto = {
-                       .cra_name = "authenc(hmac(sha224),cbc(aes))",
-                       .cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
-                       .cra_blocksize = AES_BLOCK_SIZE,
-                       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-                       .cra_aead = {
-                               .ivsize = AES_BLOCK_SIZE,
-                               .maxauthsize = SHA224_DIGEST_SIZE,
-                       }
+               .alg.aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha224),cbc(aes))",
+                               .cra_driver_name = "authenc-hmac-sha224-"
+                                                  "cbc-aes-talitos",
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_ASYNC,
+                       },
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA224_DIGEST_SIZE,
                },
                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
                                     DESC_HDR_SEL0_AESU |
@@ -2226,15 +2174,17 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
        },
        {       .type = CRYPTO_ALG_TYPE_AEAD,
-               .alg.crypto = {
-                       .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
-                       .cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
-                       .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-                       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-                       .cra_aead = {
-                               .ivsize = DES3_EDE_BLOCK_SIZE,
-                               .maxauthsize = SHA224_DIGEST_SIZE,
-                       }
+               .alg.aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha224),"
+                                           "cbc(des3_ede))",
+                               .cra_driver_name = "authenc-hmac-sha224-"
+                                                  "cbc-3des-talitos",
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_ASYNC,
+                       },
+                       .ivsize = DES3_EDE_BLOCK_SIZE,
+                       .maxauthsize = SHA224_DIGEST_SIZE,
                },
                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
                                     DESC_HDR_SEL0_DEU |
@@ -2246,15 +2196,16 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
        },
        {       .type = CRYPTO_ALG_TYPE_AEAD,
-               .alg.crypto = {
-                       .cra_name = "authenc(hmac(sha256),cbc(aes))",
-                       .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
-                       .cra_blocksize = AES_BLOCK_SIZE,
-                       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-                       .cra_aead = {
-                               .ivsize = AES_BLOCK_SIZE,
-                               .maxauthsize = SHA256_DIGEST_SIZE,
-                       }
+               .alg.aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha256),cbc(aes))",
+                               .cra_driver_name = "authenc-hmac-sha256-"
+                                                  "cbc-aes-talitos",
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_ASYNC,
+                       },
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA256_DIGEST_SIZE,
                },
                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
                                     DESC_HDR_SEL0_AESU |
@@ -2265,15 +2216,17 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
        },
        {       .type = CRYPTO_ALG_TYPE_AEAD,
-               .alg.crypto = {
-                       .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
-                       .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
-                       .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-                       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-                       .cra_aead = {
-                               .ivsize = DES3_EDE_BLOCK_SIZE,
-                               .maxauthsize = SHA256_DIGEST_SIZE,
-                       }
+               .alg.aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha256),"
+                                           "cbc(des3_ede))",
+                               .cra_driver_name = "authenc-hmac-sha256-"
+                                                  "cbc-3des-talitos",
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_ASYNC,
+                       },
+                       .ivsize = DES3_EDE_BLOCK_SIZE,
+                       .maxauthsize = SHA256_DIGEST_SIZE,
                },
                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
                                     DESC_HDR_SEL0_DEU |
@@ -2285,15 +2238,16 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
        },
        {       .type = CRYPTO_ALG_TYPE_AEAD,
-               .alg.crypto = {
-                       .cra_name = "authenc(hmac(sha384),cbc(aes))",
-                       .cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
-                       .cra_blocksize = AES_BLOCK_SIZE,
-                       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-                       .cra_aead = {
-                               .ivsize = AES_BLOCK_SIZE,
-                               .maxauthsize = SHA384_DIGEST_SIZE,
-                       }
+               .alg.aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha384),cbc(aes))",
+                               .cra_driver_name = "authenc-hmac-sha384-"
+                                                  "cbc-aes-talitos",
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_ASYNC,
+                       },
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA384_DIGEST_SIZE,
                },
                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
                                     DESC_HDR_SEL0_AESU |
@@ -2304,15 +2258,17 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
        },
        {       .type = CRYPTO_ALG_TYPE_AEAD,
-               .alg.crypto = {
-                       .cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
-                       .cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
-                       .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-                       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-                       .cra_aead = {
-                               .ivsize = DES3_EDE_BLOCK_SIZE,
-                               .maxauthsize = SHA384_DIGEST_SIZE,
-                       }
+               .alg.aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha384),"
+                                           "cbc(des3_ede))",
+                               .cra_driver_name = "authenc-hmac-sha384-"
+                                                  "cbc-3des-talitos",
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_ASYNC,
+                       },
+                       .ivsize = DES3_EDE_BLOCK_SIZE,
+                       .maxauthsize = SHA384_DIGEST_SIZE,
                },
                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
                                     DESC_HDR_SEL0_DEU |
@@ -2324,15 +2280,16 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
        },
        {       .type = CRYPTO_ALG_TYPE_AEAD,
-               .alg.crypto = {
-                       .cra_name = "authenc(hmac(sha512),cbc(aes))",
-                       .cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
-                       .cra_blocksize = AES_BLOCK_SIZE,
-                       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-                       .cra_aead = {
-                               .ivsize = AES_BLOCK_SIZE,
-                               .maxauthsize = SHA512_DIGEST_SIZE,
-                       }
+               .alg.aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha512),cbc(aes))",
+                               .cra_driver_name = "authenc-hmac-sha512-"
+                                                  "cbc-aes-talitos",
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_ASYNC,
+                       },
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA512_DIGEST_SIZE,
                },
                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
                                     DESC_HDR_SEL0_AESU |
@@ -2343,15 +2300,17 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
        },
        {       .type = CRYPTO_ALG_TYPE_AEAD,
-               .alg.crypto = {
-                       .cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
-                       .cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
-                       .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-                       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-                       .cra_aead = {
-                               .ivsize = DES3_EDE_BLOCK_SIZE,
-                               .maxauthsize = SHA512_DIGEST_SIZE,
-                       }
+               .alg.aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha512),"
+                                           "cbc(des3_ede))",
+                               .cra_driver_name = "authenc-hmac-sha512-"
+                                                  "cbc-3des-talitos",
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_ASYNC,
+                       },
+                       .ivsize = DES3_EDE_BLOCK_SIZE,
+                       .maxauthsize = SHA512_DIGEST_SIZE,
                },
                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
                                     DESC_HDR_SEL0_DEU |
@@ -2363,15 +2322,16 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
        },
        {       .type = CRYPTO_ALG_TYPE_AEAD,
-               .alg.crypto = {
-                       .cra_name = "authenc(hmac(md5),cbc(aes))",
-                       .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
-                       .cra_blocksize = AES_BLOCK_SIZE,
-                       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-                       .cra_aead = {
-                               .ivsize = AES_BLOCK_SIZE,
-                               .maxauthsize = MD5_DIGEST_SIZE,
-                       }
+               .alg.aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(md5),cbc(aes))",
+                               .cra_driver_name = "authenc-hmac-md5-"
+                                                  "cbc-aes-talitos",
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_ASYNC,
+                       },
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = MD5_DIGEST_SIZE,
                },
                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
                                     DESC_HDR_SEL0_AESU |
@@ -2382,15 +2342,16 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
        },
        {       .type = CRYPTO_ALG_TYPE_AEAD,
-               .alg.crypto = {
-                       .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
-                       .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
-                       .cra_blocksize = DES3_EDE_BLOCK_SIZE,
-                       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-                       .cra_aead = {
-                               .ivsize = DES3_EDE_BLOCK_SIZE,
-                               .maxauthsize = MD5_DIGEST_SIZE,
-                       }
+               .alg.aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+                               .cra_driver_name = "authenc-hmac-md5-"
+                                                  "cbc-3des-talitos",
+                               .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                               .cra_flags = CRYPTO_ALG_ASYNC,
+                       },
+                       .ivsize = DES3_EDE_BLOCK_SIZE,
+                       .maxauthsize = MD5_DIGEST_SIZE,
                },
                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
                                     DESC_HDR_SEL0_DEU |
@@ -2658,15 +2619,9 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
        return 0;
 }
 
-static int talitos_cra_init_aead(struct crypto_tfm *tfm)
+static int talitos_cra_init_aead(struct crypto_aead *tfm)
 {
-       struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       talitos_cra_init(tfm);
-
-       /* random first IV */
-       get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
-
+       talitos_cra_init(crypto_aead_tfm(tfm));
        return 0;
 }
 
@@ -2713,9 +2668,11 @@ static int talitos_remove(struct platform_device *ofdev)
        list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
                switch (t_alg->algt.type) {
                case CRYPTO_ALG_TYPE_ABLKCIPHER:
-               case CRYPTO_ALG_TYPE_AEAD:
                        crypto_unregister_alg(&t_alg->algt.alg.crypto);
                        break;
+               case CRYPTO_ALG_TYPE_AEAD:
+                       crypto_unregister_aead(&t_alg->algt.alg.aead);
+                       break;
                case CRYPTO_ALG_TYPE_AHASH:
                        crypto_unregister_ahash(&t_alg->algt.alg.hash);
                        break;
@@ -2727,7 +2682,7 @@ static int talitos_remove(struct platform_device *ofdev)
        if (hw_supports(dev, DESC_HDR_SEL0_RNG))
                talitos_unregister_rng(dev);
 
-       for (i = 0; i < priv->num_channels; i++)
+       for (i = 0; priv->chan && i < priv->num_channels; i++)
                kfree(priv->chan[i].fifo);
 
        kfree(priv->chan);
@@ -2774,15 +2729,11 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
                alg->cra_ablkcipher.geniv = "eseqiv";
                break;
        case CRYPTO_ALG_TYPE_AEAD:
-               alg = &t_alg->algt.alg.crypto;
-               alg->cra_init = talitos_cra_init_aead;
-               alg->cra_type = &crypto_aead_type;
-               alg->cra_aead.setkey = aead_setkey;
-               alg->cra_aead.setauthsize = aead_setauthsize;
-               alg->cra_aead.encrypt = aead_encrypt;
-               alg->cra_aead.decrypt = aead_decrypt;
-               alg->cra_aead.givencrypt = aead_givencrypt;
-               alg->cra_aead.geniv = "<built-in>";
+               alg = &t_alg->algt.alg.aead.base;
+               t_alg->algt.alg.aead.init = talitos_cra_init_aead;
+               t_alg->algt.alg.aead.setkey = aead_setkey;
+               t_alg->algt.alg.aead.encrypt = aead_encrypt;
+               t_alg->algt.alg.aead.decrypt = aead_decrypt;
                break;
        case CRYPTO_ALG_TYPE_AHASH:
                alg = &t_alg->algt.alg.hash.halg.base;
@@ -3041,7 +2992,7 @@ static int talitos_probe(struct platform_device *ofdev)
        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
                if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
                        struct talitos_crypto_alg *t_alg;
-                       char *name = NULL;
+                       struct crypto_alg *alg = NULL;
 
                        t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
                        if (IS_ERR(t_alg)) {
@@ -3053,21 +3004,26 @@ static int talitos_probe(struct platform_device *ofdev)
 
                        switch (t_alg->algt.type) {
                        case CRYPTO_ALG_TYPE_ABLKCIPHER:
-                       case CRYPTO_ALG_TYPE_AEAD:
                                err = crypto_register_alg(
                                                &t_alg->algt.alg.crypto);
-                               name = t_alg->algt.alg.crypto.cra_driver_name;
+                               alg = &t_alg->algt.alg.crypto;
                                break;
+
+                       case CRYPTO_ALG_TYPE_AEAD:
+                               err = crypto_register_aead(
+                                       &t_alg->algt.alg.aead);
+                               alg = &t_alg->algt.alg.aead.base;
+                               break;
+
                        case CRYPTO_ALG_TYPE_AHASH:
                                err = crypto_register_ahash(
                                                &t_alg->algt.alg.hash);
-                               name =
-                                t_alg->algt.alg.hash.halg.base.cra_driver_name;
+                               alg = &t_alg->algt.alg.hash.halg.base;
                                break;
                        }
                        if (err) {
                                dev_err(dev, "%s alg registration failed\n",
-                                       name);
+                                       alg->cra_driver_name);
                                kfree(t_alg);
                        } else
                                list_add_tail(&t_alg->entry, &priv->alg_list);
index 314daf55e7f77791d075065a427b531bb361cce1..0090f3211d68bc94af9d4105f51de7c1123ded0b 100644 (file)
@@ -52,12 +52,7 @@ struct talitos_ptr {
        __be32 ptr;     /* address */
 };
 
-static const struct talitos_ptr zero_entry = {
-       .len = 0,
-       .j_extent = 0,
-       .eptr = 0,
-       .ptr = 0
-};
+static const struct talitos_ptr zero_entry;
 
 /* descriptor */
 struct talitos_desc {
@@ -154,6 +149,7 @@ struct talitos_private {
 
        /* hwrng device */
        struct hwrng rng;
+       bool rng_registered;
 };
 
 extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
index e79e567e43aacae4584b32c2d7fc9ae1e6c1e300..263af709e53604ee5a049f707d2e9f5795031d1d 100644 (file)
@@ -84,6 +84,7 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
        preempt_disable();
        pagefault_disable();
        enable_kernel_altivec();
+       enable_kernel_vsx();
        ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
        ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
        pagefault_enable();
@@ -103,6 +104,7 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
                preempt_disable();
                pagefault_disable();
                enable_kernel_altivec();
+               enable_kernel_vsx();
                aes_p8_encrypt(src, dst, &ctx->enc_key);
                pagefault_enable();
                preempt_enable();
@@ -119,6 +121,7 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
                preempt_disable();
                pagefault_disable();
                enable_kernel_altivec();
+               enable_kernel_vsx();
                aes_p8_decrypt(src, dst, &ctx->dec_key);
                pagefault_enable();
                preempt_enable();
index 7299995c78ec3b34ea76e289cf84dc877f1175ef..0b8fe2ec5315fc8253431ca533d953b9c72d7243 100644 (file)
@@ -85,6 +85,7 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
        preempt_disable();
        pagefault_disable();
        enable_kernel_altivec();
+       enable_kernel_vsx();
        ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
        ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
        pagefault_enable();
@@ -115,6 +116,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
                preempt_disable();
                pagefault_disable();
                enable_kernel_altivec();
+               enable_kernel_vsx();
 
                blkcipher_walk_init(&walk, dst, src, nbytes);
                ret = blkcipher_walk_virt(desc, &walk);
@@ -155,6 +157,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
                preempt_disable();
                pagefault_disable();
                enable_kernel_altivec();
+               enable_kernel_vsx();
 
                blkcipher_walk_init(&walk, dst, src, nbytes);
                ret = blkcipher_walk_virt(desc, &walk);
index 7adae42a7b79ea81a5bc35ae2db9db9b6a2437e2..ee1306cd8f59bc5fd2d1f8b286ff5f553d53cc85 100644 (file)
@@ -82,6 +82,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
 
        pagefault_disable();
        enable_kernel_altivec();
+       enable_kernel_vsx();
        ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
        pagefault_enable();
 
@@ -100,6 +101,7 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
 
        pagefault_disable();
        enable_kernel_altivec();
+       enable_kernel_vsx();
        aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
        pagefault_enable();
 
@@ -113,6 +115,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
                            struct scatterlist *src, unsigned int nbytes)
 {
        int ret;
+       u64 inc;
        struct blkcipher_walk walk;
        struct p8_aes_ctr_ctx *ctx =
                crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
@@ -131,6 +134,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
                while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
                        pagefault_disable();
                        enable_kernel_altivec();
+                       enable_kernel_vsx();
                        aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
                                                    walk.dst.virt.addr,
                                                    (nbytes &
@@ -140,7 +144,12 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
                                                    walk.iv);
                        pagefault_enable();
 
-                       crypto_inc(walk.iv, AES_BLOCK_SIZE);
+                       /* Advance the IV by the number of full blocks just processed */
+                       inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;
+                       if (inc > 0)
+                               while (inc--)
+                                       crypto_inc(walk.iv, AES_BLOCK_SIZE);
+
                        nbytes &= AES_BLOCK_SIZE - 1;
                        ret = blkcipher_walk_done(desc, &walk, nbytes);
                }
index 6c5c20c6108e7bd25bd2075a1155d88110832b0b..228053921b3f024f468dbcf720c31bcb3cc58437 100644 (file)
@@ -1437,28 +1437,28 @@ Load_ctr32_enc_key:
        ?vperm          v31,v31,$out0,$keyperm
        lvx             v25,$x10,$key_          # pre-load round[2]
 
-       vadduwm         $two,$one,$one
+       vadduqm         $two,$one,$one
        subi            $inp,$inp,15            # undo "caller"
        $SHL            $len,$len,4
 
-       vadduwm         $out1,$ivec,$one        # counter values ...
-       vadduwm         $out2,$ivec,$two
+       vadduqm         $out1,$ivec,$one        # counter values ...
+       vadduqm         $out2,$ivec,$two
        vxor            $out0,$ivec,$rndkey0    # ... xored with rndkey[0]
         le?li          $idx,8
-       vadduwm         $out3,$out1,$two
+       vadduqm         $out3,$out1,$two
        vxor            $out1,$out1,$rndkey0
         le?lvsl        $inpperm,0,$idx
-       vadduwm         $out4,$out2,$two
+       vadduqm         $out4,$out2,$two
        vxor            $out2,$out2,$rndkey0
         le?vspltisb    $tmp,0x0f
-       vadduwm         $out5,$out3,$two
+       vadduqm         $out5,$out3,$two
        vxor            $out3,$out3,$rndkey0
         le?vxor        $inpperm,$inpperm,$tmp  # transform for lvx_u/stvx_u
-       vadduwm         $out6,$out4,$two
+       vadduqm         $out6,$out4,$two
        vxor            $out4,$out4,$rndkey0
-       vadduwm         $out7,$out5,$two
+       vadduqm         $out7,$out5,$two
        vxor            $out5,$out5,$rndkey0
-       vadduwm         $ivec,$out6,$two        # next counter value
+       vadduqm         $ivec,$out6,$two        # next counter value
        vxor            $out6,$out6,$rndkey0
        vxor            $out7,$out7,$rndkey0
 
@@ -1594,27 +1594,27 @@ Loop_ctr32_enc8x_middle:
 
        vcipherlast     $in0,$out0,$in0
        vcipherlast     $in1,$out1,$in1
-        vadduwm        $out1,$ivec,$one        # counter values ...
+        vadduqm        $out1,$ivec,$one        # counter values ...
        vcipherlast     $in2,$out2,$in2
-        vadduwm        $out2,$ivec,$two
+        vadduqm        $out2,$ivec,$two
         vxor           $out0,$ivec,$rndkey0    # ... xored with rndkey[0]
        vcipherlast     $in3,$out3,$in3
-        vadduwm        $out3,$out1,$two
+        vadduqm        $out3,$out1,$two
         vxor           $out1,$out1,$rndkey0
        vcipherlast     $in4,$out4,$in4
-        vadduwm        $out4,$out2,$two
+        vadduqm        $out4,$out2,$two
         vxor           $out2,$out2,$rndkey0
        vcipherlast     $in5,$out5,$in5
-        vadduwm        $out5,$out3,$two
+        vadduqm        $out5,$out3,$two
         vxor           $out3,$out3,$rndkey0
        vcipherlast     $in6,$out6,$in6
-        vadduwm        $out6,$out4,$two
+        vadduqm        $out6,$out4,$two
         vxor           $out4,$out4,$rndkey0
        vcipherlast     $in7,$out7,$in7
-        vadduwm        $out7,$out5,$two
+        vadduqm        $out7,$out5,$two
         vxor           $out5,$out5,$rndkey0
        le?vperm        $in0,$in0,$in0,$inpperm
-        vadduwm        $ivec,$out6,$two        # next counter value
+        vadduqm        $ivec,$out6,$two        # next counter value
         vxor           $out6,$out6,$rndkey0
        le?vperm        $in1,$in1,$in1,$inpperm
         vxor           $out7,$out7,$rndkey0
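
For context on the vadduwm -> vadduqm switch above: vadduwm (Vector Add
Unsigned Word Modulo) adds each 32-bit lane independently, so a carry out of
the low word of the counter is silently dropped, whereas vadduqm (Vector Add
Unsigned Quadword Modulo, Power ISA 2.07) performs a full 128-bit addition.
A hand-worked illustration of the failure mode being fixed:

    0x00000000_00000000_00000000_FFFFFFFF + 1
    vadduwm: 0x00000000_00000000_00000000_00000000  (carry lost at the lane boundary)
    vadduqm: 0x00000000_00000000_00000001_00000000  (carry propagates into the next word)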
index b5e29002b66678337c54ec7858634d43285c4213..2183a2e77641e0682ca113951430765bcbcca4fc 100644 (file)
@@ -119,6 +119,7 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
        preempt_disable();
        pagefault_disable();
        enable_kernel_altivec();
+       enable_kernel_vsx();
        enable_kernel_fp();
        gcm_init_p8(ctx->htable, (const u64 *) key);
        pagefault_enable();
@@ -149,6 +150,7 @@ static int p8_ghash_update(struct shash_desc *desc,
                        preempt_disable();
                        pagefault_disable();
                        enable_kernel_altivec();
+                       enable_kernel_vsx();
                        enable_kernel_fp();
                        gcm_ghash_p8(dctx->shash, ctx->htable,
                                     dctx->buffer, GHASH_DIGEST_SIZE);
@@ -163,6 +165,7 @@ static int p8_ghash_update(struct shash_desc *desc,
                        preempt_disable();
                        pagefault_disable();
                        enable_kernel_altivec();
+                       enable_kernel_vsx();
                        enable_kernel_fp();
                        gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
                        pagefault_enable();
@@ -193,6 +196,7 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
                        preempt_disable();
                        pagefault_disable();
                        enable_kernel_altivec();
+                       enable_kernel_vsx();
                        enable_kernel_fp();
                        gcm_ghash_p8(dctx->shash, ctx->htable,
                                     dctx->buffer, GHASH_DIGEST_SIZE);
index 0a6f899839ddb2ff47044f560a776725b86e5dfc..d8429cb71f02761df72060a7e757c58901681302 100644 (file)
@@ -61,6 +61,12 @@ $code=<<___;
        mtspr           256,r0
        li              r10,0x30
        lvx_u           $H,0,r4                 # load H
+       le?xor          r7,r7,r7
+       le?addi         r7,r7,0x8               # need a vperm start with 08
+       le?lvsr         5,0,r7
+       le?vspltisb     6,0x0f
+       le?vxor         5,5,6                   # set a b-endian mask
+       le?vperm        $H,$H,$H,5
 
        vspltisb        $xC2,-16                # 0xf0
        vspltisb        $t0,1                   # one
index a59188494af872e687dec62a268353f2fde5c8d9..b9997335f1937eb8694321ef0e37c66c8877a6c2 100644 (file)
@@ -169,6 +169,7 @@ my $vpmsumd = sub { vcrypto_op(@_, 1224); };
 my $vpmsubh    = sub { vcrypto_op(@_, 1096); };
 my $vpmsumw    = sub { vcrypto_op(@_, 1160); };
 my $vaddudm    = sub { vcrypto_op(@_, 192);  };
+my $vadduqm    = sub { vcrypto_op(@_, 256);  };
 
 my $mtsle      = sub {
     my ($f, $arg) = @_;
index de5f610e0810f00bb7dd160b0c6c27efa31bd3cb..6a30252cd79f20f24e604965a9a023f2ff08478f 100644 (file)
@@ -4048,3 +4048,88 @@ void pci_dev_specific_enable_acs(struct pci_dev *dev)
                }
        }
 }
+
+/*
+ * The PCI capabilities list for Intel DH895xCC VFs (device id 0x0443) with
+ * QuickAssist Technology (QAT) is prematurely terminated in hardware.  The
+ * Next Capability pointer in the MSI Capability Structure should point to
+ * the PCIe Capability Structure but is incorrectly hardwired as 0 terminating
+ * the list.
+ */
+static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
+{
+       int pos, i = 0;
+       u8 next_cap;
+       u16 reg16, *cap;
+       struct pci_cap_saved_state *state;
+
+       /* Bail if the hardware bug is fixed */
+       if (pdev->pcie_cap || pci_find_capability(pdev, PCI_CAP_ID_EXP))
+               return;
+
+       /* Bail if MSI Capability Structure is not found for some reason */
+       pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+       if (!pos)
+               return;
+
+       /*
+        * Bail if Next Capability pointer in the MSI Capability Structure
+        * is not the expected incorrect 0x00.
+        */
+       pci_read_config_byte(pdev, pos + 1, &next_cap);
+       if (next_cap)
+               return;
+
+       /*
+        * PCIe Capability Structure is expected to be at 0x50 and should
+        * terminate the list (Next Capability pointer is 0x00).  Verify that
+        * the Capability ID and Next Capability pointer are as expected.
+        * Open-code some of set_pcie_port_type() and pci_cfg_space_size_ext()
+        * to correctly set kernel data structures which have already been
+        * set incorrectly due to the hardware bug.
+        */
+       pos = 0x50;
+       pci_read_config_word(pdev, pos, &reg16);
+       if (reg16 == (0x0000 | PCI_CAP_ID_EXP)) {
+               u32 status;
+#ifndef PCI_EXP_SAVE_REGS
+#define PCI_EXP_SAVE_REGS     7
+#endif
+               int size = PCI_EXP_SAVE_REGS * sizeof(u16);
+
+               pdev->pcie_cap = pos;
+               pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
+               pdev->pcie_flags_reg = reg16;
+               pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
+               pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
+
+               pdev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
+               if (pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status) !=
+                   PCIBIOS_SUCCESSFUL || (status == 0xffffffff))
+                       pdev->cfg_size = PCI_CFG_SPACE_SIZE;
+
+               if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
+                       return;
+
+               /*
+                * Save PCIE cap
+                */
+               state = kzalloc(sizeof(*state) + size, GFP_KERNEL);
+               if (!state)
+                       return;
+
+               state->cap.cap_nr = PCI_CAP_ID_EXP;
+               state->cap.cap_extended = 0;
+               state->cap.size = size;
+               cap = (u16 *)&state->cap.data[0];
+               pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap[i++]);
+               pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &cap[i++]);
+               pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &cap[i++]);
+               pcie_capability_read_word(pdev, PCI_EXP_RTCTL,  &cap[i++]);
+               pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &cap[i++]);
+               pcie_capability_read_word(pdev, PCI_EXP_LNKCTL2, &cap[i++]);
+               pcie_capability_read_word(pdev, PCI_EXP_SLTCTL2, &cap[i++]);
+               hlist_add_head(&state->next, &pdev->saved_cap_space);
+       }
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
index 7169ad04acc06602aa10ffaeeedcf26c061a2da2..077cae1e6b516779eb4bb7e4e35058838bd019d8 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * AEAD: Authenticated Encryption with Associated Data
  * 
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
  * a breach in the integrity of the message. In essence, that -EBADMSG error
  * code is the key bonus an AEAD cipher has over "standard" block chaining
  * modes.
+ *
+ * Memory Structure:
+ *
+ * To support the needs of the most prominent user of AEAD ciphers, namely
+ * IPSEC, the AEAD ciphers have a special memory layout the caller must adhere
+ * to.
+ *
+ * The scatter list pointing to the input data must contain:
+ *
+ * * for RFC4106 ciphers, the concatenation of
+ * associated authentication data || IV || plaintext or ciphertext. Note that
+ * the same IV (buffer) is also passed in via the aead_request_set_crypt call,
+ * that aead_request_set_ad must be given the combined length of the AAD and
+ * the IV, and that the length passed to aead_request_set_crypt covers only
+ * the input plaintext or ciphertext.
+ *
+ * * for "normal" AEAD ciphers, the concatenation of
+ * associated authentication data || plaintext or ciphertext.
+ *
+ * It is important to note that if multiple scatter gather list entries form
+ * the input data mentioned above, the first entry must not point to a NULL
+ * buffer. If there is any possibility that the AAD buffer is NULL, the
+ * calling code must take precautions to ensure that the first scatter gather
+ * list entry does not end up pointing to a NULL buffer.
  */
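
As an illustrative sketch of the "normal" AEAD layout described above
(hypothetical buffer names, error handling trimmed; the buffer must also leave
room for the authentication tag appended on encryption):

    struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
    struct aead_request *req;
    struct scatterlist sg[1];

    /* buf = AAD (assoclen bytes) || plaintext (ptlen bytes) || room for tag */
    sg_init_one(sg, buf, assoclen + ptlen + crypto_aead_authsize(tfm));

    crypto_aead_setkey(tfm, key, keylen);
    req = aead_request_alloc(tfm, GFP_KERNEL);
    aead_request_set_ad(req, assoclen);
    aead_request_set_crypt(req, sg, sg, ptlen, iv);   /* in-place operation */
    err = crypto_aead_encrypt(req);   /* decryption returns -EBADMSG on a bad tag */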
 
+struct crypto_aead;
+
 /**
  *     struct aead_request - AEAD request
  *     @base: Common attributes for async crypto requests
- *     @old: Boolean whether the old or new AEAD API is used
  *     @assoclen: Length in bytes of associated data for authentication
  *     @cryptlen: Length of data to be encrypted or decrypted
  *     @iv: Initialisation vector
- *     @assoc: Associated data
  *     @src: Source data
  *     @dst: Destination data
  *     @__ctx: Start of private context data
 struct aead_request {
        struct crypto_async_request base;
 
-       bool old;
-
        unsigned int assoclen;
        unsigned int cryptlen;
 
        u8 *iv;
 
-       struct scatterlist *assoc;
        struct scatterlist *src;
        struct scatterlist *dst;
 
        void *__ctx[] CRYPTO_MINALIGN_ATTR;
 };
 
-/**
- *     struct aead_givcrypt_request - AEAD request with IV generation
- *     @seq: Sequence number for IV generation
- *     @giv: Space for generated IV
- *     @areq: The AEAD request itself
- */
-struct aead_givcrypt_request {
-       u64 seq;
-       u8 *giv;
-
-       struct aead_request areq;
-};
-
 /**
  * struct aead_alg - AEAD cipher definition
  * @maxauthsize: Set the maximum authentication tag size supported by the
@@ -141,16 +149,6 @@ struct aead_alg {
 };
 
 struct crypto_aead {
-       int (*setkey)(struct crypto_aead *tfm, const u8 *key,
-                     unsigned int keylen);
-       int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
-       int (*encrypt)(struct aead_request *req);
-       int (*decrypt)(struct aead_request *req);
-       int (*givencrypt)(struct aead_givcrypt_request *req);
-       int (*givdecrypt)(struct aead_givcrypt_request *req);
-
-       struct crypto_aead *child;
-
        unsigned int authsize;
        unsigned int reqsize;
 
@@ -192,16 +190,6 @@ static inline void crypto_free_aead(struct crypto_aead *tfm)
        crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm));
 }
 
-static inline struct crypto_aead *crypto_aead_crt(struct crypto_aead *tfm)
-{
-       return tfm;
-}
-
-static inline struct old_aead_alg *crypto_old_aead_alg(struct crypto_aead *tfm)
-{
-       return &crypto_aead_tfm(tfm)->__crt_alg->cra_aead;
-}
-
 static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
 {
        return container_of(crypto_aead_tfm(tfm)->__crt_alg,
@@ -210,8 +198,7 @@ static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
 
 static inline unsigned int crypto_aead_alg_ivsize(struct aead_alg *alg)
 {
-       return alg->base.cra_aead.encrypt ? alg->base.cra_aead.ivsize :
-                                           alg->ivsize;
+       return alg->ivsize;
 }
 
 /**
@@ -337,7 +324,7 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
  */
 static inline int crypto_aead_encrypt(struct aead_request *req)
 {
-       return crypto_aead_reqtfm(req)->encrypt(req);
+       return crypto_aead_alg(crypto_aead_reqtfm(req))->encrypt(req);
 }
 
 /**
@@ -364,10 +351,12 @@ static inline int crypto_aead_encrypt(struct aead_request *req)
  */
 static inline int crypto_aead_decrypt(struct aead_request *req)
 {
-       if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req)))
+       struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+       if (req->cryptlen < crypto_aead_authsize(aead))
                return -EINVAL;
 
-       return crypto_aead_reqtfm(req)->decrypt(req);
+       return crypto_aead_alg(aead)->decrypt(req);
 }
 
 /**
@@ -387,7 +376,10 @@ static inline int crypto_aead_decrypt(struct aead_request *req)
  *
  * Return: number of bytes
  */
-unsigned int crypto_aead_reqsize(struct crypto_aead *tfm);
+static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
+{
+       return tfm->reqsize;
+}
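
With reqsize exposed this way, a caller can size request memory itself; a
minimal sketch mirroring what aead_request_alloc() does internally (tfm is an
already-allocated handle):

    struct aead_request *req;

    req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), GFP_KERNEL);
    if (req)
            aead_request_set_tfm(req, tfm);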
 
 /**
  * aead_request_set_tfm() - update cipher handle reference in request
@@ -400,7 +392,7 @@ unsigned int crypto_aead_reqsize(struct crypto_aead *tfm);
 static inline void aead_request_set_tfm(struct aead_request *req,
                                        struct crypto_aead *tfm)
 {
-       req->base.tfm = crypto_aead_tfm(tfm->child);
+       req->base.tfm = crypto_aead_tfm(tfm);
 }
 
 /**
@@ -525,23 +517,6 @@ static inline void aead_request_set_crypt(struct aead_request *req,
        req->iv = iv;
 }
 
-/**
- * aead_request_set_assoc() - set the associated data scatter / gather list
- * @req: request handle
- * @assoc: associated data scatter / gather list
- * @assoclen: number of bytes to process from @assoc
- *
- * Obsolete, do not use.
- */
-static inline void aead_request_set_assoc(struct aead_request *req,
-                                         struct scatterlist *assoc,
-                                         unsigned int assoclen)
-{
-       req->assoc = assoc;
-       req->assoclen = assoclen;
-       req->old = true;
-}
-
 /**
  * aead_request_set_ad - set associated data information
  * @req: request handle
@@ -554,77 +529,6 @@ static inline void aead_request_set_ad(struct aead_request *req,
                                       unsigned int assoclen)
 {
        req->assoclen = assoclen;
-       req->old = false;
-}
-
-static inline struct crypto_aead *aead_givcrypt_reqtfm(
-       struct aead_givcrypt_request *req)
-{
-       return crypto_aead_reqtfm(&req->areq);
-}
-
-static inline int crypto_aead_givencrypt(struct aead_givcrypt_request *req)
-{
-       return aead_givcrypt_reqtfm(req)->givencrypt(req);
-};
-
-static inline int crypto_aead_givdecrypt(struct aead_givcrypt_request *req)
-{
-       return aead_givcrypt_reqtfm(req)->givdecrypt(req);
-};
-
-static inline void aead_givcrypt_set_tfm(struct aead_givcrypt_request *req,
-                                        struct crypto_aead *tfm)
-{
-       req->areq.base.tfm = crypto_aead_tfm(tfm);
-}
-
-static inline struct aead_givcrypt_request *aead_givcrypt_alloc(
-       struct crypto_aead *tfm, gfp_t gfp)
-{
-       struct aead_givcrypt_request *req;
-
-       req = kmalloc(sizeof(struct aead_givcrypt_request) +
-                     crypto_aead_reqsize(tfm), gfp);
-
-       if (likely(req))
-               aead_givcrypt_set_tfm(req, tfm);
-
-       return req;
-}
-
-static inline void aead_givcrypt_free(struct aead_givcrypt_request *req)
-{
-       kfree(req);
-}
-
-static inline void aead_givcrypt_set_callback(
-       struct aead_givcrypt_request *req, u32 flags,
-       crypto_completion_t compl, void *data)
-{
-       aead_request_set_callback(&req->areq, flags, compl, data);
-}
-
-static inline void aead_givcrypt_set_crypt(struct aead_givcrypt_request *req,
-                                          struct scatterlist *src,
-                                          struct scatterlist *dst,
-                                          unsigned int nbytes, void *iv)
-{
-       aead_request_set_crypt(&req->areq, src, dst, nbytes, iv);
-}
-
-static inline void aead_givcrypt_set_assoc(struct aead_givcrypt_request *req,
-                                          struct scatterlist *assoc,
-                                          unsigned int assoclen)
-{
-       aead_request_set_assoc(&req->areq, assoc, assoclen);
-}
-
-static inline void aead_givcrypt_set_giv(struct aead_givcrypt_request *req,
-                                        u8 *giv, u64 seq)
-{
-       req->giv = giv;
-       req->seq = seq;
 }
 
 #endif /* _CRYPTO_AEAD_H */
index d4ebf6e9af6a536c589d55c914e56f1c6000f910..c9fe145f7dd3bad3af8cd7902accefbc8c7b5366 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/skbuff.h>
 
 struct crypto_aead;
+struct crypto_instance;
 struct module;
 struct rtattr;
 struct seq_file;
@@ -30,6 +31,7 @@ struct crypto_type {
        void (*show)(struct seq_file *m, struct crypto_alg *alg);
        int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
        struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
+       void (*free)(struct crypto_instance *inst);
 
        unsigned int type;
        unsigned int maskclear;
@@ -180,7 +182,6 @@ struct crypto_instance *crypto_alloc_instance(const char *name,
 void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
 int crypto_enqueue_request(struct crypto_queue *queue,
                           struct crypto_async_request *request);
-void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset);
 struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
 int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
 
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h
new file mode 100644 (file)
index 0000000..274bbae
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Common values for the ChaCha20 algorithm
+ */
+
+#ifndef _CRYPTO_CHACHA20_H
+#define _CRYPTO_CHACHA20_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define CHACHA20_IV_SIZE       16
+#define CHACHA20_KEY_SIZE      32
+#define CHACHA20_BLOCK_SIZE    64
+
+struct chacha20_ctx {
+       u32 key[8];
+};
+
+void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
+int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
+                          unsigned int keysize);
+int crypto_chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+                         struct scatterlist *src, unsigned int nbytes);
+
+#endif
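
A hedged usage sketch: at this point in the series the generic implementation
registers as a blkcipher named "chacha20", so callers normally go through the
blkcipher API rather than these low-level helpers (key/iv/sg names
illustrative):

    struct crypto_blkcipher *tfm = crypto_alloc_blkcipher("chacha20", 0, 0);
    struct blkcipher_desc desc = { .tfm = tfm };

    crypto_blkcipher_setkey(tfm, key, CHACHA20_KEY_SIZE);
    crypto_blkcipher_set_iv(tfm, iv, CHACHA20_IV_SIZE);   /* 16 bytes: counter || nonce */
    crypto_blkcipher_encrypt(&desc, dst_sg, src_sg, nbytes);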
index 57c8a6ee33c27321d1a1e366559a858ae5cccdb3..8e920b44c0ac4b14238ef0877812fab4eaf7f8b8 100644 (file)
@@ -63,6 +63,11 @@ struct ahash_request {
        void *__ctx[] CRYPTO_MINALIGN_ATTR;
 };
 
+#define AHASH_REQUEST_ON_STACK(name, ahash) \
+       char __##name##_desc[sizeof(struct ahash_request) + \
+               crypto_ahash_reqsize(ahash)] CRYPTO_MINALIGN_ATTR; \
+       struct ahash_request *name = (void *)__##name##_desc
+
 /**
  * struct ahash_alg - asynchronous message digest definition
  * @init: Initialize the transformation context. Intended only to initialize the
index 4b2547186519f80e00729144dd8edafab238b5ef..5554cdd8d6c17344f049f138e57027defc050dc1 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * AEAD: Authenticated Encryption with Associated Data
  * 
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -21,6 +21,7 @@
 struct rtattr;
 
 struct aead_instance {
+       void (*free)(struct aead_instance *inst);
        union {
                struct {
                        char head[offsetof(struct aead_alg, base)];
@@ -34,20 +35,15 @@ struct crypto_aead_spawn {
        struct crypto_spawn base;
 };
 
-extern const struct crypto_type crypto_aead_type;
-extern const struct crypto_type crypto_nivaead_type;
+struct aead_queue {
+       struct crypto_queue base;
+};
 
 static inline void *crypto_aead_ctx(struct crypto_aead *tfm)
 {
        return crypto_tfm_ctx(&tfm->base);
 }
 
-static inline struct crypto_instance *crypto_aead_alg_instance(
-       struct crypto_aead *aead)
-{
-       return crypto_tfm_alg_instance(&aead->base);
-}
-
 static inline struct crypto_instance *aead_crypto_instance(
        struct aead_instance *inst)
 {
@@ -61,7 +57,7 @@ static inline struct aead_instance *aead_instance(struct crypto_instance *inst)
 
 static inline struct aead_instance *aead_alg_instance(struct crypto_aead *aead)
 {
-       return aead_instance(crypto_aead_alg_instance(aead));
+       return aead_instance(crypto_tfm_alg_instance(&aead->base));
 }
 
 static inline void *aead_instance_ctx(struct aead_instance *inst)
@@ -90,8 +86,6 @@ static inline void crypto_set_aead_spawn(
        crypto_set_spawn(&spawn->base, inst);
 }
 
-struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask);
-
 int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
                     u32 type, u32 mask);
 
@@ -100,12 +94,6 @@ static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn)
        crypto_drop_spawn(&spawn->base);
 }
 
-static inline struct crypto_alg *crypto_aead_spawn_alg(
-       struct crypto_aead_spawn *spawn)
-{
-       return spawn->base.alg;
-}
-
 static inline struct aead_alg *crypto_spawn_aead_alg(
        struct crypto_aead_spawn *spawn)
 {
@@ -118,43 +106,51 @@ static inline struct crypto_aead *crypto_spawn_aead(
        return crypto_spawn_tfm2(&spawn->base);
 }
 
-struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
-                                      struct rtattr **tb, u32 type, u32 mask);
-void aead_geniv_free(struct aead_instance *inst);
-int aead_geniv_init(struct crypto_tfm *tfm);
-void aead_geniv_exit(struct crypto_tfm *tfm);
+static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
+                                          unsigned int reqsize)
+{
+       aead->reqsize = reqsize;
+}
 
-static inline struct crypto_aead *aead_geniv_base(struct crypto_aead *geniv)
+static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
 {
-       return geniv->child;
+       return alg->maxauthsize;
 }
 
-static inline void *aead_givcrypt_reqctx(struct aead_givcrypt_request *req)
+static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
 {
-       return aead_request_ctx(&req->areq);
+       return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
 }
 
-static inline void aead_givcrypt_complete(struct aead_givcrypt_request *req,
-                                         int err)
+static inline void aead_init_queue(struct aead_queue *queue,
+                                  unsigned int max_qlen)
 {
-       aead_request_complete(&req->areq, err);
+       crypto_init_queue(&queue->base, max_qlen);
 }
 
-static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
-                                          unsigned int reqsize)
+static inline int aead_enqueue_request(struct aead_queue *queue,
+                                      struct aead_request *request)
 {
-       crypto_aead_crt(aead)->reqsize = reqsize;
+       return crypto_enqueue_request(&queue->base, &request->base);
 }
 
-static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
+static inline struct aead_request *aead_dequeue_request(
+       struct aead_queue *queue)
 {
-       return alg->base.cra_aead.encrypt ? alg->base.cra_aead.maxauthsize :
-                                           alg->maxauthsize;
+       struct crypto_async_request *req;
+
+       req = crypto_dequeue_request(&queue->base);
+
+       return req ? container_of(req, struct aead_request, base) : NULL;
 }
 
-static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
+static inline struct aead_request *aead_get_backlog(struct aead_queue *queue)
 {
-       return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
+       struct crypto_async_request *req;
+
+       req = crypto_get_backlog(&queue->base);
+
+       return req ? container_of(req, struct aead_request, base) : NULL;
 }
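
A driver-side sketch of how these queue helpers compose (hypothetical driver
code; it follows the usual crypto_enqueue_request() convention of returning
-EINPROGRESS, or -EBUSY for backlogged requests):

    err = aead_enqueue_request(&myqueue, req);       /* submission path */
    ...
    backlog = aead_get_backlog(&myqueue);            /* completion path, under the driver lock */
    req = aead_dequeue_request(&myqueue);
    if (backlog)
            backlog->base.complete(&backlog->base, -EINPROGRESS);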
 
 int crypto_register_aead(struct aead_alg *alg);
index 9ca9b871aba56a1fe9fa81bfcd665bbfa0e39697..59333635e712d15a109919f03c70107183df473e 100644 (file)
 
 #include <crypto/internal/aead.h>
 #include <linux/spinlock.h>
+#include <linux/types.h>
 
 struct aead_geniv_ctx {
        spinlock_t lock;
        struct crypto_aead *child;
+       struct crypto_blkcipher *null;
+       u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
 };
 
+struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
+                                      struct rtattr **tb, u32 type, u32 mask);
+void aead_geniv_free(struct aead_instance *inst);
+int aead_init_geniv(struct crypto_aead *tfm);
+void aead_exit_geniv(struct crypto_aead *tfm);
+
 #endif /* _CRYPTO_INTERNAL_GENIV_H */
index b3a46c515d1b7ed2cedb5185898e9b6a685ea434..2cf7a61ece59e987893138a35c3c48336a630c19 100644 (file)
@@ -107,5 +107,20 @@ static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req)
        return req->base.flags;
 }
 
+static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
+{
+       return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *skcipher_request_ctx(struct skcipher_request *req)
+{
+       return req->__ctx;
+}
+
+static inline u32 skcipher_request_flags(struct skcipher_request *req)
+{
+       return req->base.flags;
+}
+
 #endif /* _CRYPTO_INTERNAL_SKCIPHER_H */
 
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
new file mode 100644 (file)
index 0000000..894df59
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Common values for the Poly1305 algorithm
+ */
+
+#ifndef _CRYPTO_POLY1305_H
+#define _CRYPTO_POLY1305_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define POLY1305_BLOCK_SIZE    16
+#define POLY1305_KEY_SIZE      32
+#define POLY1305_DIGEST_SIZE   16
+
+struct poly1305_desc_ctx {
+       /* key */
+       u32 r[5];
+       /* finalize key */
+       u32 s[4];
+       /* accumulator */
+       u32 h[5];
+       /* partial buffer */
+       u8 buf[POLY1305_BLOCK_SIZE];
+       /* bytes used in partial buffer */
+       unsigned int buflen;
+       /* r key has been set */
+       bool rset;
+       /* s key has been set */
+       bool sset;
+};
+
+int crypto_poly1305_init(struct shash_desc *desc);
+int crypto_poly1305_setkey(struct crypto_shash *tfm,
+                          const u8 *key, unsigned int keylen);
+unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
+                                       const u8 *src, unsigned int srclen);
+int crypto_poly1305_update(struct shash_desc *desc,
+                          const u8 *src, unsigned int srclen);
+int crypto_poly1305_final(struct shash_desc *desc, u8 *dst);
+
+#endif
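
A hedged usage note: Poly1305 takes a one-time key, so in this version the
generic "poly1305" shash does not accept the key via setkey; the 32-byte key is
expected as the first bytes fed to update, which is what the rset/sset flags
above track. A sketch (tfm/msg names illustrative):

    SHASH_DESC_ON_STACK(desc, tfm);
    u8 tag[POLY1305_DIGEST_SIZE];

    desc->tfm = tfm;
    desc->flags = 0;
    crypto_shash_init(desc);
    crypto_shash_update(desc, onetime_key, POLY1305_KEY_SIZE);   /* consumed as r || s */
    crypto_shash_update(desc, msg, msglen);
    crypto_shash_final(desc, tag);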
index 07d245f073d161a8e6f2380a910b2698e44919ae..d8dd41fb034fe5af52b68699096abb6ebc5559d5 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Symmetric key ciphers.
  * 
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
 #include <linux/kernel.h>
 #include <linux/slab.h>
 
+/**
+ *     struct skcipher_request - Symmetric key cipher request
+ *     @cryptlen: Number of bytes to encrypt or decrypt
+ *     @iv: Initialisation Vector
+ *     @src: Source SG list
+ *     @dst: Destination SG list
+ *     @base: Underlying async request
+ *     @__ctx: Start of private context data
+ */
+struct skcipher_request {
+       unsigned int cryptlen;
+
+       u8 *iv;
+
+       struct scatterlist *src;
+       struct scatterlist *dst;
+
+       struct crypto_async_request base;
+
+       void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
 /**
  *     struct skcipher_givcrypt_request - Crypto request with IV generation
  *     @seq: Sequence number for IV generation
@@ -30,6 +52,23 @@ struct skcipher_givcrypt_request {
        struct ablkcipher_request creq;
 };
 
+struct crypto_skcipher {
+       int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
+                     unsigned int keylen);
+       int (*encrypt)(struct skcipher_request *req);
+       int (*decrypt)(struct skcipher_request *req);
+
+       unsigned int ivsize;
+       unsigned int reqsize;
+
+       struct crypto_tfm base;
+};
+
+#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \
+       char __##name##_desc[sizeof(struct skcipher_request) + \
+               crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \
+       struct skcipher_request *name = (void *)__##name##_desc
+
 static inline struct crypto_ablkcipher *skcipher_givcrypt_reqtfm(
        struct skcipher_givcrypt_request *req)
 {
@@ -106,5 +145,355 @@ static inline void skcipher_givcrypt_set_giv(
        req->seq = seq;
 }
 
+/**
+ * DOC: Symmetric Key Cipher API
+ *
+ * Symmetric key cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_SKCIPHER (listed as type "skcipher" in /proc/crypto).
+ *
+ * Asynchronous cipher operations imply that the function invocation for a
+ * cipher request returns immediately, before the operation has completed.
+ * The cipher request may then be processed in a separate kernel thread and
+ * thus load-balanced across CPUs by the process scheduler. To allow
+ * the kernel crypto API to inform the caller about the completion of a cipher
+ * request, the caller must provide a callback function. That function is
+ * invoked with the cipher handle when the request completes.
+ *
+ * To support the asynchronous operation, more information than just the
+ * cipher handle must be supplied to the kernel crypto API. That additional
+ * information is given by filling in the skcipher_request data structure.
+ *
+ * For the symmetric key cipher API, the state is maintained with the tfm
+ * cipher handle. A single tfm can be used across multiple calls and in
+ * parallel. For asynchronous block cipher calls, context data supplied and
+ * only used by the caller can be referenced in the request data structure in
+ * addition to the IV used for the cipher request. Maintaining such state
+ * information is important for crypto driver implementers: when the callback
+ * function is invoked upon completion of a cipher operation, it may need to
+ * know which of several operations issued in parallel has just finished. The
+ * kernel crypto API itself makes no use of this state information.
+ */
+
+static inline struct crypto_skcipher *__crypto_skcipher_cast(
+       struct crypto_tfm *tfm)
+{
+       return container_of(tfm, struct crypto_skcipher, base);
+}
+
+/**
+ * crypto_alloc_skcipher() - allocate symmetric key cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *           skcipher cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an skcipher. The returned struct
+ * crypto_skcipher is the cipher handle that is required for any subsequent
+ * API invocation for that skcipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ *        of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
+                                             u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_skcipher_tfm(
+       struct crypto_skcipher *tfm)
+{
+       return &tfm->base;
+}
+
+/**
+ * crypto_free_skcipher() - zeroize and free cipher handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
+{
+       crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm));
+}
+
+/**
+ * crypto_has_skcipher() - Search for the availability of an skcipher.
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *           skcipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the skcipher is known to the kernel crypto API; false
+ *        otherwise
+ */
+static inline int crypto_has_skcipher(const char *alg_name, u32 type,
+                                       u32 mask)
+{
+       return crypto_has_alg(alg_name, crypto_skcipher_type(type),
+                             crypto_skcipher_mask(mask));
+}
+
+/**
+ * crypto_skcipher_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the skcipher referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
+static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
+{
+       return tfm->ivsize;
+}
+
+/**
+ * crypto_skcipher_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the skcipher referenced with the cipher handle is
+ * returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_skcipher_blocksize(
+       struct crypto_skcipher *tfm)
+{
+       return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
+}
+
+static inline unsigned int crypto_skcipher_alignmask(
+       struct crypto_skcipher *tfm)
+{
+       return crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm));
+}
+
+static inline u32 crypto_skcipher_get_flags(struct crypto_skcipher *tfm)
+{
+       return crypto_tfm_get_flags(crypto_skcipher_tfm(tfm));
+}
+
+static inline void crypto_skcipher_set_flags(struct crypto_skcipher *tfm,
+                                              u32 flags)
+{
+       crypto_tfm_set_flags(crypto_skcipher_tfm(tfm), flags);
+}
+
+static inline void crypto_skcipher_clear_flags(struct crypto_skcipher *tfm,
+                                                u32 flags)
+{
+       crypto_tfm_clear_flags(crypto_skcipher_tfm(tfm), flags);
+}
+
+/**
+ * crypto_skcipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the skcipher referenced by the cipher
+ * handle.
+ *
+ * Note, the key length determines the cipher variant. Many block ciphers come
+ * in different variants depending on the key size, such as AES-128 vs AES-192
+ * vs. AES-256. Providing a 16 byte key for an AES cipher handle therefore
+ * selects AES-128.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
+                                        const u8 *key, unsigned int keylen)
+{
+       return tfm->setkey(tfm, key, keylen);
+}
+
+/**
+ * crypto_skcipher_reqtfm() - obtain cipher handle from request
+ * @req: skcipher_request out of which the cipher handle is to be obtained
+ *
+ * Return the crypto_skcipher handle when furnishing an skcipher_request
+ * data structure.
+ *
+ * Return: crypto_skcipher handle
+ */
+static inline struct crypto_skcipher *crypto_skcipher_reqtfm(
+       struct skcipher_request *req)
+{
+       return __crypto_skcipher_cast(req->base.tfm);
+}
+
+/**
+ * crypto_skcipher_encrypt() - encrypt plaintext
+ * @req: reference to the skcipher_request handle that holds all information
+ *      needed to perform the cipher operation
+ *
+ * Encrypt plaintext data using the skcipher_request handle. That data
+ * structure and how it is filled with data is discussed with the
+ * skcipher_request_* functions.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+
+       return tfm->encrypt(req);
+}
+
+/**
+ * crypto_skcipher_decrypt() - decrypt ciphertext
+ * @req: reference to the skcipher_request handle that holds all information
+ *      needed to perform the cipher operation
+ *
+ * Decrypt ciphertext data using the skcipher_request handle. That data
+ * structure and how it is filled with data is discussed with the
+ * skcipher_request_* functions.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+
+       return tfm->decrypt(req);
+}
+
+/**
+ * DOC: Symmetric Key Cipher Request Handle
+ *
+ * The skcipher_request data structure contains all pointers to data
+ * required for the symmetric key cipher operation. This includes the cipher
+ * handle (which can be used by multiple skcipher_request instances), pointer
+ * to plaintext and ciphertext, asynchronous callback function, etc. It acts
+ * as a handle to the skcipher_request_* API calls in a similar way as
+ * skcipher handle to the crypto_skcipher_* API calls.
+ */
+
+/**
+ * crypto_skcipher_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return: number of bytes
+ */
+static inline unsigned int crypto_skcipher_reqsize(struct crypto_skcipher *tfm)
+{
+       return tfm->reqsize;
+}
+
+/**
+ * skcipher_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing skcipher handle in the request
+ * data structure with a different one.
+ */
+static inline void skcipher_request_set_tfm(struct skcipher_request *req,
+                                           struct crypto_skcipher *tfm)
+{
+       req->base.tfm = crypto_skcipher_tfm(tfm);
+}
+
+static inline struct skcipher_request *skcipher_request_cast(
+       struct crypto_async_request *req)
+{
+       return container_of(req, struct skcipher_request, base);
+}
+
+/**
+ * skcipher_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the skcipher
+ * encrypt and decrypt API calls. During the allocation, the provided skcipher
+ * handle is registered in the request data structure.
+ *
+ * Return: allocated request handle in case of success; IS_ERR() is true in case
+ *        of an error, PTR_ERR() returns the error code.
+ */
+static inline struct skcipher_request *skcipher_request_alloc(
+       struct crypto_skcipher *tfm, gfp_t gfp)
+{
+       struct skcipher_request *req;
+
+       req = kmalloc(sizeof(struct skcipher_request) +
+                     crypto_skcipher_reqsize(tfm), gfp);
+
+       if (likely(req))
+               skcipher_request_set_tfm(req, tfm);
+
+       return req;
+}
+
+/**
+ * skcipher_request_free() - zeroize and free request data structure
+ * @req: request data structure cipher handle to be freed
+ */
+static inline void skcipher_request_free(struct skcipher_request *req)
+{
+       kzfree(req);
+}
+
+/**
+ * skcipher_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ *         CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ *        increase the wait queue beyond the initial maximum size;
+ *        CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: The data pointer refers to memory that is not used by the kernel
+ *       crypto API, but provided to the callback function for it to use. Here,
+ *       the caller can provide a reference to memory the callback function can
+ *       operate on. As the callback function is invoked asynchronously to the
+ *       related functionality, it may need to access data structures of the
+ *       related functionality which can be referenced using this pointer. The
+ *       callback function can access the memory via the "data" field in the
+ *       crypto_async_request data structure provided to the callback function.
+ *
+ * This function allows setting the callback function that is triggered once the
+ * cipher operation completes.
+ *
+ * The callback function is registered with the skcipher_request handle and
+ * must comply with the following template
+ *
+ *     void callback_function(struct crypto_async_request *req, int error)
+ */
+static inline void skcipher_request_set_callback(struct skcipher_request *req,
+                                                u32 flags,
+                                                crypto_completion_t compl,
+                                                void *data)
+{
+       req->base.complete = compl;
+       req->base.data = data;
+       req->base.flags = flags;
+}
+
+/**
+ * skcipher_request_set_crypt() - set data buffers
+ * @req: request handle
+ * @src: source scatter / gather list
+ * @dst: destination scatter / gather list
+ * @cryptlen: number of bytes to process from @src
+ * @iv: IV for the cipher operation which must comply with the IV size defined
+ *      by crypto_skcipher_ivsize
+ *
+ * This function allows setting of the source data and destination data
+ * scatter / gather lists.
+ *
+ * For encryption, the source is treated as the plaintext and the
+ * destination is the ciphertext. For a decryption operation, the use is
+ * reversed - the source is the ciphertext and the destination is the plaintext.
+ */
+static inline void skcipher_request_set_crypt(
+       struct skcipher_request *req,
+       struct scatterlist *src, struct scatterlist *dst,
+       unsigned int cryptlen, void *iv)
+{
+       req->src = src;
+       req->dst = dst;
+       req->cryptlen = cryptlen;
+       req->iv = iv;
+}
+
 #endif /* _CRYPTO_SKCIPHER_H */
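
Taken together, a minimal end-to-end sketch of the new top-level interface
(assuming a synchronous "cbc(aes)" handle; buffer names illustrative, and the
on-stack request needs no skcipher_request_free()):

    struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
    SKCIPHER_REQUEST_ON_STACK(req, tfm);
    struct scatterlist sg;

    crypto_skcipher_setkey(tfm, key, 16);          /* 16-byte key selects AES-128 */
    sg_init_one(&sg, buf, buflen);                 /* in-place: dst == src */

    skcipher_request_set_tfm(req, tfm);
    skcipher_request_set_callback(req, 0, NULL, NULL);
    skcipher_request_set_crypt(req, &sg, &sg, buflen, iv);
    err = crypto_skcipher_encrypt(req);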
 
index 8780868458a09e9b35d7fdee97f732663da0a61c..8de173ff19f310bbb8c0e9d7a7e26cb442079ff7 100644 (file)
 #define IMX6QDL_CLK_VIDEO_27M                  238
 #define IMX6QDL_CLK_MIPI_CORE_CFG              239
 #define IMX6QDL_CLK_MIPI_IPG                   240
-#define IMX6QDL_CLK_END                                241
+#define IMX6QDL_CLK_CAAM_MEM                   241
+#define IMX6QDL_CLK_CAAM_ACLK                  242
+#define IMX6QDL_CLK_CAAM_IPG                   243
+#define IMX6QDL_CLK_END                                244
 
 #endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
index 81ef938b0a8e9d34342ddbf8459a7a8b30b489d1..e71cb70a1ac2f6eb1faf9023671f469574786c60 100644 (file)
  */
 #define CRYPTO_ALG_INTERNAL            0x00002000
 
-/*
- * Temporary flag used to prevent legacy AEAD implementations from
- * being used by user-space.
- */
-#define CRYPTO_ALG_AEAD_NEW            0x00004000
-
 /*
  * Transform masks and values (for crt_flags).
  */
 struct scatterlist;
 struct crypto_ablkcipher;
 struct crypto_async_request;
-struct crypto_aead;
 struct crypto_blkcipher;
 struct crypto_hash;
 struct crypto_tfm;
 struct crypto_type;
-struct aead_request;
-struct aead_givcrypt_request;
 struct skcipher_givcrypt_request;
 
 typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
@@ -274,47 +265,6 @@ struct ablkcipher_alg {
        unsigned int ivsize;
 };
 
-/**
- * struct old_aead_alg - AEAD cipher definition
- * @maxauthsize: Set the maximum authentication tag size supported by the
- *              transformation. A transformation may support smaller tag sizes.
- *              As the authentication tag is a message digest to ensure the
- *              integrity of the encrypted data, a consumer typically wants the
- *              largest authentication tag possible as defined by this
- *              variable.
- * @setauthsize: Set authentication size for the AEAD transformation. This
- *              function is used to specify the consumer requested size of the
- *              authentication tag to be either generated by the transformation
- *              during encryption or the size of the authentication tag to be
- *              supplied during the decryption operation. This function is also
- *              responsible for checking the authentication tag size for
- *              validity.
- * @setkey: see struct ablkcipher_alg
- * @encrypt: see struct ablkcipher_alg
- * @decrypt: see struct ablkcipher_alg
- * @givencrypt: see struct ablkcipher_alg
- * @givdecrypt: see struct ablkcipher_alg
- * @geniv: see struct ablkcipher_alg
- * @ivsize: see struct ablkcipher_alg
- *
- * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
- * mandatory and must be filled.
- */
-struct old_aead_alg {
-       int (*setkey)(struct crypto_aead *tfm, const u8 *key,
-                     unsigned int keylen);
-       int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
-       int (*encrypt)(struct aead_request *req);
-       int (*decrypt)(struct aead_request *req);
-       int (*givencrypt)(struct aead_givcrypt_request *req);
-       int (*givdecrypt)(struct aead_givcrypt_request *req);
-
-       const char *geniv;
-
-       unsigned int ivsize;
-       unsigned int maxauthsize;
-};
-
 /**
  * struct blkcipher_alg - synchronous block cipher definition
  * @min_keysize: see struct ablkcipher_alg
@@ -409,7 +359,6 @@ struct compress_alg {
 
 
 #define cra_ablkcipher cra_u.ablkcipher
-#define cra_aead       cra_u.aead
 #define cra_blkcipher  cra_u.blkcipher
 #define cra_cipher     cra_u.cipher
 #define cra_compress   cra_u.compress
@@ -460,7 +409,7 @@ struct compress_alg {
  *           struct crypto_type, which implements callbacks common for all
  *           transformation types. There are multiple options:
  *           &crypto_blkcipher_type, &crypto_ablkcipher_type,
- *           &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type.
+ *           &crypto_ahash_type, &crypto_rng_type.
  *           This field might be empty. In that case, there are no common
  *           callbacks. This is the case for: cipher, compress, shash.
  * @cra_u: Callbacks implementing the transformation. This is a union of
@@ -508,7 +457,6 @@ struct crypto_alg {
 
        union {
                struct ablkcipher_alg ablkcipher;
-               struct old_aead_alg aead;
                struct blkcipher_alg blkcipher;
                struct cipher_alg cipher;
                struct compress_alg compress;
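
With the temporary CRYPTO_ALG_AEAD_NEW flag and the old_aead_alg union
member gone, AEAD implementations register exclusively through the new
interface's struct aead_alg and crypto_register_aead(). A minimal,
hypothetical skeleton under that assumption (the demo_* names and the
ivsize/maxauthsize values are illustrative):

#include <crypto/internal/aead.h>
#include <linux/module.h>

static int demo_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
	return 0;		/* expand and store the key */
}

static int demo_aead_encrypt(struct aead_request *req)
{
	return 0;		/* encrypt req->src into req->dst, append tag */
}

static int demo_aead_decrypt(struct aead_request *req)
{
	return 0;		/* verify the tag, then decrypt */
}

static struct aead_alg demo_aead = {
	.setkey		= demo_aead_setkey,
	.encrypt	= demo_aead_encrypt,
	.decrypt	= demo_aead_decrypt,
	.ivsize		= 12,
	.maxauthsize	= 16,
	.base		= {
		.cra_name	 = "demo-aead",
		.cra_driver_name = "demo-aead-generic",
		.cra_priority	 = 100,
		.cra_blocksize	 = 1,
		.cra_module	 = THIS_MODULE,
	},
};

/* Registered with crypto_register_aead(&demo_aead) from module init and
 * torn down with crypto_unregister_aead(&demo_aead); no extra flag is
 * needed to mark the implementation as "new style" any more. */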
index bc0a1da8afba2c5362fb55772e7154a8ad2ea844..95c52a95259e89b6fc0ab279c4e51cfc5f35bf1f 100644 (file)
@@ -146,18 +146,25 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
        uint8_t *p;
        mpi_limb_t alimb;
        unsigned int n = mpi_get_size(a);
-       int i;
+       int i, lzeros = 0;
 
-       if (buf_len < n || !buf)
+       if (buf_len < n || !buf || !nbytes)
                return -EINVAL;
 
        if (sign)
                *sign = a->sign;
 
-       if (nbytes)
-               *nbytes = n;
+       p = (void *)&a->d[a->nlimbs] - 1;
+
+       for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
+               if (!*p)
+                       lzeros++;
+               else
+                       break;
+       }
 
        p = buf;
+       *nbytes = n - lzeros;
 
        for (i = a->nlimbs - 1; i >= 0; i--) {
                alimb = a->d[i];
@@ -178,6 +185,19 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
 #else
 #error please implement for this limb size.
 #endif
+
+               if (lzeros > 0) {
+                       if (lzeros >= sizeof(alimb)) {
+                               p -= sizeof(alimb);
+                       } else {
+                               mpi_limb_t *limb1 = (void *)p - sizeof(alimb);
+                               mpi_limb_t *limb2 = (void *)p - sizeof(alimb)
+                                                       + lzeros;
+                               *limb1 = *limb2;
+                               p -= lzeros;
+                       }
+                       lzeros -= sizeof(alimb);
+               }
        }
        return 0;
 }
@@ -197,7 +217,7 @@ EXPORT_SYMBOL_GPL(mpi_read_buffer);
  */
 void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign)
 {
-       uint8_t *buf, *p;
+       uint8_t *buf;
        unsigned int n;
        int ret;
 
@@ -220,14 +240,6 @@ void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign)
                kfree(buf);
                return NULL;
        }
-
-       /* this is sub-optimal but we need to do the shift operation
-        * because the caller has to free the returned buffer */
-       for (p = buf; !*p && *nbytes; p++, --*nbytes)
-               ;
-       if (p != buf)
-               memmove(buf, p, *nbytes);
-
        return buf;
 }
 EXPORT_SYMBOL_GPL(mpi_get_buffer);
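
Net effect of the two hunks above: mpi_read_buffer() now counts the leading
zero bytes of the most significant limbs up front and reports only the
significant length in *nbytes, so mpi_get_buffer() no longer needs to shift
the converted buffer afterwards. A standalone sketch of the equivalent
byte-level operation (hypothetical userspace code, not part of the commit):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Drop leading zero bytes from a big-endian number in place and return
 * the count of significant bytes, mirroring what *nbytes now reports. */
static size_t strip_leading_zeros(uint8_t *buf, size_t len)
{
	size_t lzeros = 0;

	while (lzeros < len && buf[lzeros] == 0)
		lzeros++;
	if (lzeros && lzeros < len)
		memmove(buf, buf + lzeros, len - lzeros);
	return len - lzeros;
}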
index 42f7c76cf853697341a6e34cf522f92927b36ad4..f07224d8b88f6a2479de02ce944181edcd2576ab 100644 (file)
@@ -31,7 +31,7 @@ static struct xfrm_algo_desc aead_list[] = {
 
        .uinfo = {
                .aead = {
-                       .geniv = "seqniv",
+                       .geniv = "seqiv",
                        .icv_truncbits = 64,
                }
        },
@@ -50,7 +50,7 @@ static struct xfrm_algo_desc aead_list[] = {
 
        .uinfo = {
                .aead = {
-                       .geniv = "seqniv",
+                       .geniv = "seqiv",
                        .icv_truncbits = 96,
                }
        },
@@ -69,7 +69,7 @@ static struct xfrm_algo_desc aead_list[] = {
 
        .uinfo = {
                .aead = {
-                       .geniv = "seqniv",
+                       .geniv = "seqiv",
                        .icv_truncbits = 128,
                }
        },
@@ -88,7 +88,7 @@ static struct xfrm_algo_desc aead_list[] = {
 
        .uinfo = {
                .aead = {
-                       .geniv = "seqniv",
+                       .geniv = "seqiv",
                        .icv_truncbits = 64,
                }
        },
@@ -107,7 +107,7 @@ static struct xfrm_algo_desc aead_list[] = {
 
        .uinfo = {
                .aead = {
-                       .geniv = "seqniv",
+                       .geniv = "seqiv",
                        .icv_truncbits = 96,
                }
        },
@@ -126,7 +126,7 @@ static struct xfrm_algo_desc aead_list[] = {
 
        .uinfo = {
                .aead = {
-                       .geniv = "seqniv",
+                       .geniv = "seqiv",
                        .icv_truncbits = 128,
                }
        },
@@ -164,7 +164,7 @@ static struct xfrm_algo_desc aead_list[] = {
 
        .uinfo = {
                .aead = {
-                       .geniv = "seqniv",
+                       .geniv = "seqiv",
                        .icv_truncbits = 128,
                }
        },