git.proxmox.com Git - mirror_ubuntu-disco-kernel.git/commitdiff
net/tls: don't copy negative amounts of data in reencrypt
author: Jakub Kicinski <jakub.kicinski@netronome.com>
Fri, 26 Apr 2019 00:35:09 +0000 (17:35 -0700)
committer: Stefan Bader <stefan.bader@canonical.com>
Tue, 2 Jul 2019 10:07:53 +0000 (12:07 +0200)
BugLink: https://bugs.launchpad.net/bugs/1832749
[ Upstream commit 97e1caa517e22d62a283b876fb8aa5f4672c83dd ]

There is no guarantee the record starts before the skb frags.
If we don't check for this condition copy amount will get
negative, leading to reads and writes to random memory locations.
Familiar hilarity ensues.

Fixes: 4799ac81e52a ("tls: Add rx inline crypto offload")
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: John Hurley <john.hurley@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Connor Kuehl <connor.kuehl@canonical.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
net/tls/tls_device.c

index 5f1d937c4be9502208890d9632e5fb3da28736ee..4068101d43ea78d94c5fd02080eb647d5c414081 100644 (file)
@@ -610,14 +610,16 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
        else
                err = 0;
 
-       copy = min_t(int, skb_pagelen(skb) - offset,
-                    rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+       if (skb_pagelen(skb) > offset) {
+               copy = min_t(int, skb_pagelen(skb) - offset,
+                            rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
 
-       if (skb->decrypted)
-               skb_store_bits(skb, offset, buf, copy);
+               if (skb->decrypted)
+                       skb_store_bits(skb, offset, buf, copy);
 
-       offset += copy;
-       buf += copy;
+               offset += copy;
+               buf += copy;
+       }
 
        skb_walk_frags(skb, skb_iter) {
                copy = min_t(int, skb_iter->len,