git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
tls: fix replacing proto_ops
author: Jakub Kicinski <kuba@kernel.org>
Wed, 24 Nov 2021 23:25:56 +0000 (15:25 -0800)
committer: Andrea Righi <andrea.righi@canonical.com>
Tue, 4 Jan 2022 08:49:02 +0000 (09:49 +0100)
BugLink: https://bugs.launchpad.net/bugs/1953370
[ Upstream commit f3911f73f51d1534f4db70b516cc1fcb6be05bae ]

We replace proto_ops whenever TLS is configured for RX. But our
replacement also overrides sendpage_locked, which will crash
unless TX is also configured. Similarly we plug both of those
in for TLS_HW (NIC crypto offload) even though TLS_HW has a completely
different implementation for TX.

Last but not least we always plug in something based on inet_stream_ops
even though a few of the callbacks differ for IPv6 (getname, release,
bind).

Use a callback building method similar to what we do for struct proto.

Fixes: c46234ebb4d1 ("tls: RX path for ktls")
Fixes: d4ffb02dee2f ("net/tls: enable sk_msg redirect to tls socket egress")
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
net/tls/tls_main.c

index 9ab81db8a654534f6edde98d82dc9e9f8e3e6bd8..9aac9c60d786db4f2cda968d0892e17d2551a027 100644 (file)
@@ -61,7 +61,7 @@ static DEFINE_MUTEX(tcpv6_prot_mutex);
 static const struct proto *saved_tcpv4_prot;
 static DEFINE_MUTEX(tcpv4_prot_mutex);
 static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
-static struct proto_ops tls_sw_proto_ops;
+static struct proto_ops tls_proto_ops[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
 static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
                         const struct proto *base);
 
@@ -71,6 +71,8 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx)
 
        WRITE_ONCE(sk->sk_prot,
                   &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]);
+       WRITE_ONCE(sk->sk_socket->ops,
+                  &tls_proto_ops[ip_ver][ctx->tx_conf][ctx->rx_conf]);
 }
 
 int wait_on_pending_writer(struct sock *sk, long *timeo)
@@ -581,8 +583,6 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
        if (tx) {
                ctx->sk_write_space = sk->sk_write_space;
                sk->sk_write_space = tls_write_space;
-       } else {
-               sk->sk_socket->ops = &tls_sw_proto_ops;
        }
        goto out;
 
@@ -640,6 +640,39 @@ struct tls_context *tls_ctx_create(struct sock *sk)
        return ctx;
 }
 
+static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
+                           const struct proto_ops *base)
+{
+       ops[TLS_BASE][TLS_BASE] = *base;
+
+       ops[TLS_SW  ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
+       ops[TLS_SW  ][TLS_BASE].sendpage_locked = tls_sw_sendpage_locked;
+
+       ops[TLS_BASE][TLS_SW  ] = ops[TLS_BASE][TLS_BASE];
+       ops[TLS_BASE][TLS_SW  ].splice_read     = tls_sw_splice_read;
+
+       ops[TLS_SW  ][TLS_SW  ] = ops[TLS_SW  ][TLS_BASE];
+       ops[TLS_SW  ][TLS_SW  ].splice_read     = tls_sw_splice_read;
+
+#ifdef CONFIG_TLS_DEVICE
+       ops[TLS_HW  ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
+       ops[TLS_HW  ][TLS_BASE].sendpage_locked = NULL;
+
+       ops[TLS_HW  ][TLS_SW  ] = ops[TLS_BASE][TLS_SW  ];
+       ops[TLS_HW  ][TLS_SW  ].sendpage_locked = NULL;
+
+       ops[TLS_BASE][TLS_HW  ] = ops[TLS_BASE][TLS_SW  ];
+
+       ops[TLS_SW  ][TLS_HW  ] = ops[TLS_SW  ][TLS_SW  ];
+
+       ops[TLS_HW  ][TLS_HW  ] = ops[TLS_HW  ][TLS_SW  ];
+       ops[TLS_HW  ][TLS_HW  ].sendpage_locked = NULL;
+#endif
+#ifdef CONFIG_TLS_TOE
+       ops[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
+#endif
+}
+
 static void tls_build_proto(struct sock *sk)
 {
        int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
@@ -651,6 +684,8 @@ static void tls_build_proto(struct sock *sk)
                mutex_lock(&tcpv6_prot_mutex);
                if (likely(prot != saved_tcpv6_prot)) {
                        build_protos(tls_prots[TLSV6], prot);
+                       build_proto_ops(tls_proto_ops[TLSV6],
+                                       sk->sk_socket->ops);
                        smp_store_release(&saved_tcpv6_prot, prot);
                }
                mutex_unlock(&tcpv6_prot_mutex);
@@ -661,6 +696,8 @@ static void tls_build_proto(struct sock *sk)
                mutex_lock(&tcpv4_prot_mutex);
                if (likely(prot != saved_tcpv4_prot)) {
                        build_protos(tls_prots[TLSV4], prot);
+                       build_proto_ops(tls_proto_ops[TLSV4],
+                                       sk->sk_socket->ops);
                        smp_store_release(&saved_tcpv4_prot, prot);
                }
                mutex_unlock(&tcpv4_prot_mutex);
@@ -871,10 +908,6 @@ static int __init tls_register(void)
        if (err)
                return err;
 
-       tls_sw_proto_ops = inet_stream_ops;
-       tls_sw_proto_ops.splice_read = tls_sw_splice_read;
-       tls_sw_proto_ops.sendpage_locked   = tls_sw_sendpage_locked;
-
        tls_device_init();
        tcp_register_ulp(&tcp_tls_ulp_ops);