net/tls: don't clear TX resync flag on error
authorDirk van der Merwe <dirk.vandermerwe@netronome.com>
Tue, 9 Jul 2019 02:53:13 +0000 (19:53 -0700)
committerDavid S. Miller <davem@davemloft.net>
Tue, 9 Jul 2019 03:21:09 +0000 (20:21 -0700)
Introduce a return code for the tls_dev_resync callback.

When the driver TX resync fails, the kernel can retry the resync
until it succeeds.  This prevents drivers from attempting to offload
TLS packets if the connection is known to be out of sync.

We don't worry about RX resyncs since they will be retried naturally
as more encrypted records get received.

Signed-off-by: Dirk van der Merwe <dirk.vandermerwe@netronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
drivers/net/ethernet/netronome/nfp/crypto/tls.c
include/net/tls.h
net/tls/tls_device.c

index f8b93b62a7d206d33dfd33dc9b14f2e53f94dba4..ca07c86427a7bae9ad950906c594067dbd531823 100644 (file)
@@ -160,9 +160,9 @@ static void mlx5e_tls_del(struct net_device *netdev,
                                direction == TLS_OFFLOAD_CTX_DIR_TX);
 }
 
-static void mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
-                            u32 seq, u8 *rcd_sn_data,
-                            enum tls_offload_ctx_dir direction)
+static int mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
+                           u32 seq, u8 *rcd_sn_data,
+                           enum tls_offload_ctx_dir direction)
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -177,6 +177,8 @@ static void mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
                    be64_to_cpu(rcd_sn));
        mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
        atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply);
+
+       return 0;
 }
 
 static const struct tlsdev_ops mlx5e_tls_ops = {
index b49405b4af55b4989ebe6368534fca1ecbb13ff3..d448c6de8ea4099ce0d68a0e2ebea2738af627b0 100644 (file)
@@ -403,7 +403,7 @@ nfp_net_tls_del(struct net_device *netdev, struct tls_context *tls_ctx,
        nfp_net_tls_del_fw(nn, ntls->fw_handle);
 }
 
-static void
+static int
 nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
                   u8 *rcd_sn, enum tls_offload_ctx_dir direction)
 {
@@ -412,11 +412,12 @@ nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
        struct nfp_crypto_req_update *req;
        struct sk_buff *skb;
        gfp_t flags;
+       int err;
 
        flags = direction == TLS_OFFLOAD_CTX_DIR_TX ? GFP_KERNEL : GFP_ATOMIC;
        skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), flags);
        if (!skb)
-               return;
+               return -ENOMEM;
 
        ntls = tls_driver_ctx(sk, direction);
        req = (void *)skb->data;
@@ -428,13 +429,17 @@ nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
        memcpy(req->rec_no, rcd_sn, sizeof(req->rec_no));
 
        if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
-               nfp_net_tls_communicate_simple(nn, skb, "sync",
-                                              NFP_CCM_TYPE_CRYPTO_UPDATE);
+               err = nfp_net_tls_communicate_simple(nn, skb, "sync",
+                                                    NFP_CCM_TYPE_CRYPTO_UPDATE);
+               if (err)
+                       return err;
                ntls->next_seq = seq;
        } else {
                nfp_ccm_mbox_post(nn, skb, NFP_CCM_TYPE_CRYPTO_UPDATE,
                                  sizeof(struct nfp_crypto_reply_simple));
        }
+
+       return 0;
 }
 
 static const struct tlsdev_ops nfp_net_tls_ops = {
index 176d0b039f32c0e96f57208021370be760439a08..584609174fe007fbaea67da225363e2b91047c3b 100644 (file)
@@ -304,9 +304,9 @@ struct tlsdev_ops {
        void (*tls_dev_del)(struct net_device *netdev,
                            struct tls_context *ctx,
                            enum tls_offload_ctx_dir direction);
-       void (*tls_dev_resync)(struct net_device *netdev,
-                              struct sock *sk, u32 seq, u8 *rcd_sn,
-                              enum tls_offload_ctx_dir direction);
+       int (*tls_dev_resync)(struct net_device *netdev,
+                             struct sock *sk, u32 seq, u8 *rcd_sn,
+                             enum tls_offload_ctx_dir direction);
 };
 
 enum tls_offload_sync_type {
index 92fd1352c0377e40e5f84db36043ea73986e8f94..77fa3b5f2b490e1a85bb375e5ddd184ac3d03d25 100644 (file)
@@ -214,6 +214,7 @@ static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
 {
        struct net_device *netdev;
        struct sk_buff *skb;
+       int err = 0;
        u8 *rcd_sn;
 
        skb = tcp_write_queue_tail(sk);
@@ -225,9 +226,12 @@ static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
        down_read(&device_offload_lock);
        netdev = tls_ctx->netdev;
        if (netdev)
-               netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
-                                                  TLS_OFFLOAD_CTX_DIR_TX);
+               err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
+                                                        rcd_sn,
+                                                        TLS_OFFLOAD_CTX_DIR_TX);
        up_read(&device_offload_lock);
+       if (err)
+               return;
 
        clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
 }