net: tls: make the offload check helper take skb not socket
authorJakub Kicinski <kuba@kernel.org>
Tue, 13 Jun 2023 20:50:06 +0000 (13:50 -0700)
committerDavid S. Miller <davem@davemloft.net>
Thu, 15 Jun 2023 08:01:05 +0000 (09:01 +0100)
All callers of tls_is_sk_tx_device_offloaded() currently do
an equivalent of:

 if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))

Have the helper accept skb and do the skb->sk check locally.
Two drivers have local static inlines with similar wrappers
already.

While at it change the ifdef condition to TLS_DEVICE.
Only TLS_DEVICE selects SOCK_VALIDATE_XMIT, so the two are
equivalent. This makes removing the duplicated IS_ENABLED()
check in funeth more obviously correct.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Acked-by: Maxim Mikityanskiy <maxtram95@gmail.com>
Reviewed-by: Simon Horman <simon.horman@corigine.com>
Acked-by: Tariq Toukan <tariqt@nvidia.com>
Acked-by: Dimitris Michailidis <dmichail@fungible.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
12 files changed:
drivers/net/bonding/bond_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
drivers/net/ethernet/fungible/funeth/funeth_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
include/net/tls.h
net/tls/tls_device.c

index 007cec23a92f864cf8fe3621c05e13cec7144667..16405b84dc2f54150cea178bf2b66dc43c3ad444 100644 (file)
@@ -5442,7 +5442,7 @@ static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *sk
 {
        struct net_device *tls_netdev = rcu_dereference(tls_get_ctx(skb->sk)->netdev);
 
-       /* tls_netdev might become NULL, even if tls_is_sk_tx_device_offloaded
+       /* tls_netdev might become NULL, even if tls_is_skb_tx_device_offloaded
         * was true, if tls_device_down is running in parallel, but it's OK,
         * because bond_get_slave_by_dev has a NULL check.
         */
@@ -5461,7 +5461,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
                return NETDEV_TX_OK;
 
 #if IS_ENABLED(CONFIG_TLS_DEVICE)
-       if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
+       if (tls_is_skb_tx_device_offloaded(skb))
                return bond_tls_device_xmit(bond, skb, dev);
 #endif
 
index f0bc7396ce2b12c86951e83d92e581147deb84ff..2eb33a727bba3b6e6db281c4db508f5b46df4a9c 100644 (file)
@@ -1175,7 +1175,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
                txq = netdev_pick_tx(dev, skb, sb_dev);
                if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
                    skb->encapsulation ||
-                   cxgb4_is_ktls_skb(skb) ||
+                   tls_is_skb_tx_device_offloaded(skb) ||
                    (proto != IPPROTO_TCP && proto != IPPROTO_UDP))
                        txq = txq % pi->nqsets;
 
index 34546f5312eee59f1453262aab41624d12f27992..a9599ba26975764b7848a7bb12601bb44559f764 100644 (file)
@@ -497,11 +497,6 @@ struct cxgb4_uld_info {
 #endif
 };
 
-static inline bool cxgb4_is_ktls_skb(struct sk_buff *skb)
-{
-       return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
-}
-
 void cxgb4_uld_enable(struct adapter *adap);
 void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
 int cxgb4_unregister_uld(enum cxgb4_uld type);
index 46809e2d94ee08bf619398791daf034beb4f8405..98dd78551d89a656544f2d6be753ef0ceb159232 100644 (file)
@@ -1530,7 +1530,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 #endif /* CHELSIO_IPSEC_INLINE */
 
 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
-       if (cxgb4_is_ktls_skb(skb) &&
+       if (tls_is_skb_tx_device_offloaded(skb) &&
            (skb->len - skb_tcp_all_headers(skb)))
                return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
 #endif /* CHELSIO_TLS_DEVICE */
index 1a5fdd755e9e30124d80cbb385d8efad1d41530a..bcdc7fc2f4276ace76c6fd3a35a93fd918fed792 100644 (file)
@@ -1946,7 +1946,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
        tls_ctx = tls_get_ctx(skb->sk);
        tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
        /* Don't quit on NULL: if tls_device_down is running in parallel,
-        * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
+        * netdev might become NULL, even if tls_is_skb_tx_device_offloaded was
         * true. Rather continue processing this packet.
         */
        if (unlikely(tls_netdev && tls_netdev != dev))
index 706d81e39a54d3b1902e3ec0aafe8f29a75dff52..8ddefd3ec15b64ad0ede28dd58cbddf07af82dab 100644 (file)
@@ -348,8 +348,7 @@ netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        unsigned int tls_len = 0;
        unsigned int ndesc;
 
-       if (IS_ENABLED(CONFIG_TLS_DEVICE) && skb->sk &&
-           tls_is_sk_tx_device_offloaded(skb->sk)) {
+       if (tls_is_skb_tx_device_offloaded(skb)) {
                skb = fun_tls_tx(skb, q, &tls_len);
                if (unlikely(!skb))
                        goto dropped;
index c964644ee866e765480f2746d5441a8a103a64be..bac4717548c67e9e6ac900929de542e1b19b6bdf 100644 (file)
@@ -125,7 +125,7 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
 
 #ifdef CONFIG_MLX5_EN_TLS
        /* May send WQEs. */
-       if (mlx5e_ktls_skb_offloaded(skb))
+       if (tls_is_skb_tx_device_offloaded(skb))
                if (unlikely(!mlx5e_ktls_handle_tx_skb(dev, sq, skb,
                                                       &state->tls)))
                        return false;
index 0e4c0a093293a7e8872a7c751bfd8d5373f80935..efb2cf74ad6a3ab2bc853b519175ed76f55fd1e2 100644 (file)
@@ -846,7 +846,7 @@ bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
        tls_ctx = tls_get_ctx(skb->sk);
        tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
        /* Don't WARN on NULL: if tls_device_down is running in parallel,
-        * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
+        * netdev might become NULL, even if tls_is_skb_tx_device_offloaded was
         * true. Rather continue processing this packet.
         */
        if (WARN_ON_ONCE(tls_netdev && tls_netdev != netdev))
index 2dd78dd4ad65ba0eeb0b683d71d3ebe71f7f7dec..f87b65c560ea68ef764139d1039286c1a350efa1 100644 (file)
@@ -49,11 +49,6 @@ mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget)
        return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &c->async_icosq.state);
 }
 
-static inline bool mlx5e_ktls_skb_offloaded(struct sk_buff *skb)
-{
-       return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
-}
-
 static inline void
 mlx5e_ktls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg,
                         struct mlx5e_accel_tx_tls_state *state)
index b7cce746b5c0ad877d5bd7435a65d21c7e9aa61d..49f2f081ebb54a19876964111dfd8c19da86a521 100644 (file)
@@ -598,7 +598,7 @@ nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
 
        if (likely(!dp->ktls_tx))
                return skb;
-       if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
+       if (!tls_is_skb_tx_device_offloaded(skb))
                return skb;
 
        datalen = skb->len - skb_tcp_all_headers(skb);
@@ -666,7 +666,7 @@ void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
 
        if (!tls_handle)
                return;
-       if (WARN_ON_ONCE(!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)))
+       if (WARN_ON_ONCE(!tls_is_skb_tx_device_offloaded(skb)))
                return;
 
        datalen = skb->len - skb_tcp_all_headers(skb);
index b7d0f1e3058ba5e21fdb3f7c1ab25bee4d148bb8..5e71dd3df8ca0450b443814cd130059fe8ad5fc2 100644 (file)
@@ -370,10 +370,12 @@ struct sk_buff *
 tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
                         struct sk_buff *skb);
 
-static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
+static inline bool tls_is_skb_tx_device_offloaded(const struct sk_buff *skb)
 {
-#ifdef CONFIG_SOCK_VALIDATE_XMIT
-       return sk_fullsock(sk) &&
+#ifdef CONFIG_TLS_DEVICE
+       struct sock *sk = skb->sk;
+
+       return sk && sk_fullsock(sk) &&
               (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
               &tls_validate_xmit_skb);
 #else
index b4864d55900fbd02bf9fb382a460b71995013576..b82770f68807a8cd843a17d4012591ded5b5d64b 100644 (file)
@@ -1219,7 +1219,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
        tls_device_attach(ctx, sk, netdev);
        up_read(&device_offload_lock);
 
-       /* following this assignment tls_is_sk_tx_device_offloaded
+       /* following this assignment tls_is_skb_tx_device_offloaded
         * will return true and the context might be accessed
         * by the netdev's xmit function.
         */
@@ -1372,7 +1372,7 @@ static int tls_device_down(struct net_device *netdev)
 
        list_for_each_entry_safe(ctx, tmp, &list, list) {
                /* Stop offloaded TX and switch to the fallback.
-                * tls_is_sk_tx_device_offloaded will return false.
+                * tls_is_skb_tx_device_offloaded will return false.
                 */
                WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);