Merge ra.kernel.org:/pub/scm/linux/kernel/git/netdev/net
author: David S. Miller <davem@davemloft.net>
Tue, 17 Sep 2019 21:51:10 +0000 (23:51 +0200)
committer: David S. Miller <davem@davemloft.net>
Tue, 17 Sep 2019 21:51:10 +0000 (23:51 +0200)
Pull in bug fixes from 'net' tree for the merge window.

Signed-off-by: David S. Miller <davem@davemloft.net>
16 files changed:
MAINTAINERS
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/xen-netfront.c
include/net/pkt_sched.h
include/net/sock_reuseport.h
net/core/dev.c
net/core/sock_reuseport.c
net/dsa/dsa2.c
net/ipv4/datagram.c
net/ipv4/udp.c
net/ipv6/datagram.c
net/ipv6/ip6_gre.c
net/ipv6/udp.c
net/rds/ib_stats.c
net/sched/sch_generic.c

index 32f4f05fb3da8b608066ea3d11af191275a21f56..dd39fc578607ab34f2c0159d53dc8d07e3f3275c 100644 (file)
@@ -649,6 +649,12 @@ M: Lino Sanfilippo <LinoSanfilippo@gmx.de>
 S:     Maintained
 F:     drivers/net/ethernet/alacritech/*
 
+FORCEDETH GIGABIT ETHERNET DRIVER
+M:     Rain River <rain.1986.08.12@gmail.com>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     drivers/net/ethernet/nvidia/*
+
 ALCATEL SPEEDTOUCH USB DRIVER
 M:     Duncan Sands <duncan.sands@free.fr>
 L:     linux-usb@vger.kernel.org
@@ -17673,7 +17679,7 @@ F:      Documentation/ABI/testing/sysfs-hypervisor-xen
 
 XEN NETWORK BACKEND DRIVER
 M:     Wei Liu <wei.liu@kernel.org>
-M:     Paul Durrant <paul.durrant@citrix.com>
+M:     Paul Durrant <paul@xen.org>
 L:     xen-devel@lists.xenproject.org (moderated for non-subscribers)
 L:     netdev@vger.kernel.org
 S:     Supported
index e4bf7a4af87ac2446a51d5fa12836a499a453313..c487d2a7d6dd04cfcc0c5693a6f0692a00753cd9 100644 (file)
@@ -824,7 +824,8 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
                above_thresh =
                        ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
                                                     ENA_TX_WAKEUP_THRESH);
-               if (netif_tx_queue_stopped(txq) && above_thresh) {
+               if (netif_tx_queue_stopped(txq) && above_thresh &&
+                   test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
                        netif_tx_wake_queue(txq);
                        u64_stats_update_begin(&tx_ring->syncp);
                        tx_ring->tx_stats.queue_wakeup++;
index c61d702fe83a176e2f84eb43f6a842ee1c2cb96e..a6cb2aa60e6485d4a0126d3330ac0c348a9c2baa 100644 (file)
@@ -4713,10 +4713,12 @@ int stmmac_suspend(struct device *dev)
        if (!ndev || !netif_running(ndev))
                return 0;
 
-       phylink_stop(priv->phylink);
-
        mutex_lock(&priv->lock);
 
+       rtnl_lock();
+       phylink_stop(priv->phylink);
+       rtnl_unlock();
+
        netif_device_detach(ndev);
        stmmac_stop_all_queues(priv);
 
@@ -4820,9 +4822,11 @@ int stmmac_resume(struct device *dev)
 
        stmmac_start_all_queues(priv);
 
-       mutex_unlock(&priv->lock);
-
+       rtnl_lock();
        phylink_start(priv->phylink);
+       rtnl_unlock();
+
+       mutex_unlock(&priv->lock);
 
        return 0;
 }
index b930d5f9522234c98116f2ea20b2c366f754a813..e14ec75b61d60776008b05d1bbd0b0a0d3055bbb 100644 (file)
@@ -906,7 +906,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
                }
                if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
-                       queue->rx.rsp_cons = ++cons;
+                       queue->rx.rsp_cons = ++cons + skb_queue_len(list);
                        kfree_skb(nskb);
                        return ~0U;
                }
index d1632979622e746abe163407bb0388e5ad278fbb..6a70845bd9ab00e3f3607fcf102d93c948ea6884 100644 (file)
@@ -118,7 +118,12 @@ void __qdisc_run(struct Qdisc *q);
 static inline void qdisc_run(struct Qdisc *q)
 {
        if (qdisc_run_begin(q)) {
-               __qdisc_run(q);
+               /* NOLOCK qdisc must check 'state' under the qdisc seqlock
+                * to avoid racing with dev_qdisc_reset()
+                */
+               if (!(q->flags & TCQ_F_NOLOCK) ||
+                   likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
+                       __qdisc_run(q);
                qdisc_run_end(q);
        }
 }
index d9112de852614ebaa07e4e584e2782c2706458c4..43f4a818d88f19a78a9052c25f5972f1ff44e240 100644 (file)
@@ -21,7 +21,8 @@ struct sock_reuseport {
        unsigned int            synq_overflow_ts;
        /* ID stays the same even after the size of socks[] grows. */
        unsigned int            reuseport_id;
-       bool                    bind_inany;
+       unsigned int            bind_inany:1;
+       unsigned int            has_conns:1;
        struct bpf_prog __rcu   *prog;          /* optional BPF sock selector */
        struct sock             *socks[0];      /* array of sock pointers */
 };
@@ -37,6 +38,23 @@ extern struct sock *reuseport_select_sock(struct sock *sk,
 extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
 extern int reuseport_detach_prog(struct sock *sk);
 
+static inline bool reuseport_has_conns(struct sock *sk, bool set)
+{
+       struct sock_reuseport *reuse;
+       bool ret = false;
+
+       rcu_read_lock();
+       reuse = rcu_dereference(sk->sk_reuseport_cb);
+       if (reuse) {
+               if (set)
+                       reuse->has_conns = 1;
+               ret = reuse->has_conns;
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+
 int reuseport_get_id(struct sock_reuseport *reuse);
 
 #endif  /* _SOCK_REUSEPORT_H */
index a9775d676285379cb82f2b69fb3e3b96003ae64d..71b18e80389faecd5af1942a91dae3b86f4f816d 100644 (file)
@@ -3467,18 +3467,22 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
        qdisc_calculate_pkt_len(skb, q);
 
        if (q->flags & TCQ_F_NOLOCK) {
-               if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
-                       __qdisc_drop(skb, &to_free);
-                       rc = NET_XMIT_DROP;
-               } else if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
-                          qdisc_run_begin(q)) {
+               if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
+                   qdisc_run_begin(q)) {
+                       if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
+                                             &q->state))) {
+                               __qdisc_drop(skb, &to_free);
+                               rc = NET_XMIT_DROP;
+                               goto end_run;
+                       }
                        qdisc_bstats_cpu_update(q, skb);
 
+                       rc = NET_XMIT_SUCCESS;
                        if (sch_direct_xmit(skb, q, dev, txq, NULL, true))
                                __qdisc_run(q);
 
+end_run:
                        qdisc_run_end(q);
-                       rc = NET_XMIT_SUCCESS;
                } else {
                        rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
                        qdisc_run(q);
index 9408f9264d052298a4eb85e0fb232c89af533e5c..f3ceec93f3923d864bde1aba9c26d9b6bfe1f97e 100644 (file)
@@ -295,8 +295,19 @@ struct sock *reuseport_select_sock(struct sock *sk,
 
 select_by_hash:
                /* no bpf or invalid bpf result: fall back to hash usage */
-               if (!sk2)
-                       sk2 = reuse->socks[reciprocal_scale(hash, socks)];
+               if (!sk2) {
+                       int i, j;
+
+                       i = j = reciprocal_scale(hash, socks);
+                       while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
+                               i++;
+                               if (i >= reuse->num_socks)
+                                       i = 0;
+                               if (i == j)
+                                       goto out;
+                       }
+                       sk2 = reuse->socks[i];
+               }
        }
 
 out:
index b501c90aabe45e16741f17ff663917bc82464a2e..73002022c9d820df95b9df1841ce3724c4d41070 100644 (file)
@@ -644,6 +644,8 @@ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master)
        tag_protocol = ds->ops->get_tag_protocol(ds, dp->index);
        tag_ops = dsa_tag_driver_get(tag_protocol);
        if (IS_ERR(tag_ops)) {
+               if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
+                       return -EPROBE_DEFER;
                dev_warn(ds->dev, "No tagger for this switch\n");
                return PTR_ERR(tag_ops);
        }
index 7bd29e694603a8a19d7c114c2acae847e3fdd170..9a0fe0c2fa02c9707e6fc8c02529a48e84f7d680 100644 (file)
@@ -15,6 +15,7 @@
 #include <net/sock.h>
 #include <net/route.h>
 #include <net/tcp_states.h>
+#include <net/sock_reuseport.h>
 
 int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
@@ -69,6 +70,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
        }
        inet->inet_daddr = fl4->daddr;
        inet->inet_dport = usin->sin_port;
+       reuseport_has_conns(sk, true);
        sk->sk_state = TCP_ESTABLISHED;
        sk_set_txhash(sk);
        inet->inet_id = jiffies;
index fbcd9be3a470f5f6d2597bb948788171b8b66199..cf755156a684373f92c639c274f0fb4ab62aa211 100644 (file)
@@ -423,12 +423,13 @@ static struct sock *udp4_lib_lookup2(struct net *net,
                score = compute_score(sk, net, saddr, sport,
                                      daddr, hnum, dif, sdif);
                if (score > badness) {
-                       if (sk->sk_reuseport) {
+                       if (sk->sk_reuseport &&
+                           sk->sk_state != TCP_ESTABLISHED) {
                                hash = udp_ehashfn(net, daddr, hnum,
                                                   saddr, sport);
                                result = reuseport_select_sock(sk, hash, skb,
                                                        sizeof(struct udphdr));
-                               if (result)
+                               if (result && !reuseport_has_conns(sk, false))
                                        return result;
                        }
                        badness = score;
index 9ab897ded4df52d882cda1414ef0159f3eb1765a..96f939248d2ff5e512c58cfdcb063d7b42af8766 100644 (file)
@@ -27,6 +27,7 @@
 #include <net/ip6_route.h>
 #include <net/tcp_states.h>
 #include <net/dsfield.h>
+#include <net/sock_reuseport.h>
 
 #include <linux/errqueue.h>
 #include <linux/uaccess.h>
@@ -254,6 +255,7 @@ ipv4_connected:
                goto out;
        }
 
+       reuseport_has_conns(sk, true);
        sk->sk_state = TCP_ESTABLISHED;
        sk_set_txhash(sk);
 out:
index dd2d0b96326074d3255eee91891938dc1d948483..d5779d6a60650ba9ce6a72d6246a829e2df733e0 100644 (file)
@@ -968,7 +968,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
                if (unlikely(!tun_info ||
                             !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
                             ip_tunnel_info_af(tun_info) != AF_INET6))
-                       return -EINVAL;
+                       goto tx_err;
 
                key = &tun_info->key;
                memset(&fl6, 0, sizeof(fl6));
index 2c8beb3896d17ea8bd482d9106f8a8896cab2053..aae4938f3deab284ecfd78f0c601647fbb9b03d4 100644 (file)
@@ -158,13 +158,14 @@ static struct sock *udp6_lib_lookup2(struct net *net,
                score = compute_score(sk, net, saddr, sport,
                                      daddr, hnum, dif, sdif);
                if (score > badness) {
-                       if (sk->sk_reuseport) {
+                       if (sk->sk_reuseport &&
+                           sk->sk_state != TCP_ESTABLISHED) {
                                hash = udp6_ehashfn(net, daddr, hnum,
                                                    saddr, sport);
 
                                result = reuseport_select_sock(sk, hash, skb,
                                                        sizeof(struct udphdr));
-                               if (result)
+                               if (result && !reuseport_has_conns(sk, false))
                                        return result;
                        }
                        result = sk;
index 9252ad126335971fa202e5aee6e2babb55d969e0..ac46d8961b61a199032fbed88337558a713a1845 100644 (file)
@@ -42,7 +42,7 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
 static const char *const rds_ib_stat_names[] = {
        "ib_connect_raced",
        "ib_listen_closed_stale",
-       "s_ib_evt_handler_call",
+       "ib_evt_handler_call",
        "ib_tasklet_call",
        "ib_tx_cq_event",
        "ib_tx_ring_full",
index ac28f6a5d70e0b38ae8ce7858f08e9d15778c22f..17bd8f539bc7f1d596e97c713467f953802c9b82 100644 (file)
@@ -985,6 +985,9 @@ static void qdisc_destroy(struct Qdisc *qdisc)
 
 void qdisc_put(struct Qdisc *qdisc)
 {
+       if (!qdisc)
+               return;
+
        if (qdisc->flags & TCQ_F_BUILTIN ||
            !refcount_dec_and_test(&qdisc->refcnt))
                return;