net/hyperv: Add flow control based on hi/low watermark
author	Haiyang Zhang <haiyangz@microsoft.com>	Tue, 27 Mar 2012 13:20:45 +0000 (13:20 +0000)
committer	David S. Miller <davem@davemloft.net>	Tue, 3 Apr 2012 21:47:15 +0000 (17:47 -0400)
In the existing code, we only stop the queue when the ring buffer is
completely full, so the current packet has to be dropped or retried from
the upper layer.

This patch stops the tx queue when the available ring buffer space falls
below the low watermark, so the ring buffer still keeps a small amount of
room for the current packet. The queue is woken again from the
send-completion path once the available space rises back above the high
watermark (or no sends remain outstanding). This reduces the overhead of
retries on sending.
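
For reference, the stop/wake policy that the hunks below implement can be
boiled down to the following stand-alone sketch. The helper names
(should_stop_after_send, should_wake_on_completion) and the free-standing
form are illustrative only and not part of the patch; in the driver the
same checks sit inline around netif_stop_queue()/netif_wake_queue(), with
hv_ringbuf_avail_percent() supplying avail_percent and
net_device->num_outstanding_sends supplying the in-flight count.

	#include <stdbool.h>

	#define RING_AVAIL_PERCENT_HIWATER 20	/* wake the tx queue above this */
	#define RING_AVAIL_PERCENT_LOWATER 10	/* stop the tx queue below this */

	/* Decide whether the tx queue should be stopped after queuing a packet. */
	static bool should_stop_after_send(unsigned int avail_percent,
					   int outstanding_sends)
	{
		/*
		 * Stop once free space drops below the low watermark, unless no
		 * sends are outstanding -- with nothing in flight there would be
		 * no completion left to wake the queue again.
		 */
		return avail_percent < RING_AVAIL_PERCENT_LOWATER &&
		       outstanding_sends >= 1;
	}

	/* Decide whether a send completion should wake a stopped tx queue. */
	static bool should_wake_on_completion(unsigned int avail_percent,
					      int outstanding_sends)
	{
		/*
		 * Wake only after free space has recovered above the high
		 * watermark, or when all outstanding sends have completed.
		 */
		return avail_percent > RING_AVAIL_PERCENT_HIWATER ||
		       outstanding_sends < 1;
	}

Stopping at 10% free space but only waking again at 20% gives the queue
hysteresis, so it does not flap on and off while the ring hovers around a
single threshold.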

Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Reviewed-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/hv/ring_buffer.c
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
include/linux/hyperv.h

diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 8af25a097d75ae2a189d5e5aa25c1b226c849dc7..7233c88f01b8366547467215dc7c8492f03daacb 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
 #include "hyperv_vmbus.h"
 
 
-/* #defines */
-
-
-/* Amount of space to write to */
-#define BYTES_AVAIL_TO_WRITE(r, w, z) \
-       ((w) >= (r)) ? ((z) - ((w) - (r))) : ((r) - (w))
-
-
-/*
- *
- * hv_get_ringbuffer_availbytes()
- *
- * Get number of bytes available to read and to write to
- * for the specified ring buffer
- */
-static inline void
-hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
-                         u32 *read, u32 *write)
-{
-       u32 read_loc, write_loc;
-
-       smp_read_barrier_depends();
-
-       /* Capture the read/write indices before they changed */
-       read_loc = rbi->ring_buffer->read_index;
-       write_loc = rbi->ring_buffer->write_index;
-
-       *write = BYTES_AVAIL_TO_WRITE(read_loc, write_loc, rbi->ring_datasize);
-       *read = rbi->ring_datasize - *write;
-}
-
 /*
  * hv_get_next_write_location()
  *
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d025c83cd12a09fb4f701abce880f92aaed47b05..8b919471472fb1dba4d34ffcf0bfe5b4c723af7b 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -428,6 +428,24 @@ int netvsc_device_remove(struct hv_device *device)
        return 0;
 }
 
+
+#define RING_AVAIL_PERCENT_HIWATER 20
+#define RING_AVAIL_PERCENT_LOWATER 10
+
+/*
+ * Get the percentage of available bytes to write in the ring.
+ * The return value is in range from 0 to 100.
+ */
+static inline u32 hv_ringbuf_avail_percent(
+               struct hv_ring_buffer_info *ring_info)
+{
+       u32 avail_read, avail_write;
+
+       hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);
+
+       return avail_write * 100 / ring_info->ring_datasize;
+}
+
 static void netvsc_send_completion(struct hv_device *device,
                                   struct vmpacket_descriptor *packet)
 {
@@ -455,6 +473,8 @@ static void netvsc_send_completion(struct hv_device *device,
                complete(&net_device->channel_init_wait);
        } else if (nvsp_packet->hdr.msg_type ==
                   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
+               int num_outstanding_sends;
+
                /* Get the send context */
                nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
                        packet->trans_id;
@@ -463,10 +483,14 @@ static void netvsc_send_completion(struct hv_device *device,
                nvsc_packet->completion.send.send_completion(
                        nvsc_packet->completion.send.send_completion_ctx);
 
-               atomic_dec(&net_device->num_outstanding_sends);
+               num_outstanding_sends =
+                       atomic_dec_return(&net_device->num_outstanding_sends);
 
-               if (netif_queue_stopped(ndev) && !net_device->start_remove)
-                       netif_wake_queue(ndev);
+               if (netif_queue_stopped(ndev) && !net_device->start_remove &&
+                       (hv_ringbuf_avail_percent(&device->channel->outbound)
+                       > RING_AVAIL_PERCENT_HIWATER ||
+                       num_outstanding_sends < 1))
+                               netif_wake_queue(ndev);
        } else {
                netdev_err(ndev, "Unknown send completion packet type- "
                           "%d received!!\n", nvsp_packet->hdr.msg_type);
@@ -519,10 +543,19 @@ int netvsc_send(struct hv_device *device,
 
        if (ret == 0) {
                atomic_inc(&net_device->num_outstanding_sends);
+               if (hv_ringbuf_avail_percent(&device->channel->outbound) <
+                       RING_AVAIL_PERCENT_LOWATER) {
+                       netif_stop_queue(ndev);
+                       if (atomic_read(&net_device->
+                               num_outstanding_sends) < 1)
+                               netif_wake_queue(ndev);
+               }
        } else if (ret == -EAGAIN) {
                netif_stop_queue(ndev);
-               if (atomic_read(&net_device->num_outstanding_sends) < 1)
+               if (atomic_read(&net_device->num_outstanding_sends) < 1) {
                        netif_wake_queue(ndev);
+                       ret = -ENOSPC;
+               }
        } else {
                netdev_err(ndev, "Unable to send packet %p ret %d\n",
                           packet, ret);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index dd294783b5c5b77c3d48ecd14067921d9e90d4bb..a0cc12786be441c9f1cabad455834c329b669929 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -224,9 +224,13 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
                net->stats.tx_packets++;
        } else {
                kfree(packet);
+               if (ret != -EAGAIN) {
+                       dev_kfree_skb_any(skb);
+                       net->stats.tx_dropped++;
+               }
        }
 
-       return ret ? NETDEV_TX_BUSY : NETDEV_TX_OK;
+       return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
 }
 
 /*
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 5852545e6bba77423c16e4577efc40c4450131a9..6af8738ae7e976855271e4226fac24896c901a71 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -274,6 +274,33 @@ struct hv_ring_buffer_debug_info {
        u32 bytes_avail_towrite;
 };
 
+
+/*
+ *
+ * hv_get_ringbuffer_availbytes()
+ *
+ * Get number of bytes available to read and to write to
+ * for the specified ring buffer
+ */
+static inline void
+hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
+                         u32 *read, u32 *write)
+{
+       u32 read_loc, write_loc, dsize;
+
+       smp_read_barrier_depends();
+
+       /* Capture the read/write indices before they changed */
+       read_loc = rbi->ring_buffer->read_index;
+       write_loc = rbi->ring_buffer->write_index;
+       dsize = rbi->ring_datasize;
+
+       *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
+               read_loc - write_loc;
+       *read = dsize - *write;
+}
+
+
 /*
  * We use the same version numbering for all Hyper-V modules.
  *