Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index ac9477189d1cc2e9013af88d549fa075144d4e8c..92b4648e75cabed98417188ecbffdc8affec1bfc 100644
 #include <linux/notifier.h>
 
 #ifdef CONFIG_XFRM_OFFLOAD
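+/* Prepare an skb for transmission with ESP offload.  Returns the skb
+ * (possibly replaced by a software-segmented list) ready for the device,
+ * or NULL if it was consumed or dropped.  *again is set when the per-cpu
+ * xfrm backlog must be drained first to preserve packet ordering.
+ */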
-int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
 {
        int err;
+       unsigned long flags;
        struct xfrm_state *x;
+       struct sk_buff *skb2;
+       struct softnet_data *sd;
+       netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
 
-       if (skb_is_gso(skb))
-               return 0;
+       if (!xo)
+               return skb;
 
-       if (xo) {
-               x = skb->sp->xvec[skb->sp->len - 1];
-               if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
-                       return 0;
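+       /* Without hardware ESP support the packet will be encrypted in
+        * software, so scatter-gather and checksum offload cannot be
+        * used on the resulting packet.
+        */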
+       if (!(features & NETIF_F_HW_ESP))
+               esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+
+       x = skb->sp->xvec[skb->sp->len - 1];
+       if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
+               return skb;
+
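+       /* Packets already sit in the per-cpu xfrm backlog awaiting
+        * resume; ask the caller to retry later so ordering with the
+        * backlogged packets is preserved.
+        */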
+       local_irq_save(flags);
+       sd = this_cpu_ptr(&softnet_data);
+       err = !skb_queue_empty(&sd->xfrm_backlog);
+       local_irq_restore(flags);
 
+       if (err) {
+               *again = true;
+               return skb;
+       }
+
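+       /* GSO packets still bound to the offloading device are left
+        * intact for the hardware to segment.
+        */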
+       if (skb_is_gso(skb)) {
+               struct net_device *dev = skb->dev;
+
+               if (unlikely(!x->xso.offload_handle || (x->xso.dev != dev))) {
+                       struct sk_buff *segs;
+
+                       /* Packet got rerouted, fix up features and segment it. */
+                       esp_features = esp_features & ~(NETIF_F_HW_ESP
+                                                       | NETIF_F_GSO_ESP);
+
+                       segs = skb_gso_segment(skb, esp_features);
+                       if (IS_ERR(segs)) {
+                               kfree_skb(skb);
+                               atomic_long_inc(&dev->tx_dropped);
+                               return NULL;
+                       } else {
+                               consume_skb(skb);
+                               skb = segs;
+                       }
+               }
+       }
+
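+       /* A single skb (no segment list): transform it and hand it back. */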
+       if (!skb->next) {
                x->outer_mode->xmit(x, skb);
 
-               err = x->type_offload->xmit(x, skb, features);
+               xo->flags |= XFRM_DEV_RESUME;
+
+               err = x->type_offload->xmit(x, skb, esp_features);
                if (err) {
+                       if (err == -EINPROGRESS)
+                               return NULL;
+
                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
-                       return err;
+                       kfree_skb(skb);
+                       return NULL;
                }
 
                skb_push(skb, skb->data - skb_mac_header(skb));
+
+               return skb;
        }
 
-       return 0;
+       skb2 = skb;
+
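+       /* Transform each software segment.  A segment accepted for
+        * asynchronous crypto (-EINPROGRESS) is unlinked from the list
+        * and re-injected by xfrm_dev_resume() on completion.
+        */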
+       do {
+               struct sk_buff *nskb = skb2->next;
+               skb2->next = NULL;
+
+               xo = xfrm_offload(skb2);
+               xo->flags |= XFRM_DEV_RESUME;
+
+               x->outer_mode->xmit(x, skb2);
+
+               err = x->type_offload->xmit(x, skb2, esp_features);
+               if (!err) {
+                       skb2->next = nskb;
+               } else if (err != -EINPROGRESS) {
+                       XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+                       skb2->next = nskb;
+                       kfree_skb_list(skb2);
+                       return NULL;
+               } else {
+                       if (skb == skb2)
+                               skb = nskb;
+
+                       if (!skb)
+                               return NULL;
+
+                       goto skip_push;
+               }
+
+               skb_push(skb2, skb2->data - skb_mac_header(skb2));
+
+skip_push:
+               skb2 = nskb;
+       } while (skb2);
+
+       return skb;
 }
 EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
 
@@ -67,7 +149,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
 
        /* We don't yet support UDP encapsulation, TFC padding and ESN. */
        if (x->encap || x->tfcpad || (x->props.flags & XFRM_STATE_ESN))
-               return 0;
+               return -EINVAL;
 
        dev = dev_get_by_index(net, xuo->ifindex);
        if (!dev) {
@@ -121,8 +203,8 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
        if (!x->type_offload || x->encap)
                return false;
 
-       if ((x->xso.offload_handle && (dev == dst->path->dev)) &&
-            !dst->child->xfrm && x->type->get_mtu) {
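+       /* With no offload device bound, fall through to the MTU check;
+        * otherwise the state must be offloaded to the dst path's device.
+        */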
+       if ((!dev || (x->xso.offload_handle && (dev == xfrm_dst_path(dst)->dev))) &&
+            (!xdst->child->xfrm && x->type->get_mtu)) {
                mtu = x->type->get_mtu(x, xdst->child_mtu_cached);
 
                if (skb->len <= mtu)
@@ -141,19 +223,82 @@ ok:
        return true;
 }
 EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
+
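+/* Resume transmission of an skb after asynchronous crypto has completed:
+ * try to send it directly; if the TX queue is frozen or stopped, defer
+ * the skb to the per-cpu xfrm backlog and schedule the TX softirq.
+ */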
+void xfrm_dev_resume(struct sk_buff *skb)
+{
+       struct net_device *dev = skb->dev;
+       int ret = NETDEV_TX_BUSY;
+       struct netdev_queue *txq;
+       struct softnet_data *sd;
+       unsigned long flags;
+
+       rcu_read_lock();
+       txq = netdev_pick_tx(dev, skb, NULL);
+
+       HARD_TX_LOCK(dev, txq, smp_processor_id());
+       if (!netif_xmit_frozen_or_stopped(txq))
+               skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+       HARD_TX_UNLOCK(dev, txq);
+
+       if (!dev_xmit_complete(ret)) {
+               local_irq_save(flags);
+               sd = this_cpu_ptr(&softnet_data);
+               skb_queue_tail(&sd->xfrm_backlog, skb);
+               raise_softirq_irqoff(NET_TX_SOFTIRQ);
+               local_irq_restore(flags);
+       }
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(xfrm_dev_resume);
+
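+/* Drain the per-cpu xfrm backlog from the TX softirq, resuming each
+ * deferred skb.
+ */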
+void xfrm_dev_backlog(struct softnet_data *sd)
+{
+       struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
+       struct sk_buff_head list;
+       struct sk_buff *skb;
+
+       if (skb_queue_empty(xfrm_backlog))
+               return;
+
+       __skb_queue_head_init(&list);
+
+       spin_lock(&xfrm_backlog->lock);
+       skb_queue_splice_init(xfrm_backlog, &list);
+       spin_unlock(&xfrm_backlog->lock);
+
+       while (!skb_queue_empty(&list)) {
+               skb = __skb_dequeue(&list);
+               xfrm_dev_resume(skb);
+       }
+}
 #endif
 
-static int xfrm_dev_register(struct net_device *dev)
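+/* Common sanity check for the register and feature-change notifiers:
+ * a device advertising ESP offload must also provide the matching
+ * xfrmdev_ops callbacks.
+ */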
+static int xfrm_api_check(struct net_device *dev)
 {
-       if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
-               return NOTIFY_BAD;
+#ifdef CONFIG_XFRM_OFFLOAD
        if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
            !(dev->features & NETIF_F_HW_ESP))
                return NOTIFY_BAD;
 
+       if ((dev->features & NETIF_F_HW_ESP) &&
+           (!(dev->xfrmdev_ops &&
+              dev->xfrmdev_ops->xdo_dev_state_add &&
+              dev->xfrmdev_ops->xdo_dev_state_delete)))
+               return NOTIFY_BAD;
+#else
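+       /* ESP offload features are invalid without CONFIG_XFRM_OFFLOAD. */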
+       if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
+               return NOTIFY_BAD;
+#endif
+
        return NOTIFY_DONE;
 }
 
+static int xfrm_dev_register(struct net_device *dev)
+{
+       return xfrm_api_check(dev);
+}
+
 static int xfrm_dev_unregister(struct net_device *dev)
 {
        xfrm_policy_cache_flush();
@@ -162,16 +307,7 @@ static int xfrm_dev_unregister(struct net_device *dev)
 
 static int xfrm_dev_feat_change(struct net_device *dev)
 {
-       if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
-               return NOTIFY_BAD;
-       else if (!(dev->features & NETIF_F_HW_ESP))
-               dev->xfrmdev_ops = NULL;
-
-       if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
-           !(dev->features & NETIF_F_HW_ESP))
-               return NOTIFY_BAD;
-
-       return NOTIFY_DONE;
+       return xfrm_api_check(dev);
 }
 
 static int xfrm_dev_down(struct net_device *dev)