net/ipv4/udp_offload.c
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	UDPv4 GSO support
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>

static DEFINE_SPINLOCK(udp_offload_lock);
static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;

#define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock))

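/* Per-port UDP offload registry: a singly linked, RCU-protected list
 * headed by udp_offload_base.  Writers serialize on udp_offload_lock;
 * the GRO path walks the list under rcu_read_lock().
 */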
struct udp_offload_priv {
	struct udp_offload	*offload;
	struct rcu_head		rcu;
	struct udp_offload_priv __rcu *next;
};

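/* Prime a UFO packet for checksum offload: seed uh->check with the
 * pseudo-header sum and point csum_start/csum_offset at the UDP
 * checksum field so it can be completed once the payload is final.
 */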
static int udp4_ufo_send_check(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		return -EINVAL;

	if (likely(!skb->encapsulation)) {
		const struct iphdr *iph;
		struct udphdr *uh;

		iph = ip_hdr(skb);
		uh = udp_hdr(skb);

		uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
				IPPROTO_UDP, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		skb->ip_summed = CHECKSUM_PARTIAL;
	}

	return 0;
}

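/* Segment a UDP tunnel packet: strip the outer headers, GSO-segment
 * the inner frame, then rebuild the outer headers on every resulting
 * segment and fix up the outer UDP length and (optionally) checksum.
 */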
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	u16 mac_offset = skb->mac_header;
	int mac_len = skb->mac_len;
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	__be16 protocol = skb->protocol;
	netdev_features_t enc_features;
	int udp_offset, outer_hlen;
	unsigned int oldlen;
	bool need_csum;

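	/* One's complement of the original length; combined with each
	 * segment's new length below to derive the UDP checksum delta.
	 */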
	oldlen = (u16)~skb->len;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	skb->encapsulation = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = htons(ETH_P_TEB);

	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	if (need_csum)
		skb->encap_hdr_csum = 1;

	/* segment inner packet. */
	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
	segs = skb_mac_gso_segment(skb, enc_features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	outer_hlen = skb_tnl_header_len(skb);
	udp_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		struct udphdr *uh;
		int len;

		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;

		skb->mac_len = mac_len;

		skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, udp_offset);
		len = skb->len - udp_offset;
		uh = udp_hdr(skb);
		uh->len = htons(len);

		if (need_csum) {
			__be32 delta = htonl(oldlen + len);

			uh->check = ~csum_fold((__force __wsum)
					       ((__force u32)uh->check +
						(__force u32)delta));
			uh->check = gso_make_checksum(skb, ~uh->check);

			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
		}

		skb->protocol = protocol;
	} while ((skb = skb->next));
out:
	return segs;
}

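/* GSO callback for UDPv4: tunnel packets go through
 * skb_udp_tunnel_segment(); plain UFO packets get their UDP checksum
 * completed in software before being split with skb_segment().
 */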
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	int offset;
	__wsum csum;

	if (skb->encapsulation &&
	    (skb_shinfo(skb)->gso_type &
	     (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
		segs = skb_udp_tunnel_segment(skb, features);
		goto out;
	}

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
				      SKB_GSO_UDP_TUNNEL |
				      SKB_GSO_UDP_TUNNEL_CSUM |
				      SKB_GSO_IPIP |
				      SKB_GSO_GRE | SKB_GSO_GRE_CSUM |
				      SKB_GSO_MPLS) ||
			     !(type & (SKB_GSO_UDP))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	/* Do software UFO. Complete and fill in the UDP checksum as
	 * HW cannot do checksum of UDP packets sent as multiple
	 * IP fragments.
	 */
	offset = skb_checksum_start_offset(skb);
	csum = skb_checksum(skb, offset, skb->len - offset, 0);
	offset += skb->csum_offset;
	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
	skb->ip_summed = CHECKSUM_NONE;

	/* Fragment the skb. IP headers of the fragments are updated in
	 * inet_gso_segment()
	 */
	segs = skb_segment(skb, features);
out:
	return segs;
}

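/* Register a UDP port offload by prepending it to the RCU list, making
 * it visible to udp_gro_receive()/udp_gro_complete().
 */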
int udp_add_offload(struct udp_offload *uo)
{
	struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);

	if (!new_offload)
		return -ENOMEM;

	new_offload->offload = uo;

	spin_lock(&udp_offload_lock);
	new_offload->next = udp_offload_base;
	rcu_assign_pointer(udp_offload_base, new_offload);
	spin_unlock(&udp_offload_lock);

	return 0;
}
EXPORT_SYMBOL(udp_add_offload);

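/* RCU callback that frees an unlinked entry once the grace period has
 * elapsed.
 */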
static void udp_offload_free_routine(struct rcu_head *head)
{
	struct udp_offload_priv *ou_priv = container_of(head, struct udp_offload_priv, rcu);

	kfree(ou_priv);
}

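/* Unregister a UDP port offload: unlink the entry under the spinlock
 * and free it via call_rcu() so concurrent readers still traversing
 * the list never see freed memory.
 */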
void udp_del_offload(struct udp_offload *uo)
{
	struct udp_offload_priv __rcu **head = &udp_offload_base;
	struct udp_offload_priv *uo_priv;

	spin_lock(&udp_offload_lock);

	uo_priv = udp_deref_protected(*head);
	for (; uo_priv != NULL;
	     uo_priv = udp_deref_protected(*head)) {
		if (uo_priv->offload == uo) {
			rcu_assign_pointer(*head,
					   udp_deref_protected(uo_priv->next));
			goto unlock;
		}
		head = &uo_priv->next;
	}
	pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
unlock:
	spin_unlock(&udp_offload_lock);
	if (uo_priv != NULL)
		call_rcu(&uo_priv->rcu, udp_offload_free_routine);
}
EXPORT_SYMBOL(udp_del_offload);

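/* GRO receive for UDP: match uh->dest against the registered offloads
 * and, on a hit, pull the UDP header and hand the packet to the
 * tunnel's gro_receive callback for inner aggregation.
 */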
static struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct udp_offload_priv *uo_priv;
	struct sk_buff *p, **pp = NULL;
	struct udphdr *uh, *uh2;
	unsigned int hlen, off;
	int flush = 1;

	if (NAPI_GRO_CB(skb)->udp_mark ||
	    (!skb->encapsulation && skb->ip_summed != CHECKSUM_COMPLETE))
		goto out;

	/* mark that this skb passed once through the udp gro layer */
	NAPI_GRO_CB(skb)->udp_mark = 1;

	off  = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh   = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		uh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!uh))
			goto out;
	}

	rcu_read_lock();
	uo_priv = rcu_dereference(udp_offload_base);
	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
		if (uo_priv->offload->port == uh->dest &&
		    uo_priv->offload->callbacks.gro_receive)
			goto unflush;
	}
	goto out_unlock;

unflush:
	flush = 0;

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

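		/* source and dest are adjacent __be16s in the header,
		 * so a single u32 compare matches both ports at once.
		 */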
		uh2 = (struct udphdr *)(p->data + off);
		if (*(u32 *)&uh->source != *(u32 *)&uh2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
	pp = uo_priv->offload->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	return pp;
}

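/* GRO complete for UDP: patch the UDP length of the merged packet and
 * invoke the matching offload's gro_complete on the inner headers.
 */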
static int udp_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct udp_offload_priv *uo_priv;
	__be16 newlen = htons(skb->len - nhoff);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
	int err = -ENOSYS;

	uh->len = newlen;

	rcu_read_lock();

	uo_priv = rcu_dereference(udp_offload_base);
	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
		if (uo_priv->offload->port == uh->dest &&
		    uo_priv->offload->callbacks.gro_complete)
			break;
	}

	if (uo_priv != NULL)
		err = uo_priv->offload->callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr));

	rcu_read_unlock();
	return err;
}

static const struct net_offload udpv4_offload = {
	.callbacks = {
		.gso_send_check = udp4_ufo_send_check,
		.gso_segment = udp4_ufo_fragment,
		.gro_receive  = udp_gro_receive,
		.gro_complete = udp_gro_complete,
	},
};

int __init udpv4_offload_init(void)
{
	return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}