/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              The Internet Protocol (IP) output module.
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Donald Becker, <becker@super.org>
 *              Alan Cox, <Alan.Cox@linux.org>
 *              Richard Underwood
 *              Stefan Becker, <stefanb@yello.ping.de>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *      See ip_input.c for original log
 *
 *      Fixes:
 *              Alan Cox        :       Missing nonblock feature in ip_build_xmit.
 *              Mike Kilburn    :       htons() missing in ip_build_xmit.
 *              Bradford Johnson:       Fix faulty handling of some frames when
 *                                      no route is found.
 *              Alexander Demenshin:    Missing sk/skb free in ip_queue_xmit
 *                                      (in case the packet is not accepted
 *                                      by output firewall rules)
 *              Mike McLagan    :       Routing by source
 *              Alexey Kuznetsov:       use new route cache
 *              Andi Kleen:             Fix broken PMTU recovery and remove
 *                                      some redundant tests.
 *              Vitaly E. Lavrov:       Transparent proxy revived after a year-long coma.
 *              Andi Kleen      :       Replace ip_reply with ip_send_reply.
 *              Andi Kleen      :       Split fast and slow ip_build_xmit path
 *                                      for decreased register pressure on x86
 *                                      and more readability.
 *              Marc Boucher    :       When call_out_firewall returns FW_QUEUE,
 *                                      silently drop skb instead of failing with -EPERM.
 *              Detlev Wengorz  :       Copy protocol for fragments.
 *              Hirokazu Takahashi:     HW checksumming for outgoing UDP
 *                                      datagrams.
 *              Hirokazu Takahashi:     sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;

/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
        iph->check = 0;
        iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);
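
/*
 * Usage sketch (illustrative only, not a caller in this file): any code
 * that edits IP header fields after the checksum was computed must call
 * ip_send_check() again, since iph->check is not updated automatically:
 *
 *      struct iphdr *iph = ip_hdr(skb);
 *
 *      iph->ttl = 64;          // any header modification ...
 *      ip_send_check(iph);     // ... requires recomputing iph->check
 */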

int __ip_local_out(struct sk_buff *skb)
{
        struct iphdr *iph = ip_hdr(skb);

        iph->tot_len = htons(skb->len);
        ip_send_check(iph);
        return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
                       skb_dst(skb)->dev, dst_output);
}

int ip_local_out(struct sk_buff *skb)
{
        int err;

        err = __ip_local_out(skb);
        if (likely(err == 1))
                err = dst_output(skb);

        return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);
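
/*
 * Note on the return convention, sketched under the usual NF_HOOK
 * semantics: nf_hook() returns 1 when the NF_INET_LOCAL_OUT hook accepted
 * the packet without consuming it, in which case the caller must invoke
 * the okfn itself.  That is exactly what ip_local_out() does above:
 *
 *      err = __ip_local_out(skb);      // run the LOCAL_OUT hook
 *      if (likely(err == 1))           // 1 == "passed, not consumed"
 *              err = dst_output(skb);  // continue down the output path
 */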

/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
        skb_reset_mac_header(newskb);
        __skb_pull(newskb, skb_network_offset(newskb));
        newskb->pkt_type = PACKET_LOOPBACK;
        newskb->ip_summed = CHECKSUM_UNNECESSARY;
        WARN_ON(!skb_dst(newskb));
        netif_rx_ni(newskb);
        return 0;
}

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
        int ttl = inet->uc_ttl;

        if (ttl < 0)
                ttl = dst_metric(dst, RTAX_HOPLIMIT);
        return ttl;
}
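
/*
 * inet->uc_ttl defaults to -1 ("unset"), so the RTAX_HOPLIMIT route
 * metric normally decides.  A user-space sketch of the override path
 * (standard sockets API, nothing here is defined in this file):
 *
 *      int ttl = 5;
 *
 *      setsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl));
 *
 * After this, ip_select_ttl() returns 5 for unicast traffic on the
 * socket instead of the route's hop-limit metric.
 */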

/*
 *              Add an IP header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
                          __be32 saddr, __be32 daddr, struct ip_options *opt)
{
        struct inet_sock *inet = inet_sk(sk);
        struct rtable *rt = skb_rtable(skb);
        struct iphdr *iph;

        /* Build the IP header. */
        skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
        iph->version  = 4;
        iph->ihl      = 5;
        iph->tos      = inet->tos;
        if (ip_dont_fragment(sk, &rt->dst))
                iph->frag_off = htons(IP_DF);
        else
                iph->frag_off = 0;
        iph->ttl      = ip_select_ttl(inet, &rt->dst);
        iph->daddr    = rt->rt_dst;
        iph->saddr    = rt->rt_src;
        iph->protocol = sk->sk_protocol;
        ip_select_ident(iph, &rt->dst, sk);

        if (opt && opt->optlen) {
                iph->ihl += opt->optlen>>2;
                ip_options_build(skb, opt, daddr, rt, 0);
        }

        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;

        /* Send it out. */
        return ip_local_out(skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
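
/*
 * Caller contract, sketched (assumptions about callers, not code in this
 * file): the skb must already carry a route (skb_rtable() != NULL) and
 * enough headroom for the IP header plus options, since the header is
 * pushed in place rather than reallocated:
 *
 *      skb = alloc_skb(payload_len + MAX_HEADER, GFP_ATOMIC);
 *      skb_reserve(skb, MAX_HEADER);           // room for the push above
 *      // ... fill payload, attach dst via skb_dst_set() ...
 *      ip_build_and_send_pkt(skb, sk, saddr, daddr, opt);
 */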

static inline int ip_finish_output2(struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct rtable *rt = (struct rtable *)dst;
        struct net_device *dev = dst->dev;
        unsigned int hh_len = LL_RESERVED_SPACE(dev);

        if (rt->rt_type == RTN_MULTICAST) {
                IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
        } else if (rt->rt_type == RTN_BROADCAST)
                IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);

        /* Be paranoid, rather than too clever. */
        if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
                struct sk_buff *skb2;

                skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
                if (skb2 == NULL) {
                        kfree_skb(skb);
                        return -ENOMEM;
                }
                if (skb->sk)
                        skb_set_owner_w(skb2, skb->sk);
                kfree_skb(skb);
                skb = skb2;
        }

        if (dst->hh)
                return neigh_hh_output(dst->hh, skb);
        else if (dst->neighbour)
                return dst->neighbour->output(skb);

        if (net_ratelimit())
                printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
        kfree_skb(skb);
        return -EINVAL;
}

static inline int ip_skb_dst_mtu(struct sk_buff *skb)
{
        struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;

        return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
               skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
}
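
/*
 * The IP_PMTUDISC_PROBE mode tested above is selected from user space;
 * a minimal sketch (standard sockets API):
 *
 *      int val = IP_PMTUDISC_PROBE;
 *
 *      setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, sizeof(val));
 *
 * With this set, ip_skb_dst_mtu() reports the device MTU rather than the
 * (possibly smaller) cached path MTU, so a probe packet larger than the
 * path MTU still goes out unfragmented as long as it fits the link.
 */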

static int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
        /* Policy lookup after SNAT yielded a new policy */
        if (skb_dst(skb)->xfrm != NULL) {
                IPCB(skb)->flags |= IPSKB_REROUTED;
                return dst_output(skb);
        }
#endif
        if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
                return ip_fragment(skb, ip_finish_output2);
        else
                return ip_finish_output2(skb);
}

int ip_mc_output(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        struct rtable *rt = skb_rtable(skb);
        struct net_device *dev = rt->dst.dev;

        /*
         *      If the indicated interface is up and running, send the packet.
         */
        IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);

        /*
         *      Multicasts are looped back for other local users
         */

        if (rt->rt_flags&RTCF_MULTICAST) {
                if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
                /* Small optimization: do not loop back non-local frames
                   that were returned after forwarding; they will be dropped
                   by ip_mr_input in any case.
                   Note that local frames are looped back to be delivered
                   to local recipients.

                   This check is duplicated in ip_mr_input at the moment.
                 */
                    &&
                    ((rt->rt_flags & RTCF_LOCAL) ||
                     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
                   ) {
                        struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                        if (newskb)
                                NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
                                        newskb, NULL, newskb->dev,
                                        ip_dev_loopback_xmit);
                }

                /* Multicasts with ttl 0 must not go beyond the host */

                if (ip_hdr(skb)->ttl == 0) {
                        kfree_skb(skb);
                        return 0;
                }
        }

        if (rt->rt_flags&RTCF_BROADCAST) {
                struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                if (newskb)
                        NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
                                NULL, newskb->dev, ip_dev_loopback_xmit);
        }

        return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
                            skb->dev, ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_output(struct sk_buff *skb)
{
        struct net_device *dev = skb_dst(skb)->dev;

        IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);

        return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
                            ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_queue_xmit(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        struct inet_sock *inet = inet_sk(sk);
        struct ip_options *opt = inet->opt;
        struct rtable *rt;
        struct iphdr *iph;
        int res;

        /* Skip all of this if the packet is already routed,
         * f.e. by something like SCTP.
         */
        rcu_read_lock();
        rt = skb_rtable(skb);
        if (rt != NULL)
                goto packet_routed;

        /* Make sure we can route this packet. */
        rt = (struct rtable *)__sk_dst_check(sk, 0);
        if (rt == NULL) {
                __be32 daddr;

                /* Use correct destination address if we have options. */
                daddr = inet->inet_daddr;
                if (opt && opt->srr)
                        daddr = opt->faddr;

                {
                        struct flowi fl = { .oif = sk->sk_bound_dev_if,
                                            .mark = sk->sk_mark,
                                            .nl_u = { .ip4_u =
                                                      { .daddr = daddr,
                                                        .saddr = inet->inet_saddr,
                                                        .tos = RT_CONN_FLAGS(sk) } },
                                            .proto = sk->sk_protocol,
                                            .flags = inet_sk_flowi_flags(sk),
                                            .uli_u = { .ports =
                                                       { .sport = inet->inet_sport,
                                                         .dport = inet->inet_dport } } };
                        /* If this fails, the retransmit mechanism of the
                         * transport layer will keep trying until the route
                         * appears or the connection times out.
                         */
                        security_sk_classify_flow(sk, &fl);
                        if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
                                goto no_route;
                }
                sk_setup_caps(sk, &rt->dst);
        }
        skb_dst_set_noref(skb, &rt->dst);

packet_routed:
        if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
                goto no_route;

        /* OK, we know where to send it, allocate and build IP header. */
        skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
        *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
        if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
                iph->frag_off = htons(IP_DF);
        else
                iph->frag_off = 0;
        iph->ttl      = ip_select_ttl(inet, &rt->dst);
        iph->protocol = sk->sk_protocol;
        iph->saddr    = rt->rt_src;
        iph->daddr    = rt->rt_dst;
        /* The transport layer has already set skb->h.foo itself. */

        if (opt && opt->optlen) {
                iph->ihl += opt->optlen >> 2;
                ip_options_build(skb, opt, inet->inet_daddr, rt, 0);
        }

        ip_select_ident_more(iph, &rt->dst, sk,
                             (skb_shinfo(skb)->gso_segs ?: 1) - 1);

        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;

        res = ip_local_out(skb);
        rcu_read_unlock();
        return res;

no_route:
        rcu_read_unlock();
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
        return -EHOSTUNREACH;
}
EXPORT_SYMBOL(ip_queue_xmit);
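
/*
 * Typical use, sketched: a connected transport hands each segment to
 * ip_queue_xmit() with skb->sk set (TCP's queue_xmit hook points here),
 * leaving routing, header construction and netfilter traversal to this
 * function:
 *
 *      skb_set_owner_w(skb, sk);       // route/options come from the sock
 *      err = ip_queue_xmit(skb);       // -EHOSTUNREACH if no route
 */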


static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
        to->pkt_type = from->pkt_type;
        to->priority = from->priority;
        to->protocol = from->protocol;
        skb_dst_drop(to);
        skb_dst_copy(to, from);
        to->dev = from->dev;
        to->mark = from->mark;

        /* Copy the flags to each fragment. */
        IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
        to->tc_index = from->tc_index;
#endif
        nf_copy(to, from);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
        to->nf_trace = from->nf_trace;
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
        to->ipvs_property = from->ipvs_property;
#endif
        skb_copy_secmark(to, from);
}
/*
 *      This IP datagram is too large to be sent in one piece.  Break it up
 *      into smaller pieces (each consisting of an IP header plus a block of
 *      the original datagram's data) that will still fit in a single device
 *      frame, and queue such frames for sending.
 */

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
        struct iphdr *iph;
        int ptr;
        struct net_device *dev;
        struct sk_buff *skb2;
        unsigned int mtu, hlen, left, len, ll_rs;
        int offset;
        __be16 not_last_frag;
        struct rtable *rt = skb_rtable(skb);
        int err = 0;

        dev = rt->dst.dev;

        /*
         *      Point into the IP datagram header.
         */

        iph = ip_hdr(skb);

        if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                          htonl(ip_skb_dst_mtu(skb)));
                kfree_skb(skb);
                return -EMSGSIZE;
        }

        /*
         *      Setup starting values.
         */

        hlen = iph->ihl * 4;
        mtu = dst_mtu(&rt->dst) - hlen; /* Size of data space */
#ifdef CONFIG_BRIDGE_NETFILTER
        if (skb->nf_bridge)
                mtu -= nf_bridge_mtu_reduction(skb);
#endif
        IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
        /* When a frag_list is given, use it.  First, check its validity:
         * some transformers could create a wrong frag_list or break an
         * existing one; that is not prohibited.  In such cases, fall back
         * to copying.
         *
         * LATER: this step can be merged into the real generation of
         * fragments; we can switch to copying when we see the first bad
         * fragment.
         */
        if (skb_has_frags(skb)) {
                struct sk_buff *frag;
                int first_len = skb_pagelen(skb);
                int truesizes = 0;

                if (first_len - hlen > mtu ||
                    ((first_len - hlen) & 7) ||
                    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
                    skb_cloned(skb))
                        goto slow_path;

                skb_walk_frags(skb, frag) {
                        /* Correct geometry. */
                        if (frag->len > mtu ||
                            ((frag->len & 7) && frag->next) ||
                            skb_headroom(frag) < hlen)
                                goto slow_path;

                        /* Partially cloned skb? */
                        if (skb_shared(frag))
                                goto slow_path;

                        BUG_ON(frag->sk);
                        if (skb->sk) {
                                frag->sk = skb->sk;
                                frag->destructor = sock_wfree;
                        }
                        truesizes += frag->truesize;
                }

                /* Everything is OK. Generate! */

                err = 0;
                offset = 0;
                frag = skb_shinfo(skb)->frag_list;
                skb_frag_list_init(skb);
                skb->data_len = first_len - skb_headlen(skb);
                skb->truesize -= truesizes;
                skb->len = first_len;
                iph->tot_len = htons(first_len);
                iph->frag_off = htons(IP_MF);
                ip_send_check(iph);

                for (;;) {
                        /* Prepare the header of the next frame
                         * before the previous one has gone down. */
                        if (frag) {
                                frag->ip_summed = CHECKSUM_NONE;
                                skb_reset_transport_header(frag);
                                __skb_push(frag, hlen);
                                skb_reset_network_header(frag);
                                memcpy(skb_network_header(frag), iph, hlen);
                                iph = ip_hdr(frag);
                                iph->tot_len = htons(frag->len);
                                ip_copy_metadata(frag, skb);
                                if (offset == 0)
                                        ip_options_fragment(frag);
                                offset += skb->len - hlen;
                                iph->frag_off = htons(offset>>3);
                                if (frag->next != NULL)
                                        iph->frag_off |= htons(IP_MF);
                                /* Ready, complete checksum */
                                ip_send_check(iph);
                        }

                        err = output(skb);

                        if (!err)
                                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
                        if (err || !frag)
                                break;

                        skb = frag;
                        frag = skb->next;
                        skb->next = NULL;
                }

                if (err == 0) {
                        IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
                        return 0;
                }

                while (frag) {
                        skb = frag->next;
                        kfree_skb(frag);
                        frag = skb;
                }
                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
                return err;
        }

slow_path:
        left = skb->len - hlen;         /* Space per frame */
        ptr = hlen;             /* Where to start from */

        /* for bridged IP traffic encapsulated inside f.e. a vlan header,
         * we need to make room for the encapsulating header
         */
        ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));

        /*
         *      Fragment the datagram.
         */

        offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
        not_last_frag = iph->frag_off & htons(IP_MF);

        /*
         *      Keep copying data until we run out.
         */

        while (left > 0) {
                len = left;
                /* IF: it doesn't fit, use 'mtu' - the data space left */
                if (len > mtu)
                        len = mtu;
                /* IF: we are not sending up to and including the packet end,
                   then align the next start on an eight byte boundary */
                if (len < left) {
                        len &= ~7;
                }
                /*
                 *      Allocate buffer.
                 */

                if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
                        NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
                        err = -ENOMEM;
                        goto fail;
                }

                /*
                 *      Set up data on packet
                 */

                ip_copy_metadata(skb2, skb);
                skb_reserve(skb2, ll_rs);
                skb_put(skb2, len + hlen);
                skb_reset_network_header(skb2);
                skb2->transport_header = skb2->network_header + hlen;

                /*
                 *      Charge the memory for the fragment to any owner
                 *      it might possess
                 */

                if (skb->sk)
                        skb_set_owner_w(skb2, skb->sk);

                /*
                 *      Copy the packet header into the new buffer.
                 */

                skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

                /*
                 *      Copy a block of the IP datagram.
                 */
                if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
                        BUG();
                left -= len;

                /*
                 *      Fill in the new header fields.
                 */
                iph = ip_hdr(skb2);
                iph->frag_off = htons((offset >> 3));

                /* ANK: dirty, but effective trick. Upgrade options only if
                 * the segment to be fragmented was THE FIRST (otherwise,
                 * options are already fixed) and make it ONCE
                 * on the initial skb, so that all the following fragments
                 * will inherit fixed options.
                 */
                if (offset == 0)
                        ip_options_fragment(skb);
                /*
                 *      Added AC : If we are fragmenting a fragment that's not the
                 *                 last fragment then keep MF set on each fragment
                 */
                if (left > 0 || not_last_frag)
                        iph->frag_off |= htons(IP_MF);
                ptr += len;
                offset += len;

                /*
                 *      Put this fragment into the sending queue.
                 */
                iph->tot_len = htons(len + hlen);

                ip_send_check(iph);

                err = output(skb2);
                if (err)
                        goto fail;

                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
        }
        kfree_skb(skb);
        IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
        return err;

fail:
        kfree_skb(skb);
        IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
        return err;
}
EXPORT_SYMBOL(ip_fragment);
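
/*
 * Worked example of the arithmetic above (numbers illustrative): with a
 * 1500 byte device MTU and a 20 byte header (ihl == 5), each fragment
 * carries mtu = 1500 - 20 = 1480 bytes of data.  Offsets travel in the
 * header in 8-byte units ("htons(offset >> 3)"), and non-final fragment
 * lengths are rounded down to a multiple of 8 ("len &= ~7"):
 *
 *      byte offset 0    -> frag_off = htons(0)   | htons(IP_MF)
 *      byte offset 1480 -> frag_off = htons(185) | htons(IP_MF)
 *      byte offset 2960 -> frag_off = htons(370)   (last: no IP_MF)
 */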

int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
        struct iovec *iov = from;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (memcpy_fromiovecend(to, iov, offset, len) < 0)
                        return -EFAULT;
        } else {
                __wsum csum = 0;
                if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
                        return -EFAULT;
                skb->csum = csum_block_add(skb->csum, csum, odd);
        }
        return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);
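
/*
 * All getfrag() callbacks handed to ip_append_data() follow the contract
 * above: copy "len" bytes of caller data starting at "offset" into "to",
 * folding a checksum into skb->csum unless the skb is hardware
 * checksummed.  A minimal kernel-memory variant, as a sketch (compare
 * ip_reply_glue_bits() further down, the real in-tree example):
 *
 *      static int kmem_getfrag(void *from, char *to, int offset,
 *                              int len, int odd, struct sk_buff *skb)
 *      {
 *              __wsum csum = csum_partial_copy_nocheck(from + offset,
 *                                                      to, len, 0);
 *              skb->csum = csum_block_add(skb->csum, csum, odd);
 *              return 0;
 *      }
 */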

static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
        char *kaddr;
        __wsum csum;
        kaddr = kmap(page);
        csum = csum_partial(kaddr + offset, copy, 0);
        kunmap(page);
        return csum;
}

static inline int ip_ufo_append_data(struct sock *sk,
                        int getfrag(void *from, char *to, int offset, int len,
                               int odd, struct sk_buff *skb),
                        void *from, int length, int hh_len, int fragheaderlen,
                        int transhdrlen, int mtu, unsigned int flags)
{
        struct sk_buff *skb;
        int err;
        /* The network device supports UDP fragmentation offload, so
         * create a single skb containing the complete UDP datagram.
         */
        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
                skb = sock_alloc_send_skb(sk,
                        hh_len + fragheaderlen + transhdrlen + 20,
                        (flags & MSG_DONTWAIT), &err);

                if (skb == NULL)
                        return err;

                /* reserve space for Hardware header */
                skb_reserve(skb, hh_len);

                /* create space for UDP/IP header */
                skb_put(skb, fragheaderlen + transhdrlen);

                /* initialize network header pointer */
                skb_reset_network_header(skb);

                /* initialize protocol header pointer */
                skb->transport_header = skb->network_header + fragheaderlen;

                skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum = 0;
                sk->sk_sndmsg_off = 0;

                /* specify the length of each IP datagram fragment */
                skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
                skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                __skb_queue_tail(&sk->sk_write_queue, skb);
        }

        return skb_append_datato_frags(sk, skb, getfrag, from,
                                       (length - transhdrlen));
}
/*
 *      ip_append_data() and ip_append_page() can make one large IP datagram
 *      from many pieces of data.  Each piece is held on the socket until
 *      ip_push_pending_frames() is called.  Each piece can be a page or
 *      non-page data.
 *
 *      Not only UDP but also other transport protocols, e.g. raw sockets,
 *      can potentially use this interface.
 *
 *      LATER: the length must be adjusted for tail padding when required.
 */
int ip_append_data(struct sock *sk,
                   int getfrag(void *from, char *to, int offset, int len,
                               int odd, struct sk_buff *skb),
                   void *from, int length, int transhdrlen,
                   struct ipcm_cookie *ipc, struct rtable **rtp,
                   unsigned int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;

        struct ip_options *opt = NULL;
        int hh_len;
        int exthdrlen;
        int mtu;
        int copy;
        int err;
        int offset = 0;
        unsigned int maxfraglen, fragheaderlen;
        int csummode = CHECKSUM_NONE;
        struct rtable *rt;

        if (flags&MSG_PROBE)
                return 0;

        if (skb_queue_empty(&sk->sk_write_queue)) {
                /*
                 * setup for corking.
                 */
                opt = ipc->opt;
                if (opt) {
                        if (inet->cork.opt == NULL) {
                                inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
                                if (unlikely(inet->cork.opt == NULL))
                                        return -ENOBUFS;
                        }
                        memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
                        inet->cork.flags |= IPCORK_OPT;
                        inet->cork.addr = ipc->addr;
                }
                rt = *rtp;
                if (unlikely(!rt))
                        return -EFAULT;
                /*
                 * We steal a reference to this route; the caller should
                 * not release it.
                 */
                *rtp = NULL;
                inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
                                            rt->dst.dev->mtu :
                                            dst_mtu(rt->dst.path);
                inet->cork.dst = &rt->dst;
                inet->cork.length = 0;
                sk->sk_sndmsg_page = NULL;
                sk->sk_sndmsg_off = 0;
                if ((exthdrlen = rt->dst.header_len) != 0) {
                        length += exthdrlen;
                        transhdrlen += exthdrlen;
                }
        } else {
                rt = (struct rtable *)inet->cork.dst;
                if (inet->cork.flags & IPCORK_OPT)
                        opt = inet->cork.opt;

                transhdrlen = 0;
                exthdrlen = 0;
                mtu = inet->cork.fragsize;
        }
        hh_len = LL_RESERVED_SPACE(rt->dst.dev);

        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

        if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
                               mtu-exthdrlen);
                return -EMSGSIZE;
        }
        /*
         * transhdrlen > 0 means that this is the first fragment and we want
         * it not to be fragmented later.
         */
        if (transhdrlen &&
            length + fragheaderlen <= mtu &&
            rt->dst.dev->features & NETIF_F_V4_CSUM &&
            !exthdrlen)
                csummode = CHECKSUM_PARTIAL;

        skb = skb_peek_tail(&sk->sk_write_queue);

        inet->cork.length += length;
        if (((length > mtu) || (skb && skb_is_gso(skb))) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO)) {
                err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
                                         fragheaderlen, transhdrlen, mtu,
                                         flags);
                if (err)
                        goto error;
                return 0;
        }
        /* So, what's going on in the loop below?
         *
         * We use the calculated fragment length to generate a chain of skbs;
         * each segment is an IP fragment ready to be sent to the network
         * once the appropriate IP header has been added.
         */

        if (!skb)
                goto alloc_new_skb;

        while (length > 0) {
                /* Check if the remaining data fits into current packet. */
                copy = mtu - skb->len;
                if (copy < length)
                        copy = maxfraglen - skb->len;
                if (copy <= 0) {
                        char *data;
                        unsigned int datalen;
                        unsigned int fraglen;
                        unsigned int fraggap;
                        unsigned int alloclen;
                        struct sk_buff *skb_prev;
alloc_new_skb:
                        skb_prev = skb;
                        if (skb_prev)
                                fraggap = skb_prev->len - maxfraglen;
                        else
                                fraggap = 0;

                        /*
                         * If remaining data exceeds the mtu,
                         * we know we need more fragment(s).
                         */
                        datalen = length + fraggap;
                        if (datalen > mtu - fragheaderlen)
                                datalen = maxfraglen - fragheaderlen;
                        fraglen = datalen + fragheaderlen;

                        if ((flags & MSG_MORE) &&
                            !(rt->dst.dev->features&NETIF_F_SG))
                                alloclen = mtu;
                        else
                                alloclen = datalen + fragheaderlen;

                        /* The last fragment gets additional space at tail.
                         * Note, with MSG_MORE we overallocate on fragments,
                         * because we have no idea what fragment will be
                         * the last.
                         */
                        if (datalen == length + fraggap)
                                alloclen += rt->dst.trailer_len;

                        if (transhdrlen) {
                                skb = sock_alloc_send_skb(sk,
                                                alloclen + hh_len + 15,
                                                (flags & MSG_DONTWAIT), &err);
                        } else {
                                skb = NULL;
                                if (atomic_read(&sk->sk_wmem_alloc) <=
                                    2 * sk->sk_sndbuf)
                                        skb = sock_wmalloc(sk,
                                                           alloclen + hh_len + 15, 1,
                                                           sk->sk_allocation);
                                if (unlikely(skb == NULL))
                                        err = -ENOBUFS;
                                else
                                        /* only the initial fragment is
                                           time stamped */
                                        ipc->shtx.flags = 0;
                        }
                        if (skb == NULL)
                                goto error;

                        /*
                         *      Fill in the control structures
                         */
                        skb->ip_summed = csummode;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);
                        *skb_tx(skb) = ipc->shtx;

                        /*
                         *      Find where to start putting bytes.
                         */
                        data = skb_put(skb, fraglen);
                        skb_set_network_header(skb, exthdrlen);
                        skb->transport_header = (skb->network_header +
                                                 fragheaderlen);
                        data += fragheaderlen;

                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(
                                        skb_prev, maxfraglen,
                                        data + transhdrlen, fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                data += fraggap;
                                pskb_trim_unique(skb_prev, maxfraglen);
                        }

                        copy = datalen - transhdrlen - fraggap;
                        if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
                                err = -EFAULT;
                                kfree_skb(skb);
                                goto error;
                        }

                        offset += copy;
                        length -= datalen - fraggap;
                        transhdrlen = 0;
                        exthdrlen = 0;
                        csummode = CHECKSUM_NONE;

                        /*
                         * Put the packet on the pending queue.
                         */
                        __skb_queue_tail(&sk->sk_write_queue, skb);
                        continue;
                }

                if (copy > length)
                        copy = length;

                if (!(rt->dst.dev->features&NETIF_F_SG)) {
                        unsigned int off;

                        off = skb->len;
                        if (getfrag(from, skb_put(skb, copy),
                                        offset, copy, off, skb) < 0) {
                                __skb_trim(skb, off);
                                err = -EFAULT;
                                goto error;
                        }
                } else {
                        int i = skb_shinfo(skb)->nr_frags;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
                        struct page *page = sk->sk_sndmsg_page;
                        int off = sk->sk_sndmsg_off;
                        unsigned int left;

                        if (page && (left = PAGE_SIZE - off) > 0) {
                                if (copy >= left)
                                        copy = left;
                                if (page != frag->page) {
                                        if (i == MAX_SKB_FRAGS) {
                                                err = -EMSGSIZE;
                                                goto error;
                                        }
                                        get_page(page);
                                        skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
                                        frag = &skb_shinfo(skb)->frags[i];
                                }
                        } else if (i < MAX_SKB_FRAGS) {
                                if (copy > PAGE_SIZE)
                                        copy = PAGE_SIZE;
                                page = alloc_pages(sk->sk_allocation, 0);
                                if (page == NULL)  {
                                        err = -ENOMEM;
                                        goto error;
                                }
                                sk->sk_sndmsg_page = page;
                                sk->sk_sndmsg_off = 0;

                                skb_fill_page_desc(skb, i, page, 0, 0);
                                frag = &skb_shinfo(skb)->frags[i];
                        } else {
                                err = -EMSGSIZE;
                                goto error;
                        }
                        if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
                                err = -EFAULT;
                                goto error;
                        }
                        sk->sk_sndmsg_off += copy;
                        frag->size += copy;
                        skb->len += copy;
                        skb->data_len += copy;
                        skb->truesize += copy;
                        atomic_add(copy, &sk->sk_wmem_alloc);
                }
                offset += copy;
                length -= copy;
        }

        return 0;

error:
        inet->cork.length -= length;
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
        return err;
}
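
/*
 * The corking pattern implemented above is visible from user space; a
 * hedged sketch using the standard UDP_CORK socket option (all names are
 * the ordinary sockets API, nothing here is defined in this file):
 *
 *      int on = 1, off = 0;
 *
 *      setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *      send(fd, part1, len1, 0);       // queued via ip_append_data()
 *      send(fd, part2, len2, 0);       // appended to the same datagram
 *      setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
 *                                      // uncork -> ip_push_pending_frames()
 */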

ssize_t ip_append_page(struct sock *sk, struct page *page,
                       int offset, size_t size, int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;
        struct rtable *rt;
        struct ip_options *opt = NULL;
        int hh_len;
        int mtu;
        int len;
        int err;
        unsigned int maxfraglen, fragheaderlen, fraggap;

        if (inet->hdrincl)
                return -EPERM;

        if (flags&MSG_PROBE)
                return 0;

        if (skb_queue_empty(&sk->sk_write_queue))
                return -EINVAL;

        rt = (struct rtable *)inet->cork.dst;
        if (inet->cork.flags & IPCORK_OPT)
                opt = inet->cork.opt;

        if (!(rt->dst.dev->features&NETIF_F_SG))
                return -EOPNOTSUPP;

        hh_len = LL_RESERVED_SPACE(rt->dst.dev);
        mtu = inet->cork.fragsize;

        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

        if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, mtu);
                return -EMSGSIZE;
        }

        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
                return -EINVAL;

        inet->cork.length += size;
        if ((size + skb->len > mtu) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO)) {
                skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
                skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
        }

        while (size > 0) {
                int i;

                if (skb_is_gso(skb))
                        len = size;
                else {

                        /* Check if the remaining data fits into current packet. */
                        len = mtu - skb->len;
                        if (len < size)
                                len = maxfraglen - skb->len;
                }
                if (len <= 0) {
                        struct sk_buff *skb_prev;
                        int alloclen;

                        skb_prev = skb;
                        fraggap = skb_prev->len - maxfraglen;

                        alloclen = fragheaderlen + hh_len + fraggap + 15;
                        skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
                        if (unlikely(!skb)) {
                                err = -ENOBUFS;
                                goto error;
                        }

                        /*
                         *      Fill in the control structures
                         */
                        skb->ip_summed = CHECKSUM_NONE;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);

                        /*
                         *      Find where to start putting bytes.
                         */
                        skb_put(skb, fragheaderlen + fraggap);
                        skb_reset_network_header(skb);
                        skb->transport_header = (skb->network_header +
                                                 fragheaderlen);
                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(skb_prev,
                                                                   maxfraglen,
                                                    skb_transport_header(skb),
                                                                   fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                pskb_trim_unique(skb_prev, maxfraglen);
                        }

                        /*
                         * Put the packet on the pending queue.
                         */
                        __skb_queue_tail(&sk->sk_write_queue, skb);
                        continue;
                }

                i = skb_shinfo(skb)->nr_frags;
                if (len > size)
                        len = size;
                if (skb_can_coalesce(skb, i, page, offset)) {
                        skb_shinfo(skb)->frags[i-1].size += len;
                } else if (i < MAX_SKB_FRAGS) {
                        get_page(page);
                        skb_fill_page_desc(skb, i, page, offset, len);
                } else {
                        err = -EMSGSIZE;
                        goto error;
                }

                if (skb->ip_summed == CHECKSUM_NONE) {
                        __wsum csum;
                        csum = csum_page(page, offset, len);
                        skb->csum = csum_block_add(skb->csum, csum, skb->len);
                }

                skb->len += len;
                skb->data_len += len;
                skb->truesize += len;
                atomic_add(len, &sk->sk_wmem_alloc);
                offset += len;
                size -= len;
        }
        return 0;

error:
        inet->cork.length -= size;
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
        return err;
}

static void ip_cork_release(struct inet_sock *inet)
{
        inet->cork.flags &= ~IPCORK_OPT;
        kfree(inet->cork.opt);
        inet->cork.opt = NULL;
        dst_release(inet->cork.dst);
        inet->cork.dst = NULL;
}

/*
 *      Combine all pending IP fragments on the socket into one IP datagram
 *      and push it out.
 */
int ip_push_pending_frames(struct sock *sk)
{
        struct sk_buff *skb, *tmp_skb;
        struct sk_buff **tail_skb;
        struct inet_sock *inet = inet_sk(sk);
        struct net *net = sock_net(sk);
        struct ip_options *opt = NULL;
        struct rtable *rt = (struct rtable *)inet->cork.dst;
        struct iphdr *iph;
        __be16 df = 0;
        __u8 ttl;
        int err = 0;

        if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
                goto out;
        tail_skb = &(skb_shinfo(skb)->frag_list);

        /* move skb->data to ip header from ext header */
        if (skb->data < skb_network_header(skb))
                __skb_pull(skb, skb_network_offset(skb));
        while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
                __skb_pull(tmp_skb, skb_network_header_len(skb));
                *tail_skb = tmp_skb;
                tail_skb = &(tmp_skb->next);
                skb->len += tmp_skb->len;
                skb->data_len += tmp_skb->len;
                skb->truesize += tmp_skb->truesize;
                tmp_skb->destructor = NULL;
                tmp_skb->sk = NULL;
        }
        /* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we
         * allow the frame generated here to be fragmented.  No matter how
         * transforms change the size of the packet, it will go out.
         */
        if (inet->pmtudisc < IP_PMTUDISC_DO)
                skb->local_df = 1;

        /* The DF bit is set when we want to see DF on outgoing frames.
         * If local_df is set too, we still allow this frame to be
         * fragmented locally. */
        if (inet->pmtudisc >= IP_PMTUDISC_DO ||
            (skb->len <= dst_mtu(&rt->dst) &&
             ip_dont_fragment(sk, &rt->dst)))
                df = htons(IP_DF);

        if (inet->cork.flags & IPCORK_OPT)
                opt = inet->cork.opt;

        if (rt->rt_type == RTN_MULTICAST)
                ttl = inet->mc_ttl;
        else
                ttl = ip_select_ttl(inet, &rt->dst);

        iph = (struct iphdr *)skb->data;
        iph->version = 4;
        iph->ihl = 5;
        if (opt) {
                iph->ihl += opt->optlen>>2;
                ip_options_build(skb, opt, inet->cork.addr, rt, 0);
        }
        iph->tos = inet->tos;
        iph->frag_off = df;
        ip_select_ident(iph, &rt->dst, sk);
        iph->ttl = ttl;
        iph->protocol = sk->sk_protocol;
        iph->saddr = rt->rt_src;
        iph->daddr = rt->rt_dst;

        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;
        /*
         * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
         * on dst refcount
         */
        inet->cork.dst = NULL;
        skb_dst_set(skb, &rt->dst);

        if (iph->protocol == IPPROTO_ICMP)
                icmp_out_count(net, ((struct icmphdr *)
                        skb_transport_header(skb))->type);
        /* Netfilter gets the whole, not yet fragmented skb. */
        err = ip_local_out(skb);
        if (err) {
                if (err > 0)
                        err = net_xmit_errno(err);
                if (err)
                        goto error;
        }

out:
        ip_cork_release(inet);
        return err;

error:
        IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
        goto out;
}

/*
 *      Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
        struct sk_buff *skb;

        while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
                kfree_skb(skb);

        ip_cork_release(inet_sk(sk));
}

/*
 *      Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
                              int len, int odd, struct sk_buff *skb)
{
        __wsum csum;

        csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
        skb->csum = csum_block_add(skb->csum, csum, odd);
        return 0;
}
/*
 *      Generic function to send a packet as a reply to another packet.
 *      Used to send TCP resets so far.  ICMP should use this function too.
 *
 *      Should run single-threaded per socket because it uses the sock
 *      structure to pass arguments.
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
                   unsigned int len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct {
                struct ip_options       opt;
                char                    data[40];
        } replyopts;
        struct ipcm_cookie ipc;
        __be32 daddr;
        struct rtable *rt = skb_rtable(skb);

        if (ip_options_echo(&replyopts.opt, skb))
                return;

        daddr = ipc.addr = rt->rt_src;
        ipc.opt = NULL;
        ipc.shtx.flags = 0;

        if (replyopts.opt.optlen) {
                ipc.opt = &replyopts.opt;

                if (ipc.opt->srr)
                        daddr = replyopts.opt.faddr;
        }

        {
                struct flowi fl = { .oif = arg->bound_dev_if,
                                    .nl_u = { .ip4_u =
                                              { .daddr = daddr,
                                                .saddr = rt->rt_spec_dst,
                                                .tos = RT_TOS(ip_hdr(skb)->tos) } },
                                    /* Not quite clean, but right. */
                                    .uli_u = { .ports =
                                               { .sport = tcp_hdr(skb)->dest,
                                                 .dport = tcp_hdr(skb)->source } },
                                    .proto = sk->sk_protocol,
                                    .flags = ip_reply_arg_flowi_flags(arg) };
                security_skb_classify_flow(skb, &fl);
                if (ip_route_output_key(sock_net(sk), &rt, &fl))
                        return;
        }
        /* And let IP do all the hard work.

           This chunk is not reentrant, hence the spinlock.
           Note that it relies on the fact that this function is called
           with BHs disabled locally and that sk cannot already be
           spinlocked.
         */
        bh_lock_sock(sk);
        inet->tos = ip_hdr(skb)->tos;
        sk->sk_priority = skb->priority;
        sk->sk_protocol = ip_hdr(skb)->protocol;
        sk->sk_bound_dev_if = arg->bound_dev_if;
        ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
                       &ipc, &rt, MSG_DONTWAIT);
        if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
                if (arg->csumoffset >= 0)
                        *((__sum16 *)skb_transport_header(skb) +
                          arg->csumoffset) = csum_fold(csum_add(skb->csum,
                                                                arg->csum));
                skb->ip_summed = CHECKSUM_NONE;
                ip_push_pending_frames(sk);
        }

        bh_unlock_sock(sk);

        ip_rt_put(rt);
}
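
/*
 * Caller sketch (hedged; the real call site lives in the TCP code, e.g.
 * when emitting a RST): the caller fills an ip_reply_arg with the payload
 * and checksum coordinates, then calls this on a per-net control socket:
 *
 *      struct ip_reply_arg arg = { };
 *
 *      arg.iov[0].iov_base = &rep.th;          // reply header to emit
 *      arg.iov[0].iov_len  = sizeof(rep.th);
 *      arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 *      ip_send_reply(ctl_sk, skb, &arg, arg.iov[0].iov_len);
 *
 * "rep" and "ctl_sk" are assumptions of this sketch, not names defined
 * here.
 */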

void __init ip_init(void)
{
        ip_rt_init();
        inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
        igmp_mc_proc_init();
#endif
}