/* NAT for netfilter; shared with compatibility layer. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/checksum.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/tcp.h>  /* For tcp_prot in getorigdst */
#include <linux/icmp.h>
#include <linux/udp.h>
#include <linux/jhash.h>

#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_protocol.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_zones.h>

static DEFINE_SPINLOCK(nf_nat_lock);

static struct nf_conntrack_l3proto *l3proto __read_mostly;

#define MAX_IP_NAT_PROTO 256
static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]
                                                __read_mostly;

static inline const struct nf_nat_protocol *
__nf_nat_proto_find(u_int8_t protonum)
{
        return rcu_dereference(nf_nat_protos[protonum]);
}

const struct nf_nat_protocol *
nf_nat_proto_find_get(u_int8_t protonum)
{
        const struct nf_nat_protocol *p;

        rcu_read_lock();
        p = __nf_nat_proto_find(protonum);
        if (!try_module_get(p->me))
                p = &nf_nat_unknown_protocol;
        rcu_read_unlock();

        return p;
}
EXPORT_SYMBOL_GPL(nf_nat_proto_find_get);

void
nf_nat_proto_put(const struct nf_nat_protocol *p)
{
        module_put(p->me);
}
EXPORT_SYMBOL_GPL(nf_nat_proto_put);
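
/* Usage sketch (illustrative only, not taken from a real caller): code
 * that looks up a protocol with nf_nat_proto_find_get() must drop the
 * module reference with nf_nat_proto_put() when it is done:
 *
 *        const struct nf_nat_protocol *p;
 *
 *        p = nf_nat_proto_find_get(IPPROTO_TCP);
 *        ... use p->in_range(), p->unique_tuple(), etc. ...
 *        nf_nat_proto_put(p);
 *
 * If the protocol's module can't be pinned, the lookup falls back to
 * nf_nat_unknown_protocol, so 'p' is always safe to use.
 */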

/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
hash_by_src(const struct net *net, u16 zone,
            const struct nf_conntrack_tuple *tuple)
{
        unsigned int hash;

        /* Original src, to ensure we map it consistently if poss. */
        hash = jhash_3words((__force u32)tuple->src.u3.ip,
                            (__force u32)tuple->src.u.all ^ zone,
                            tuple->dst.protonum, 0);
        return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
}
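
/* Note on the bucket mapping in hash_by_src(): ((u64)hash * size) >> 32
 * scales a 32-bit hash onto [0, size) without a modulo.  As a worked
 * example (illustrative numbers only): with nat_htable_size = 4096 and
 * hash = 0x80000000, ((u64)0x80000000 * 4096) >> 32 = 2048, the middle
 * bucket, as expected for a hash halfway through the 32-bit range.
 */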

/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
                  const struct nf_conn *ignored_conntrack)
{
        /* Connection tracking doesn't keep track of outgoing tuples; only
           incoming ones.  NAT means they don't have a fixed mapping,
           so we invert the tuple and look for the incoming reply.

           We could keep a separate hash if this proves too slow. */
        struct nf_conntrack_tuple reply;

        nf_ct_invert_tuplepr(&reply, tuple);
        return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);
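
/* For illustration (made-up addresses): if the candidate tuple is
 * 10.0.0.2:1024 -> 198.51.100.7:53/UDP, its inversion is
 * 198.51.100.7:53 -> 10.0.0.2:1024/UDP, which is exactly the reply
 * tuple a conntrack entry would carry, so the lookup above finds any
 * existing connection that would clash with this mapping.
 */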

/* If we source map this tuple so reply looks like reply_tuple, will
 * that meet the constraints of range? */
static int
in_range(const struct nf_conntrack_tuple *tuple,
         const struct nf_nat_range *range)
{
        const struct nf_nat_protocol *proto;
        int ret = 0;

        /* If we are supposed to map IPs, then we must be in the
           range specified, otherwise let this drag us onto a new src IP. */
        if (range->flags & IP_NAT_RANGE_MAP_IPS) {
                if (ntohl(tuple->src.u3.ip) < ntohl(range->min_ip) ||
                    ntohl(tuple->src.u3.ip) > ntohl(range->max_ip))
                        return 0;
        }

        rcu_read_lock();
        proto = __nf_nat_proto_find(tuple->dst.protonum);
        if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
            proto->in_range(tuple, IP_NAT_MANIP_SRC,
                            &range->min, &range->max))
                ret = 1;
        rcu_read_unlock();

        return ret;
}

static inline int
same_src(const struct nf_conn *ct,
         const struct nf_conntrack_tuple *tuple)
{
        const struct nf_conntrack_tuple *t;

        t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
        return (t->dst.protonum == tuple->dst.protonum &&
                t->src.u3.ip == tuple->src.u3.ip &&
                t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net, u16 zone,
                     const struct nf_conntrack_tuple *tuple,
                     struct nf_conntrack_tuple *result,
                     const struct nf_nat_range *range)
{
        unsigned int h = hash_by_src(net, zone, tuple);
        const struct nf_conn_nat *nat;
        const struct nf_conn *ct;
        const struct hlist_node *n;

        rcu_read_lock();
        hlist_for_each_entry_rcu(nat, n, &net->ipv4.nat_bysource[h], bysource) {
                ct = nat->ct;
                if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
                        /* Copy source part from reply tuple. */
                        nf_ct_invert_tuplepr(result,
                                       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
                        result->dst = tuple->dst;

                        if (in_range(result, range)) {
                                rcu_read_unlock();
                                return 1;
                        }
                }
        }
        rcu_read_unlock();
        return 0;
}

/* For [FUTURE] fragmentation handling, we want the least-used
   src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
   if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
   1-65535, we don't do pro-rata allocation based on ports; we choose
   the ip with the lowest src-ip/dst-ip/proto usage.
*/
static void
find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
                    const struct nf_nat_range *range,
                    const struct nf_conn *ct,
                    enum nf_nat_manip_type maniptype)
{
        __be32 *var_ipp;
        /* Host order */
        u_int32_t minip, maxip, j;

        /* No IP mapping?  Do nothing. */
        if (!(range->flags & IP_NAT_RANGE_MAP_IPS))
                return;

        if (maniptype == IP_NAT_MANIP_SRC)
                var_ipp = &tuple->src.u3.ip;
        else
                var_ipp = &tuple->dst.u3.ip;

        /* Fast path: only one choice. */
        if (range->min_ip == range->max_ip) {
                *var_ipp = range->min_ip;
                return;
        }

        /* Hashing source and destination IPs gives a fairly even
         * spread in practice (if there are a small number of IPs
         * involved, there usually aren't that many connections
         * anyway).  The consistency means that servers see the same
         * client coming from the same IP (some Internet Banking sites
         * like this), even across reboots. */
        minip = ntohl(range->min_ip);
        maxip = ntohl(range->max_ip);
        j = jhash_2words((__force u32)tuple->src.u3.ip,
                         range->flags & IP_NAT_RANGE_PERSISTENT ?
                                0 : (__force u32)tuple->dst.u3.ip ^ zone, 0);
        j = ((u64)j * (maxip - minip + 1)) >> 32;
        *var_ipp = htonl(minip + j);
}
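
/* A worked example of the selection above (illustrative numbers only):
 * with a range of 10.0.0.1-10.0.0.4, maxip - minip + 1 = 4.  If
 * jhash_2words() yields j = 0xC0000000 (three quarters of the 32-bit
 * space), then ((u64)j * 4) >> 32 = 3, so the connection maps to
 * 10.0.0.1 + 3 = 10.0.0.4.  The hash depends on the source address
 * (and on the destination too, unless IP_NAT_RANGE_PERSISTENT is set),
 * so the same client keeps getting the same mapped address.
 */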

/* Manipulate the tuple into the range given.  For NF_INET_POST_ROUTING,
 * we change the source to map into the range.  For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range.  It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
                 const struct nf_conntrack_tuple *orig_tuple,
                 const struct nf_nat_range *range,
                 struct nf_conn *ct,
                 enum nf_nat_manip_type maniptype)
{
        struct net *net = nf_ct_net(ct);
        const struct nf_nat_protocol *proto;
        u16 zone = nf_ct_zone(ct);

        /* 1) If this srcip/proto/src-proto-part is currently mapped,
           and that same mapping gives a unique tuple within the given
           range, use that.

           This is only required for source (ie. NAT/masq) mappings.
           So far, we don't do local source mappings, so multiple
           manips are not an issue.  */
        if (maniptype == IP_NAT_MANIP_SRC &&
            !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
                if (find_appropriate_src(net, zone, orig_tuple, tuple, range)) {
                        pr_debug("get_unique_tuple: Found current src map\n");
                        if (!nf_nat_used_tuple(tuple, ct))
                                return;
                }
        }

        /* 2) Select the least-used IP/proto combination in the given
           range. */
        *tuple = *orig_tuple;
        find_best_ips_proto(zone, tuple, range, ct, maniptype);

        /* 3) The per-protocol part of the manip is made to map into
           the range to make a unique tuple. */

        rcu_read_lock();
        proto = __nf_nat_proto_find(orig_tuple->dst.protonum);

        /* Change protocol info to have some randomization */
        if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
                proto->unique_tuple(tuple, range, maniptype, ct);
                goto out;
        }

        /* Only bother mapping if it's not already in range and unique */
        if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
             proto->in_range(tuple, maniptype, &range->min, &range->max)) &&
            !nf_nat_used_tuple(tuple, ct))
                goto out;

        /* Last chance: get protocol to try to obtain unique tuple. */
        proto->unique_tuple(tuple, range, maniptype, ct);
out:
        rcu_read_unlock();
}

unsigned int
nf_nat_setup_info(struct nf_conn *ct,
                  const struct nf_nat_range *range,
                  enum nf_nat_manip_type maniptype)
{
        struct net *net = nf_ct_net(ct);
        struct nf_conntrack_tuple curr_tuple, new_tuple;
        struct nf_conn_nat *nat;
        int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK);

        /* NAT helpers or ctnetlink may also set up the binding. */
        nat = nfct_nat(ct);
        if (!nat) {
                nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
                if (nat == NULL) {
                        pr_debug("failed to add NAT extension\n");
                        return NF_ACCEPT;
                }
        }

        NF_CT_ASSERT(maniptype == IP_NAT_MANIP_SRC ||
                     maniptype == IP_NAT_MANIP_DST);
        BUG_ON(nf_nat_initialized(ct, maniptype));

        /* What we've got will look like inverse of reply. Normally
           this is what is in the conntrack, except for prior
           manipulations (future optimization: if num_manips == 0,
           orig_tp =
           conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple) */
        nf_ct_invert_tuplepr(&curr_tuple,
                             &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

        if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
                struct nf_conntrack_tuple reply;

                /* Alter conntrack table so it will recognize replies. */
                nf_ct_invert_tuplepr(&reply, &new_tuple);
                nf_conntrack_alter_reply(ct, &reply);

                /* Non-atomic: we own this at the moment. */
                if (maniptype == IP_NAT_MANIP_SRC)
                        ct->status |= IPS_SRC_NAT;
                else
                        ct->status |= IPS_DST_NAT;
        }

        /* Place in source hash if this is the first time. */
        if (have_to_hash) {
                unsigned int srchash;

                srchash = hash_by_src(net, nf_ct_zone(ct),
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                spin_lock_bh(&nf_nat_lock);
                /* nf_conntrack_alter_reply might re-allocate the extension area */
                nat = nfct_nat(ct);
                nat->ct = ct;
                hlist_add_head_rcu(&nat->bysource,
                                   &net->ipv4.nat_bysource[srchash]);
                spin_unlock_bh(&nf_nat_lock);
        }

        /* It's done. */
        if (maniptype == IP_NAT_MANIP_DST)
                set_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
        else
                set_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);

        return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);
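
/* Usage sketch (not from any real module; 'new_src_ip' is a made-up
 * variable): an iptables-style SNAT target running in POST_ROUTING
 * context would bind a connection to a single source address roughly
 * like this:
 *
 *        struct nf_conn *ct;
 *        enum ip_conntrack_info ctinfo;
 *        struct nf_nat_range range = {
 *                .flags  = IP_NAT_RANGE_MAP_IPS,
 *                .min_ip = new_src_ip,   (hypothetical __be32)
 *                .max_ip = new_src_ip,
 *        };
 *
 *        ct = nf_ct_get(skb, &ctinfo);
 *        return nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);
 *
 * With min_ip == max_ip and no IP_NAT_RANGE_PROTO_SPECIFIED flag, the
 * per-protocol part is left alone unless a clash forces a change.
 */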

/* Returns true if succeeded. */
static bool
manip_pkt(u_int16_t proto,
          struct sk_buff *skb,
          unsigned int iphdroff,
          const struct nf_conntrack_tuple *target,
          enum nf_nat_manip_type maniptype)
{
        struct iphdr *iph;
        const struct nf_nat_protocol *p;

        if (!skb_make_writable(skb, iphdroff + sizeof(*iph)))
                return false;

        iph = (void *)skb->data + iphdroff;

        /* Manipulate protocol part. */

        /* rcu_read_lock()ed by nf_hook_slow */
        p = __nf_nat_proto_find(proto);
        if (!p->manip_pkt(skb, iphdroff, target, maniptype))
                return false;

        /* Reload iph: the protocol manip may have reallocated skb->data. */
        iph = (void *)skb->data + iphdroff;

        if (maniptype == IP_NAT_MANIP_SRC) {
                csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
                iph->saddr = target->src.u3.ip;
        } else {
                csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
                iph->daddr = target->dst.u3.ip;
        }
        return true;
}

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
                           enum ip_conntrack_info ctinfo,
                           unsigned int hooknum,
                           struct sk_buff *skb)
{
        enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
        unsigned long statusbit;
        enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

        if (mtype == IP_NAT_MANIP_SRC)
                statusbit = IPS_SRC_NAT;
        else
                statusbit = IPS_DST_NAT;

        /* Invert if this is reply dir. */
        if (dir == IP_CT_DIR_REPLY)
                statusbit ^= IPS_NAT_MASK;

        /* Non-atomic: these bits don't change. */
        if (ct->status & statusbit) {
                struct nf_conntrack_tuple target;

                /* We are aiming to look like inverse of other direction. */
                nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

                if (!manip_pkt(target.dst.protonum, skb, 0, &target, mtype))
                        return NF_DROP;
        }
        return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);

/* Dir is direction ICMP is coming from (opposite to packet it contains) */
int nf_nat_icmp_reply_translation(struct nf_conn *ct,
                                  enum ip_conntrack_info ctinfo,
                                  unsigned int hooknum,
                                  struct sk_buff *skb)
{
        struct {
                struct icmphdr icmp;
                struct iphdr ip;
        } *inside;
        const struct nf_conntrack_l4proto *l4proto;
        struct nf_conntrack_tuple inner, target;
        int hdrlen = ip_hdrlen(skb);
        enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
        unsigned long statusbit;
        enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);

        if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
                return 0;

        inside = (void *)skb->data + ip_hdrlen(skb);

        /* We're actually going to mangle it beyond trivial checksum
           adjustment, so make sure the current checksum is correct. */
        if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
                return 0;

        /* Must be RELATED */
        NF_CT_ASSERT(skb->nfctinfo == IP_CT_RELATED ||
                     skb->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY);

        /* Redirects on non-null nats must be dropped, else they'll
           start talking to each other without our translation, and be
           confused... --RR */
        if (inside->icmp.type == ICMP_REDIRECT) {
                /* If NAT isn't finished, assume it will be and drop. */
                if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
                        return 0;

                if (ct->status & IPS_NAT_MASK)
                        return 0;
        }

        pr_debug("icmp_reply_translation: translating error %p manip %u "
                 "dir %s\n", skb, manip,
                 dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");

        /* rcu_read_lock()ed by nf_hook_slow */
        l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);

        if (!nf_ct_get_tuple(skb,
                             ip_hdrlen(skb) + sizeof(struct icmphdr),
                             (ip_hdrlen(skb) +
                              sizeof(struct icmphdr) + inside->ip.ihl * 4),
                             (u_int16_t)AF_INET,
                             inside->ip.protocol,
                             &inner, l3proto, l4proto))
                return 0;

        /* Change inner back to look like incoming packet.  We do the
           opposite manip on this hook to normal, because it might not
           pass all hooks (locally-generated ICMP).  Consider incoming
           packet: PREROUTING (DST manip), routing produces ICMP, goes
           through POSTROUTING (which must correct the DST manip). */
        if (!manip_pkt(inside->ip.protocol, skb,
                       ip_hdrlen(skb) + sizeof(inside->icmp),
                       &ct->tuplehash[!dir].tuple,
                       !manip))
                return 0;

        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                /* Reloading "inside" here since manip_pkt may have
                   reallocated skb->data. */
                inside = (void *)skb->data + ip_hdrlen(skb);
                inside->icmp.checksum = 0;
                inside->icmp.checksum =
                        csum_fold(skb_checksum(skb, hdrlen,
                                               skb->len - hdrlen, 0));
        }

        /* Change outer to look like the reply to an incoming packet
         * (proto 0 means don't invert per-proto part). */
        if (manip == IP_NAT_MANIP_SRC)
                statusbit = IPS_SRC_NAT;
        else
                statusbit = IPS_DST_NAT;

        /* Invert if this is reply dir. */
        if (dir == IP_CT_DIR_REPLY)
                statusbit ^= IPS_NAT_MASK;

        if (ct->status & statusbit) {
                nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
                if (!manip_pkt(0, skb, 0, &target, manip))
                        return 0;
        }

        return 1;
}
EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);

/* Protocol registration. */
int nf_nat_protocol_register(const struct nf_nat_protocol *proto)
{
        int ret = 0;

        spin_lock_bh(&nf_nat_lock);
        if (nf_nat_protos[proto->protonum] != &nf_nat_unknown_protocol) {
                ret = -EBUSY;
                goto out;
        }
        rcu_assign_pointer(nf_nat_protos[proto->protonum], proto);
 out:
        spin_unlock_bh(&nf_nat_lock);
        return ret;
}
EXPORT_SYMBOL(nf_nat_protocol_register);

/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
{
        spin_lock_bh(&nf_nat_lock);
        rcu_assign_pointer(nf_nat_protos[proto->protonum],
                           &nf_nat_unknown_protocol);
        spin_unlock_bh(&nf_nat_lock);
        synchronize_rcu();
}
EXPORT_SYMBOL(nf_nat_protocol_unregister);
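
/* Registration sketch (illustrative only; the 'foo' names are made up,
 * modeled on how the built-in TCP/UDP/ICMP entries are wired up in
 * nf_nat_init() below): a NAT protocol module pairs register and
 * unregister in its init/exit hooks and supplies the ops this file
 * calls through:
 *
 *        static const struct nf_nat_protocol nf_nat_protocol_foo = {
 *                .protonum     = IPPROTO_FOO,   (hypothetical)
 *                .me           = THIS_MODULE,
 *                .manip_pkt    = foo_manip_pkt,
 *                .in_range     = foo_in_range,
 *                .unique_tuple = foo_unique_tuple,
 *        };
 *
 *        static int __init nf_nat_proto_foo_init(void)
 *        {
 *                return nf_nat_protocol_register(&nf_nat_protocol_foo);
 *        }
 *
 *        static void __exit nf_nat_proto_foo_fini(void)
 *        {
 *                nf_nat_protocol_unregister(&nf_nat_protocol_foo);
 *        }
 */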

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
        struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);

        if (nat == NULL || nat->ct == NULL)
                return;

        NF_CT_ASSERT(nat->ct->status & IPS_NAT_DONE_MASK);

        spin_lock_bh(&nf_nat_lock);
        hlist_del_rcu(&nat->bysource);
        spin_unlock_bh(&nf_nat_lock);
}

static void nf_nat_move_storage(void *new, void *old)
{
        struct nf_conn_nat *new_nat = new;
        struct nf_conn_nat *old_nat = old;
        struct nf_conn *ct = old_nat->ct;

        if (!ct || !(ct->status & IPS_NAT_DONE_MASK))
                return;

        spin_lock_bh(&nf_nat_lock);
        new_nat->ct = ct;
        hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
        spin_unlock_bh(&nf_nat_lock);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
        .len            = sizeof(struct nf_conn_nat),
        .align          = __alignof__(struct nf_conn_nat),
        .destroy        = nf_nat_cleanup_conntrack,
        .move           = nf_nat_move_storage,
        .id             = NF_CT_EXT_NAT,
        .flags          = NF_CT_EXT_F_PREALLOC,
};

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
        [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 },
        [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 },
};

static int nfnetlink_parse_nat_proto(struct nlattr *attr,
                                     const struct nf_conn *ct,
                                     struct nf_nat_range *range)
{
        struct nlattr *tb[CTA_PROTONAT_MAX+1];
        const struct nf_nat_protocol *npt;
        int err;

        err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr, protonat_nla_policy);
        if (err < 0)
                return err;

        npt = nf_nat_proto_find_get(nf_ct_protonum(ct));
        if (npt->nlattr_to_range)
                err = npt->nlattr_to_range(tb, range);
        nf_nat_proto_put(npt);
        return err;
}

static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
        [CTA_NAT_MINIP]         = { .type = NLA_U32 },
        [CTA_NAT_MAXIP]         = { .type = NLA_U32 },
};

static int
nfnetlink_parse_nat(const struct nlattr *nat,
                    const struct nf_conn *ct, struct nf_nat_range *range)
{
        struct nlattr *tb[CTA_NAT_MAX+1];
        int err;

        memset(range, 0, sizeof(*range));

        err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy);
        if (err < 0)
                return err;

        if (tb[CTA_NAT_MINIP])
                range->min_ip = nla_get_be32(tb[CTA_NAT_MINIP]);

        if (!tb[CTA_NAT_MAXIP])
                range->max_ip = range->min_ip;
        else
                range->max_ip = nla_get_be32(tb[CTA_NAT_MAXIP]);

        if (range->min_ip)
                range->flags |= IP_NAT_RANGE_MAP_IPS;

        if (!tb[CTA_NAT_PROTO])
                return 0;

        err = nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
        if (err < 0)
                return err;

        return 0;
}
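
/* Schematic of the nested attribute tree consumed above (the CTA_*
 * names are the real constants handled by the two policies; the tree
 * itself is just an illustration of what a ctnetlink caller would send
 * as the 'nat' attribute):
 *
 *        nat
 *        |-- CTA_NAT_MINIP               (NLA_U32, network byte order)
 *        |-- CTA_NAT_MAXIP               (NLA_U32; defaults to MINIP if absent)
 *        `-- CTA_NAT_PROTO               (optional nested attribute)
 *            |-- CTA_PROTONAT_PORT_MIN   (NLA_U16)
 *            `-- CTA_PROTONAT_PORT_MAX   (NLA_U16)
 */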

static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
                          enum nf_nat_manip_type manip,
                          const struct nlattr *attr)
{
        struct nf_nat_range range;

        if (nfnetlink_parse_nat(attr, ct, &range) < 0)
                return -EINVAL;
        if (nf_nat_initialized(ct, manip))
                return -EEXIST;

        return nf_nat_setup_info(ct, &range, manip);
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
                          enum nf_nat_manip_type manip,
                          const struct nlattr *attr)
{
        return -EOPNOTSUPP;
}
#endif

static int __net_init nf_nat_net_init(struct net *net)
{
        /* Leave them the same for the moment. */
        net->ipv4.nat_htable_size = net->ct.htable_size;
        net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
                                                       &net->ipv4.nat_vmalloced, 0);
        if (!net->ipv4.nat_bysource)
                return -ENOMEM;
        return 0;
}

/* Clear NAT section of all conntracks, in case we're loaded again. */
static int clean_nat(struct nf_conn *i, void *data)
{
        struct nf_conn_nat *nat = nfct_nat(i);

        if (!nat)
                return 0;
        memset(nat, 0, sizeof(*nat));
        i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
        return 0;
}

static void __net_exit nf_nat_net_exit(struct net *net)
{
        nf_ct_iterate_cleanup(net, &clean_nat, NULL);
        synchronize_rcu();
        nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
                             net->ipv4.nat_htable_size);
}

static struct pernet_operations nf_nat_net_ops = {
        .init = nf_nat_net_init,
        .exit = nf_nat_net_exit,
};

static int __init nf_nat_init(void)
{
        size_t i;
        int ret;

        need_ipv4_conntrack();

        ret = nf_ct_extend_register(&nat_extend);
        if (ret < 0) {
                printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
                return ret;
        }

        ret = register_pernet_subsys(&nf_nat_net_ops);
        if (ret < 0)
                goto cleanup_extend;

        /* Sew in builtin protocols. */
        spin_lock_bh(&nf_nat_lock);
        for (i = 0; i < MAX_IP_NAT_PROTO; i++)
                rcu_assign_pointer(nf_nat_protos[i], &nf_nat_unknown_protocol);
        rcu_assign_pointer(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
        rcu_assign_pointer(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
        rcu_assign_pointer(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
        spin_unlock_bh(&nf_nat_lock);

        /* Initialize fake conntrack so that NAT will skip it */
        nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;

        l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);

        BUG_ON(nf_nat_seq_adjust_hook != NULL);
        rcu_assign_pointer(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
        BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
        rcu_assign_pointer(nfnetlink_parse_nat_setup_hook,
                           nfnetlink_parse_nat_setup);
        BUG_ON(nf_ct_nat_offset != NULL);
        rcu_assign_pointer(nf_ct_nat_offset, nf_nat_get_offset);
        return 0;

 cleanup_extend:
        nf_ct_extend_unregister(&nat_extend);
        return ret;
}

static void __exit nf_nat_cleanup(void)
{
        unregister_pernet_subsys(&nf_nat_net_ops);
        nf_ct_l3proto_put(l3proto);
        nf_ct_extend_unregister(&nat_extend);
        rcu_assign_pointer(nf_nat_seq_adjust_hook, NULL);
        rcu_assign_pointer(nfnetlink_parse_nat_setup_hook, NULL);
        rcu_assign_pointer(nf_ct_nat_offset, NULL);
        synchronize_net();
}

MODULE_LICENSE("GPL");
MODULE_ALIAS("nf-nat-ipv4");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);