net/bridge/br_netfilter.c
/*
 *      Handle firewalling
 *      Linux ethernet bridge
 *
 *      Authors:
 *      Lennert Buytenhek               <buytenh@gnu.org>
 *      Bart De Schuymer                <bdschuym@pandora.be>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      Lennert dedicates this file to Kerstin Wurdinger.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_arp.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/route.h>

#include <asm/uaccess.h>
#include "br_private.h"
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

#define skb_origaddr(skb)        (((struct bridge_skb_cb *) \
                                 (skb->nf_bridge->data))->daddr.ipv4)
#define store_orig_dstaddr(skb)  (skb_origaddr(skb) = ip_hdr(skb)->daddr)
#define dnat_took_place(skb)     (skb_origaddr(skb) != ip_hdr(skb)->daddr)

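/* Knobs controlling whether bridged traffic is handed to ip(6)tables and
 * arptables and whether VLAN- or PPPoE-encapsulated traffic is filtered.
 * Without CONFIG_SYSCTL they compile down to constants.
 */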
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *brnf_sysctl_header;
static int brnf_call_iptables __read_mostly = 1;
static int brnf_call_ip6tables __read_mostly = 1;
static int brnf_call_arptables __read_mostly = 1;
static int brnf_filter_vlan_tagged __read_mostly = 0;
static int brnf_filter_pppoe_tagged __read_mostly = 0;
#else
#define brnf_call_iptables 1
#define brnf_call_ip6tables 1
#define brnf_call_arptables 1
#define brnf_filter_vlan_tagged 0
#define brnf_filter_pppoe_tagged 0
#endif

static inline __be16 vlan_proto(const struct sk_buff *skb)
{
        return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
}

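/* Match VLAN-encapsulated IPv4/IPv6/ARP frames, but only when filtering of
 * VLAN-tagged traffic has been enabled.
 */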
#define IS_VLAN_IP(skb) \
        (skb->protocol == htons(ETH_P_8021Q) && \
         vlan_proto(skb) == htons(ETH_P_IP) &&  \
         brnf_filter_vlan_tagged)

#define IS_VLAN_IPV6(skb) \
        (skb->protocol == htons(ETH_P_8021Q) && \
         vlan_proto(skb) == htons(ETH_P_IPV6) &&\
         brnf_filter_vlan_tagged)

#define IS_VLAN_ARP(skb) \
        (skb->protocol == htons(ETH_P_8021Q) && \
         vlan_proto(skb) == htons(ETH_P_ARP) && \
         brnf_filter_vlan_tagged)

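/* The PPP protocol field immediately follows the PPPoE session header. */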
static inline __be16 pppoe_proto(const struct sk_buff *skb)
{
        return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
                            sizeof(struct pppoe_hdr)));
}

#define IS_PPPOE_IP(skb) \
        (skb->protocol == htons(ETH_P_PPP_SES) && \
         pppoe_proto(skb) == htons(PPP_IP) && \
         brnf_filter_pppoe_tagged)

#define IS_PPPOE_IPV6(skb) \
        (skb->protocol == htons(ETH_P_PPP_SES) && \
         pppoe_proto(skb) == htons(PPP_IPV6) && \
         brnf_filter_pppoe_tagged)

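/* The fake rtable never has its PMTU updated, so this is a no-op. */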
static void fake_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}

static struct dst_ops fake_dst_ops = {
        .family =               AF_INET,
        .protocol =             cpu_to_be16(ETH_P_IP),
        .update_pmtu =          fake_update_pmtu,
        .entries =              ATOMIC_INIT(0),
};

/*
 * Initialize bogus route table used to keep netfilter happy.
 * Currently, we fill in the PMTU entry because netfilter
 * refragmentation needs it, and the rt_flags entry because
 * ipt_REJECT needs it.  Future netfilter modules might
 * require us to fill additional fields.
 */
void br_netfilter_rtable_init(struct net_bridge *br)
{
        struct rtable *rt = &br->fake_rtable;

        atomic_set(&rt->dst.__refcnt, 1);
        rt->dst.dev = br->dev;
        rt->dst.path = &rt->dst;
        rt->dst.metrics[RTAX_MTU - 1] = 1500;
        rt->dst.flags   = DST_NOXFRM;
        rt->dst.ops = &fake_dst_ops;
}

static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
{
        if (!br_port_exists(dev))
                return NULL;
        return &br_port_get_rcu(dev)->br->fake_rtable;
}

static inline struct net_device *bridge_parent(const struct net_device *dev)
{
        if (!br_port_exists(dev))
                return NULL;

        return br_port_get_rcu(dev)->br->dev;
}

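/* Attach a zeroed nf_bridge_info to the skb, with its use count set to 1. */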
static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
{
        skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
        if (likely(skb->nf_bridge))
                atomic_set(&(skb->nf_bridge->use), 1);

        return skb->nf_bridge;
}

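/* Give the skb its own private copy of nf_bridge_info if it is shared. */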
static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
{
        struct nf_bridge_info *nf_bridge = skb->nf_bridge;

        if (atomic_read(&nf_bridge->use) > 1) {
                struct nf_bridge_info *tmp = nf_bridge_alloc(skb);

                if (tmp) {
                        memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
                        atomic_set(&tmp->use, 1);
                        nf_bridge_put(nf_bridge);
                }
                nf_bridge = tmp;
        }
        return nf_bridge;
}

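/* Re-insert or strip the VLAN/PPPoE encapsulation header, keeping the
 * network header offset in sync.
 */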
static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
{
        unsigned int len = nf_bridge_encap_header_len(skb);

        skb_push(skb, len);
        skb->network_header -= len;
}

static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
{
        unsigned int len = nf_bridge_encap_header_len(skb);

        skb_pull(skb, len);
        skb->network_header += len;
}

static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
{
        unsigned int len = nf_bridge_encap_header_len(skb);

        skb_pull_rcsum(skb, len);
        skb->network_header += len;
}

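/* Save the Ethernet header (plus any encapsulation header) so that
 * nf_bridge_copy_header() can restore it after IP refragmentation.
 */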
static inline void nf_bridge_save_header(struct sk_buff *skb)
{
        int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);

        skb_copy_from_linear_data_offset(skb, -header_size,
                                         skb->nf_bridge->data, header_size);
}

static inline void nf_bridge_update_protocol(struct sk_buff *skb)
{
        if (skb->nf_bridge->mask & BRNF_8021Q)
                skb->protocol = htons(ETH_P_8021Q);
        else if (skb->nf_bridge->mask & BRNF_PPPoE)
                skb->protocol = htons(ETH_P_PPP_SES);
}

/* Fill in the header for fragmented IP packets handled by
 * the IPv4 connection tracking code.
 */
int nf_bridge_copy_header(struct sk_buff *skb)
{
        int err;
        unsigned int header_size;

        nf_bridge_update_protocol(skb);
        header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
        err = skb_cow_head(skb, header_size);
        if (err)
                return err;

        skb_copy_to_linear_data_offset(skb, -header_size,
                                       skb->nf_bridge->data, header_size);
        __skb_push(skb, nf_bridge_encap_header_len(skb));
        return 0;
}

/* PF_BRIDGE/PRE_ROUTING *********************************************/
/* Undo the changes made for ip6tables PREROUTING and continue the
 * bridge PRE_ROUTING hook. */
static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
{
        struct nf_bridge_info *nf_bridge = skb->nf_bridge;
        struct rtable *rt;

        if (nf_bridge->mask & BRNF_PKT_TYPE) {
                skb->pkt_type = PACKET_OTHERHOST;
                nf_bridge->mask ^= BRNF_PKT_TYPE;
        }
        nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;

        rt = bridge_parent_rtable(nf_bridge->physindev);
        if (!rt) {
                kfree_skb(skb);
                return 0;
        }
        skb_dst_set_noref(skb, &rt->dst);

        skb->dev = nf_bridge->physindev;
        nf_bridge_update_protocol(skb);
        nf_bridge_push_encap_header(skb);
        NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
                       br_handle_frame_finish, 1);

        return 0;
}

/* Obtain the correct destination MAC address, while preserving the original
 * source MAC address. If we already know this address, we just copy it. If we
 * don't, we use the neighbour framework to find out. In both cases, we make
 * sure that br_handle_frame_finish() is called afterwards.
 */
static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
{
        struct nf_bridge_info *nf_bridge = skb->nf_bridge;
        struct dst_entry *dst;

        skb->dev = bridge_parent(skb->dev);
        if (!skb->dev)
                goto free_skb;
        dst = skb_dst(skb);
        if (dst->hh) {
                neigh_hh_bridge(dst->hh, skb);
                skb->dev = nf_bridge->physindev;
                return br_handle_frame_finish(skb);
        } else if (dst->neighbour) {
                /* the neighbour function below overwrites the complete
                 * MAC header, so we save the Ethernet source address and
                 * protocol number. */
                skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
                /* tell br_dev_xmit to continue with forwarding */
                nf_bridge->mask |= BRNF_BRIDGED_DNAT;
                return dst->neighbour->output(skb);
        }
free_skb:
        kfree_skb(skb);
        return 0;
}

/* This requires some explaining. If DNAT has taken place,
 * we will need to fix up the destination Ethernet address.
 *
 * There are two cases to consider:
 * 1. The packet was DNAT'ed to a device in the same bridge
 *    port group as it was received on. We can still bridge
 *    the packet.
 * 2. The packet was DNAT'ed to a different device, either
 *    a non-bridged device or another bridge port group.
 *    The packet will need to be routed.
 *
 * The correct way of distinguishing between these two cases is to
 * call ip_route_input() and to look at skb->dst->dev, which is
 * changed to the destination device if ip_route_input() succeeds.
 *
 * Let's first consider the case that ip_route_input() succeeds:
 *
 * If the output device equals the logical bridge device the packet
 * came in on, we can consider this bridging. The corresponding MAC
 * address will be obtained in br_nf_pre_routing_finish_bridge.
 * Otherwise, the packet is considered to be routed and we just
 * change the destination MAC address so that the packet will
 * later be passed up to the IP stack to be routed. For a redirected
 * packet, ip_route_input() will give back the localhost as output device,
 * which differs from the bridge device.
 *
 * Let's now consider the case that ip_route_input() fails:
 *
 * This can be because the destination address is martian, in which case
 * the packet will be dropped.
 * If IP forwarding is disabled, ip_route_input() will fail, while
 * ip_route_output_key() can return success. The source
 * address for ip_route_output_key() is set to zero, so ip_route_output_key()
 * thinks we're handling a locally generated packet and won't care
 * if IP forwarding is enabled. If the output device equals the logical bridge
 * device, we proceed as if ip_route_input() succeeded. If it differs from the
 * logical bridge port or if ip_route_output_key() fails we drop the packet.
 */
static int br_nf_pre_routing_finish(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        struct iphdr *iph = ip_hdr(skb);
        struct nf_bridge_info *nf_bridge = skb->nf_bridge;
        struct rtable *rt;
        int err;

        if (nf_bridge->mask & BRNF_PKT_TYPE) {
                skb->pkt_type = PACKET_OTHERHOST;
                nf_bridge->mask ^= BRNF_PKT_TYPE;
        }
        nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
        if (dnat_took_place(skb)) {
                if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
                        struct flowi fl = {
                                .nl_u = {
                                        .ip4_u = {
                                                 .daddr = iph->daddr,
                                                 .saddr = 0,
                                                 .tos = RT_TOS(iph->tos) },
                                },
                                .proto = 0,
                        };
                        struct in_device *in_dev = __in_dev_get_rcu(dev);

                        /* If err equals -EHOSTUNREACH the error is due to a
                         * martian destination or due to the fact that
                         * forwarding is disabled. For most martian packets,
                         * ip_route_output_key() will fail. It won't fail for 2 types of
                         * martian destinations: loopback destinations and destination
                         * 0.0.0.0. In both cases the packet will be dropped because the
                         * destination is the loopback device and not the bridge. */
                        if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
                                goto free_skb;

                        if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
                                /* - Bridged-and-DNAT'ed traffic doesn't
                                 *   require ip_forwarding. */
                                if (((struct dst_entry *)rt)->dev == dev) {
                                        skb_dst_set(skb, (struct dst_entry *)rt);
                                        goto bridged_dnat;
                                }
                                dst_release((struct dst_entry *)rt);
                        }
free_skb:
                        kfree_skb(skb);
                        return 0;
                } else {
                        if (skb_dst(skb)->dev == dev) {
bridged_dnat:
                                skb->dev = nf_bridge->physindev;
                                nf_bridge_update_protocol(skb);
                                nf_bridge_push_encap_header(skb);
                                NF_HOOK_THRESH(NFPROTO_BRIDGE,
                                               NF_BR_PRE_ROUTING,
                                               skb, skb->dev, NULL,
                                               br_nf_pre_routing_finish_bridge,
                                               1);
                                return 0;
                        }
                        memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, ETH_ALEN);
                        skb->pkt_type = PACKET_HOST;
                }
        } else {
                rt = bridge_parent_rtable(nf_bridge->physindev);
                if (!rt) {
                        kfree_skb(skb);
                        return 0;
                }
                skb_dst_set_noref(skb, &rt->dst);
        }

        skb->dev = nf_bridge->physindev;
        nf_bridge_update_protocol(skb);
        nf_bridge_push_encap_header(skb);
        NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
                       br_handle_frame_finish, 1);

        return 0;
}

/* Some common code for IPv4/IPv6 */
static struct net_device *setup_pre_routing(struct sk_buff *skb)
{
        struct nf_bridge_info *nf_bridge = skb->nf_bridge;

        if (skb->pkt_type == PACKET_OTHERHOST) {
                skb->pkt_type = PACKET_HOST;
                nf_bridge->mask |= BRNF_PKT_TYPE;
        }

        nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
        nf_bridge->physindev = skb->dev;
        skb->dev = bridge_parent(skb->dev);
        if (skb->protocol == htons(ETH_P_8021Q))
                nf_bridge->mask |= BRNF_8021Q;
        else if (skb->protocol == htons(ETH_P_PPP_SES))
                nf_bridge->mask |= BRNF_PPPoE;

        return skb->dev;
}

/* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */
static int check_hbh_len(struct sk_buff *skb)
{
        unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1);
        u32 pkt_len;
        const unsigned char *nh = skb_network_header(skb);
        int off = raw - nh;
        int len = (raw[1] + 1) << 3;

        if ((raw + len) - skb->data > skb_headlen(skb))
                goto bad;

        off += 2;
        len -= 2;

        while (len > 0) {
                int optlen = nh[off + 1] + 2;

                switch (nh[off]) {
                case IPV6_TLV_PAD0:
                        optlen = 1;
                        break;

                case IPV6_TLV_PADN:
                        break;

                case IPV6_TLV_JUMBO:
                        if (nh[off + 1] != 4 || (off & 3) != 2)
                                goto bad;
                        pkt_len = ntohl(*(__be32 *) (nh + off + 2));
                        if (pkt_len <= IPV6_MAXPLEN ||
                            ipv6_hdr(skb)->payload_len)
                                goto bad;
                        if (pkt_len > skb->len - sizeof(struct ipv6hdr))
                                goto bad;
                        if (pskb_trim_rcsum(skb,
                                            pkt_len + sizeof(struct ipv6hdr)))
                                goto bad;
                        nh = skb_network_header(skb);
                        break;
                default:
                        if (optlen > len)
                                goto bad;
                        break;
                }
                off += optlen;
                len -= optlen;
        }
        if (len == 0)
                return 0;
bad:
        return -1;
}

/* Replicate the checks that IPv6 does on packet reception and pass the packet
 * to ip6tables, which doesn't support NAT, so things are fairly simple. */
static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
                                           struct sk_buff *skb,
                                           const struct net_device *in,
                                           const struct net_device *out,
                                           int (*okfn)(struct sk_buff *))
{
        struct ipv6hdr *hdr;
        u32 pkt_len;

        if (skb->len < sizeof(struct ipv6hdr))
                goto inhdr_error;

        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                goto inhdr_error;

        hdr = ipv6_hdr(skb);

        if (hdr->version != 6)
                goto inhdr_error;

        pkt_len = ntohs(hdr->payload_len);

        if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
                if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
                        goto inhdr_error;
                if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
                        goto inhdr_error;
        }
        if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb))
                goto inhdr_error;

        nf_bridge_put(skb->nf_bridge);
        if (!nf_bridge_alloc(skb))
                return NF_DROP;
        if (!setup_pre_routing(skb))
                return NF_DROP;

        skb->protocol = htons(ETH_P_IPV6);
        NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
                br_nf_pre_routing_finish_ipv6);

        return NF_STOLEN;

inhdr_error:
        return NF_DROP;
}

/* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
 * Replicate the checks that IPv4 does on packet reception.
 * Set skb->dev to the bridge device (i.e. parent of the
 * receiving device) to make netfilter happy, the REDIRECT
 * target in particular.  Save the original destination IP
 * address to be able to detect DNAT afterwards. */
static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
                                      const struct net_device *in,
                                      const struct net_device *out,
                                      int (*okfn)(struct sk_buff *))
{
        struct net_bridge_port *p;
        struct net_bridge *br;
        struct iphdr *iph;
        __u32 len = nf_bridge_encap_header_len(skb);

        if (unlikely(!pskb_may_pull(skb, len)))
                goto out;

        p = br_port_get_rcu(in);
        if (p == NULL)
                goto out;
        br = p->br;

        if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
            IS_PPPOE_IPV6(skb)) {
                if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
                        return NF_ACCEPT;

                nf_bridge_pull_encap_header_rcsum(skb);
                return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
        }

        if (!brnf_call_iptables && !br->nf_call_iptables)
                return NF_ACCEPT;

        if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb) &&
            !IS_PPPOE_IP(skb))
                return NF_ACCEPT;

        nf_bridge_pull_encap_header_rcsum(skb);

        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                goto inhdr_error;

        iph = ip_hdr(skb);
        if (iph->ihl < 5 || iph->version != 4)
                goto inhdr_error;

        if (!pskb_may_pull(skb, 4 * iph->ihl))
                goto inhdr_error;

        iph = ip_hdr(skb);
        if (ip_fast_csum((__u8 *) iph, iph->ihl) != 0)
                goto inhdr_error;

        len = ntohs(iph->tot_len);
        if (skb->len < len || len < 4 * iph->ihl)
                goto inhdr_error;

        pskb_trim_rcsum(skb, len);

        /* BUG: Should really parse the IP options here. */
        memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));

        nf_bridge_put(skb->nf_bridge);
        if (!nf_bridge_alloc(skb))
                return NF_DROP;
        if (!setup_pre_routing(skb))
                return NF_DROP;
        store_orig_dstaddr(skb);
        skb->protocol = htons(ETH_P_IP);

        NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
                br_nf_pre_routing_finish);

        return NF_STOLEN;

inhdr_error:
//      IP_INC_STATS_BH(IpInHdrErrors);
out:
        return NF_DROP;
}

/* PF_BRIDGE/LOCAL_IN ************************************************/
/* The packet is locally destined, which requires a real
 * dst_entry, so detach the fake one.  On the way up, the
 * packet would pass through PRE_ROUTING again (which already
 * took place when the packet entered the bridge), but we
 * register an IPv4 PRE_ROUTING 'sabotage' hook that will
 * prevent this from happening. */
static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
                                   const struct net_device *in,
                                   const struct net_device *out,
                                   int (*okfn)(struct sk_buff *))
{
        struct rtable *rt = skb_rtable(skb);

        if (rt && rt == bridge_parent_rtable(in))
                skb_dst_drop(skb);

        return NF_ACCEPT;
}

/* PF_BRIDGE/FORWARD *************************************************/
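/* Re-enter the bridge FORWARD hook once the IP/ARP FORWARD hooks have
 * accepted the packet, after restoring the original packet type and the
 * encapsulation header.
 */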
static int br_nf_forward_finish(struct sk_buff *skb)
{
        struct nf_bridge_info *nf_bridge = skb->nf_bridge;
        struct net_device *in;

        if (skb->protocol != htons(ETH_P_ARP) && !IS_VLAN_ARP(skb)) {
                in = nf_bridge->physindev;
                if (nf_bridge->mask & BRNF_PKT_TYPE) {
                        skb->pkt_type = PACKET_OTHERHOST;
                        nf_bridge->mask ^= BRNF_PKT_TYPE;
                }
                nf_bridge_update_protocol(skb);
        } else {
                in = *((struct net_device **)(skb->cb));
        }
        nf_bridge_push_encap_header(skb);

        NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in,
                       skb->dev, br_forward_finish, 1);
        return 0;
}

/* This is the 'purely bridged' case.  For IP, we pass the packet to
 * netfilter with indev and outdev set to the bridge device,
 * but we are still able to filter on the 'real' indev/outdev
 * because of the physdev module. For ARP, indev and outdev are the
 * bridge ports. */
static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
                                     const struct net_device *in,
                                     const struct net_device *out,
                                     int (*okfn)(struct sk_buff *))
{
        struct nf_bridge_info *nf_bridge;
        struct net_device *parent;
        u_int8_t pf;

        if (!skb->nf_bridge)
                return NF_ACCEPT;

        /* Need exclusive nf_bridge_info since we might have multiple
         * different physoutdevs. */
        if (!nf_bridge_unshare(skb))
                return NF_DROP;

        parent = bridge_parent(out);
        if (!parent)
                return NF_DROP;

        if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
            IS_PPPOE_IP(skb))
                pf = PF_INET;
        else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
                 IS_PPPOE_IPV6(skb))
                pf = PF_INET6;
        else
                return NF_ACCEPT;

        nf_bridge_pull_encap_header(skb);

        nf_bridge = skb->nf_bridge;
        if (skb->pkt_type == PACKET_OTHERHOST) {
                skb->pkt_type = PACKET_HOST;
                nf_bridge->mask |= BRNF_PKT_TYPE;
        }

        /* The physdev module checks on this */
        nf_bridge->mask |= BRNF_BRIDGED;
        nf_bridge->physoutdev = skb->dev;
        if (pf == PF_INET)
                skb->protocol = htons(ETH_P_IP);
        else
                skb->protocol = htons(ETH_P_IPV6);

        NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent,
                br_nf_forward_finish);

        return NF_STOLEN;
}

static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
                                      const struct net_device *in,
                                      const struct net_device *out,
                                      int (*okfn)(struct sk_buff *))
{
        struct net_bridge_port *p;
        struct net_bridge *br;
        struct net_device **d = (struct net_device **)(skb->cb);

        p = br_port_get_rcu(out);
        if (p == NULL)
                return NF_ACCEPT;
        br = p->br;

        if (!brnf_call_arptables && !br->nf_call_arptables)
                return NF_ACCEPT;

        if (skb->protocol != htons(ETH_P_ARP)) {
                if (!IS_VLAN_ARP(skb))
                        return NF_ACCEPT;
                nf_bridge_pull_encap_header(skb);
        }

        if (arp_hdr(skb)->ar_pln != 4) {
                if (IS_VLAN_ARP(skb))
                        nf_bridge_push_encap_header(skb);
                return NF_ACCEPT;
        }
        *d = (struct net_device *)in;
        NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in,
                (struct net_device *)out, br_nf_forward_finish);

        return NF_STOLEN;
}

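/* With IPv4 conntrack the packet may have been defragmented on input, so it
 * can exceed the outgoing device's MTU and needs refragmenting before
 * transmission.
 */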
#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE)
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
        if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
            skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
            !skb_is_gso(skb))
                return ip_fragment(skb, br_dev_queue_push_xmit);
        else
                return br_dev_queue_push_xmit(skb);
}
#else
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
        return br_dev_queue_push_xmit(skb);
}
#endif

/* PF_BRIDGE/POST_ROUTING ********************************************/
static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
                                       const struct net_device *in,
                                       const struct net_device *out,
                                       int (*okfn)(struct sk_buff *))
{
        struct nf_bridge_info *nf_bridge = skb->nf_bridge;
        struct net_device *realoutdev = bridge_parent(skb->dev);
        u_int8_t pf;

        if (!nf_bridge || !(nf_bridge->mask & BRNF_BRIDGED))
                return NF_ACCEPT;

        if (!realoutdev)
                return NF_DROP;

        if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
            IS_PPPOE_IP(skb))
                pf = PF_INET;
        else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
                 IS_PPPOE_IPV6(skb))
                pf = PF_INET6;
        else
                return NF_ACCEPT;

        /* We assume any code from br_dev_queue_push_xmit onwards doesn't care
         * about the value of skb->pkt_type. */
        if (skb->pkt_type == PACKET_OTHERHOST) {
                skb->pkt_type = PACKET_HOST;
                nf_bridge->mask |= BRNF_PKT_TYPE;
        }

        nf_bridge_pull_encap_header(skb);
        nf_bridge_save_header(skb);
        if (pf == PF_INET)
                skb->protocol = htons(ETH_P_IP);
        else
                skb->protocol = htons(ETH_P_IPV6);

        NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev,
                br_nf_dev_queue_xmit);

        return NF_STOLEN;
}

/* IP/SABOTAGE *****************************************************/
/* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
 * for the second time. */
static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff *skb,
                                   const struct net_device *in,
                                   const struct net_device *out,
                                   int (*okfn)(struct sk_buff *))
{
        if (skb->nf_bridge &&
            !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
                return NF_STOP;
        }

        return NF_ACCEPT;
}

/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
 * br_dev_queue_push_xmit is called afterwards */
static struct nf_hook_ops br_nf_ops[] __read_mostly = {
        {
                .hook = br_nf_pre_routing,
                .owner = THIS_MODULE,
                .pf = PF_BRIDGE,
                .hooknum = NF_BR_PRE_ROUTING,
                .priority = NF_BR_PRI_BRNF,
        },
        {
                .hook = br_nf_local_in,
                .owner = THIS_MODULE,
                .pf = PF_BRIDGE,
                .hooknum = NF_BR_LOCAL_IN,
                .priority = NF_BR_PRI_BRNF,
        },
        {
                .hook = br_nf_forward_ip,
                .owner = THIS_MODULE,
                .pf = PF_BRIDGE,
                .hooknum = NF_BR_FORWARD,
                .priority = NF_BR_PRI_BRNF - 1,
        },
        {
                .hook = br_nf_forward_arp,
                .owner = THIS_MODULE,
                .pf = PF_BRIDGE,
                .hooknum = NF_BR_FORWARD,
                .priority = NF_BR_PRI_BRNF,
        },
        {
                .hook = br_nf_post_routing,
                .owner = THIS_MODULE,
                .pf = PF_BRIDGE,
                .hooknum = NF_BR_POST_ROUTING,
                .priority = NF_BR_PRI_LAST,
        },
        {
                .hook = ip_sabotage_in,
                .owner = THIS_MODULE,
                .pf = PF_INET,
                .hooknum = NF_INET_PRE_ROUTING,
                .priority = NF_IP_PRI_FIRST,
        },
        {
                .hook = ip_sabotage_in,
                .owner = THIS_MODULE,
                .pf = PF_INET6,
                .hooknum = NF_INET_PRE_ROUTING,
                .priority = NF_IP6_PRI_FIRST,
        },
};

#ifdef CONFIG_SYSCTL
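/* Sysctl handler that normalizes any nonzero value written by the user to 1. */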
static
int brnf_sysctl_call_tables(ctl_table * ctl, int write,
                            void __user * buffer, size_t * lenp, loff_t * ppos)
{
        int ret;

        ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

        if (write && *(int *)(ctl->data))
                *(int *)(ctl->data) = 1;
        return ret;
}

static ctl_table brnf_table[] = {
        {
                .procname       = "bridge-nf-call-arptables",
                .data           = &brnf_call_arptables,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = brnf_sysctl_call_tables,
        },
        {
                .procname       = "bridge-nf-call-iptables",
                .data           = &brnf_call_iptables,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = brnf_sysctl_call_tables,
        },
        {
                .procname       = "bridge-nf-call-ip6tables",
                .data           = &brnf_call_ip6tables,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = brnf_sysctl_call_tables,
        },
        {
                .procname       = "bridge-nf-filter-vlan-tagged",
                .data           = &brnf_filter_vlan_tagged,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = brnf_sysctl_call_tables,
        },
        {
                .procname       = "bridge-nf-filter-pppoe-tagged",
                .data           = &brnf_filter_pppoe_tagged,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = brnf_sysctl_call_tables,
        },
        { }
};

static struct ctl_path brnf_path[] = {
        { .procname = "net", },
        { .procname = "bridge", },
        { }
};
#endif

int __init br_netfilter_init(void)
{
        int ret;

        ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
        if (ret < 0)
                return ret;
#ifdef CONFIG_SYSCTL
        brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table);
        if (brnf_sysctl_header == NULL) {
                printk(KERN_WARNING
                       "br_netfilter: can't register to sysctl.\n");
                nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
                return -ENOMEM;
        }
#endif
        printk(KERN_NOTICE "Bridge firewalling registered\n");
        return 0;
}

void br_netfilter_fini(void)
{
        nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
#ifdef CONFIG_SYSCTL
        unregister_sysctl_table(brnf_sysctl_header);
#endif
}