/*
 *	Handle firewalling
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *	Bart De Schuymer		<bdschuym@pandora.be>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Lennert dedicates this file to Kerstin Wurdinger.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/netfilter_bridge.h>
#include <uapi/linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_arp.h>
#include <linux/in_route.h>
#include <linux/rculist.h>
#include <linux/inetdevice.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/route.h>
#include <net/netfilter/br_netfilter.h>
#include <net/netns/generic.h>

#include <linux/uaccess.h>
#include "br_private.h"
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

static unsigned int brnf_net_id __read_mostly;

struct brnf_net {
	bool enabled;
};

#ifdef CONFIG_SYSCTL
static struct ctl_table_header *brnf_sysctl_header;
static int brnf_call_iptables __read_mostly = 1;
static int brnf_call_ip6tables __read_mostly = 1;
static int brnf_call_arptables __read_mostly = 1;
static int brnf_filter_vlan_tagged __read_mostly;
static int brnf_filter_pppoe_tagged __read_mostly;
static int brnf_pass_vlan_indev __read_mostly;
#else
#define brnf_call_iptables 1
#define brnf_call_ip6tables 1
#define brnf_call_arptables 1
#define brnf_filter_vlan_tagged 0
#define brnf_filter_pppoe_tagged 0
#define brnf_pass_vlan_indev 0
#endif
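/* With CONFIG_SYSCTL the flags above are exposed as net.bridge.* sysctls
 * (see brnf_table below); the call-*tables decisions can additionally be
 * enabled per bridge through br->nf_call_iptables and friends, which are
 * checked in br_nf_pre_routing() and br_nf_forward_arp().  Without
 * CONFIG_SYSCTL the *tables calls are always made and the vlan/pppoe
 * filtering stays disabled.
 */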

#define IS_IP(skb) \
	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IP))

#define IS_IPV6(skb) \
	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))

#define IS_ARP(skb) \
	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))

static inline __be16 vlan_proto(const struct sk_buff *skb)
{
	if (skb_vlan_tag_present(skb))
		return skb->protocol;
	else if (skb->protocol == htons(ETH_P_8021Q))
		return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	else
		return 0;
}

#define IS_VLAN_IP(skb) \
	(vlan_proto(skb) == htons(ETH_P_IP) && \
	 brnf_filter_vlan_tagged)

#define IS_VLAN_IPV6(skb) \
	(vlan_proto(skb) == htons(ETH_P_IPV6) && \
	 brnf_filter_vlan_tagged)

#define IS_VLAN_ARP(skb) \
	(vlan_proto(skb) == htons(ETH_P_ARP) &&	\
	 brnf_filter_vlan_tagged)

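/* For PPPoE session frames the link-layer header looks like this
 * (PPPOE_SES_HLEN covers the 6-byte session header plus the 2-byte
 * PPP protocol field):
 *
 *   | Ethernet (14) | PPPoE session hdr (6) | PPP proto (2) | payload |
 *
 * pppoe_proto() below returns that PPP protocol field; it is only used
 * after IS_PPPOE_IP() / IS_PPPOE_IPV6() have checked for an
 * ETH_P_PPP_SES frame.
 */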
static inline __be16 pppoe_proto(const struct sk_buff *skb)
{
	return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
			    sizeof(struct pppoe_hdr)));
}

#define IS_PPPOE_IP(skb) \
	(skb->protocol == htons(ETH_P_PPP_SES) && \
	 pppoe_proto(skb) == htons(PPP_IP) && \
	 brnf_filter_pppoe_tagged)

#define IS_PPPOE_IPV6(skb) \
	(skb->protocol == htons(ETH_P_PPP_SES) && \
	 pppoe_proto(skb) == htons(PPP_IPV6) && \
	 brnf_filter_pppoe_tagged)

/* largest possible L2 header, see br_nf_dev_queue_xmit() */
#define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)

struct brnf_frag_data {
	char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
	u8 encap_size;
	u8 size;
	u16 vlan_tci;
	__be16 vlan_proto;
};

static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage);
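/* The per-cpu slot is filled in br_nf_dev_queue_xmit() and consumed by
 * br_nf_push_frag_xmit() for every fragment produced from that skb; both
 * run back to back in the same transmit context, which is presumably why
 * plain per-cpu storage with no locking is sufficient here.
 */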

static void nf_bridge_info_free(struct sk_buff *skb)
{
	if (skb->nf_bridge) {
		nf_bridge_put(skb->nf_bridge);
		skb->nf_bridge = NULL;
	}
}

static inline struct net_device *bridge_parent(const struct net_device *dev)
{
	struct net_bridge_port *port;

	port = br_port_get_rcu(dev);
	return port ? port->br->dev : NULL;
}

static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;

	if (refcount_read(&nf_bridge->use) > 1) {
		struct nf_bridge_info *tmp = nf_bridge_alloc(skb);

		if (tmp) {
			memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
			refcount_set(&tmp->use, 1);
		}
		nf_bridge_put(nf_bridge);
		nf_bridge = tmp;
	}
	return nf_bridge;
}

unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case __cpu_to_be16(ETH_P_8021Q):
		return VLAN_HLEN;
	case __cpu_to_be16(ETH_P_PPP_SES):
		return PPPOE_SES_HLEN;
	default:
		return 0;
	}
}
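/* i.e. 4 bytes for an 802.1Q tag, 8 bytes for a PPPoE session header
 * (including the PPP protocol field), 0 for plain Ethernet.
 */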

static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull(skb, len);
	skb->network_header += len;
}

static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull_rcsum(skb, len);
	skb->network_header += len;
}
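/* The encap header is pulled before the skb is fed to the IPv4/IPv6
 * netfilter hooks and restored with nf_bridge_push_encap_header() before
 * the skb re-enters the bridge forwarding path, so the IP code always
 * sees the network header at skb->data.
 */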

/* When handing a packet over to the IP layer
 * check whether we have a skb that is in the
 * expected format
 */

static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
{
	const struct iphdr *iph;
	u32 len;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto inhdr_error;

	iph = ip_hdr(skb);

	/* Basic sanity checks */
	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	iph = ip_hdr(skb);
	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto csum_error;

	len = ntohs(iph->tot_len);
	if (skb->len < len) {
		__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	if (pskb_trim_rcsum(skb, len)) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	/* We should really parse IP options here but until
	 * somebody who actually uses IP options complains to
	 * us we'll just silently ignore the options because
	 * we're lazy!
	 */
	return 0;

csum_error:
	__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
inhdr_error:
	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
drop:
	return -1;
}

void nf_bridge_update_protocol(struct sk_buff *skb)
{
	switch (skb->nf_bridge->orig_proto) {
	case BRNF_PROTO_8021Q:
		skb->protocol = htons(ETH_P_8021Q);
		break;
	case BRNF_PROTO_PPPOE:
		skb->protocol = htons(ETH_P_PPP_SES);
		break;
	case BRNF_PROTO_UNCHANGED:
		break;
	}
}

/* Obtain the correct destination MAC address, while preserving the original
 * source MAC address. If we already know this address, we just copy it. If we
 * don't, we use the neighbour framework to find out. In both cases, we make
 * sure that br_handle_frame_finish() is called afterwards.
 */
int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct neighbour *neigh;
	struct dst_entry *dst;

	skb->dev = bridge_parent(skb->dev);
	if (!skb->dev)
		goto free_skb;
	dst = skb_dst(skb);
	neigh = dst_neigh_lookup_skb(dst, skb);
	if (neigh) {
		struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
		int ret;

		if (neigh->hh.hh_len) {
			neigh_hh_bridge(&neigh->hh, skb);
			skb->dev = nf_bridge->physindev;
			ret = br_handle_frame_finish(net, sk, skb);
		} else {
			/* the neighbour function below overwrites the complete
			 * MAC header, so we save the Ethernet source address and
			 * protocol number.
			 */
			skb_copy_from_linear_data_offset(skb,
							 -(ETH_HLEN-ETH_ALEN),
							 nf_bridge->neigh_header,
							 ETH_HLEN-ETH_ALEN);
			/* tell br_dev_xmit to continue with forwarding */
			nf_bridge->bridged_dnat = 1;
			/* FIXME Need to refragment */
			ret = neigh->output(neigh, skb);
		}
		neigh_release(neigh);
		return ret;
	}
free_skb:
	kfree_skb(skb);
	return 0;
}

static inline bool
br_nf_ipv4_daddr_was_changed(const struct sk_buff *skb,
			     const struct nf_bridge_info *nf_bridge)
{
	return ip_hdr(skb)->daddr != nf_bridge->ipv4_daddr;
}

/* This requires some explaining. If DNAT has taken place,
 * we will need to fix up the destination Ethernet address.
 * This is also true when SNAT takes place (for the reply direction).
 *
 * There are two cases to consider:
 * 1. The packet was DNAT'ed to a device in the same bridge
 *    port group as it was received on. We can still bridge
 *    the packet.
 * 2. The packet was DNAT'ed to a different device, either
 *    a non-bridged device or another bridge port group.
 *    The packet will need to be routed.
 *
 * The correct way of distinguishing between these two cases is to
 * call ip_route_input() and to look at skb->dst->dev, which is
 * changed to the destination device if ip_route_input() succeeds.
 *
 * Let's first consider the case that ip_route_input() succeeds:
 *
 * If the output device equals the logical bridge device the packet
 * came in on, we can consider this bridging. The corresponding MAC
 * address will be obtained in br_nf_pre_routing_finish_bridge.
 * Otherwise, the packet is considered to be routed and we just
 * change the destination MAC address so that the packet will
 * later be passed up to the IP stack to be routed. For a redirected
 * packet, ip_route_input() will give back the localhost as output device,
 * which differs from the bridge device.
 *
 * Let's now consider the case that ip_route_input() fails:
 *
 * This can be because the destination address is martian, in which case
 * the packet will be dropped.
 * If IP forwarding is disabled, ip_route_input() will fail, while
 * ip_route_output_key() can return success. The source
 * address for ip_route_output_key() is set to zero, so ip_route_output_key()
 * thinks we're handling a locally generated packet and won't care
 * if IP forwarding is enabled. If the output device equals the logical bridge
 * device, we proceed as if ip_route_input() succeeded. If it differs from the
 * logical bridge port or if ip_route_output_key() fails we drop the packet.
 */
static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct iphdr *iph = ip_hdr(skb);
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct rtable *rt;
	int err;

	nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;

	if (nf_bridge->pkt_otherhost) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->pkt_otherhost = false;
	}
	nf_bridge->in_prerouting = 0;
	if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) {
		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
			struct in_device *in_dev = __in_dev_get_rcu(dev);

			/* If err equals -EHOSTUNREACH the error is due to a
			 * martian destination or due to the fact that
			 * forwarding is disabled. For most martian packets,
			 * ip_route_output_key() will fail. It won't fail for 2 types of
			 * martian destinations: loopback destinations and destination
			 * 0.0.0.0. In both cases the packet will be dropped because the
			 * destination is the loopback device and not the bridge. */
			if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
				goto free_skb;

			rt = ip_route_output(net, iph->daddr, 0,
					     RT_TOS(iph->tos), 0);
			if (!IS_ERR(rt)) {
				/* - Bridged-and-DNAT'ed traffic doesn't
				 *   require ip_forwarding. */
				if (rt->dst.dev == dev) {
					skb_dst_set(skb, &rt->dst);
					goto bridged_dnat;
				}
				ip_rt_put(rt);
			}
free_skb:
			kfree_skb(skb);
			return 0;
		} else {
			if (skb_dst(skb)->dev == dev) {
bridged_dnat:
				skb->dev = nf_bridge->physindev;
				nf_bridge_update_protocol(skb);
				nf_bridge_push_encap_header(skb);
				br_nf_hook_thresh(NF_BR_PRE_ROUTING,
						  net, sk, skb, skb->dev,
						  NULL,
						  br_nf_pre_routing_finish_bridge);
				return 0;
			}
			ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
			skb->pkt_type = PACKET_HOST;
		}
	} else {
		rt = bridge_parent_rtable(nf_bridge->physindev);
		if (!rt) {
			kfree_skb(skb);
			return 0;
		}
		skb_dst_set_noref(skb, &rt->dst);
	}

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb, skb->dev, NULL,
			  br_handle_frame_finish);
	return 0;
}

static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct net_device *dev)
{
	struct net_device *vlan, *br;

	br = bridge_parent(dev);
	if (brnf_pass_vlan_indev == 0 || !skb_vlan_tag_present(skb))
		return br;

	vlan = __vlan_find_dev_deep_rcu(br, skb->vlan_proto,
				    skb_vlan_tag_get(skb) & VLAN_VID_MASK);

	return vlan ? vlan : br;
}

/* Some common code for IPv4/IPv6 */
struct net_device *setup_pre_routing(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->pkt_otherhost = true;
	}

	nf_bridge->in_prerouting = 1;
	nf_bridge->physindev = skb->dev;
	skb->dev = brnf_get_logical_dev(skb, skb->dev);

	if (skb->protocol == htons(ETH_P_8021Q))
		nf_bridge->orig_proto = BRNF_PROTO_8021Q;
	else if (skb->protocol == htons(ETH_P_PPP_SES))
		nf_bridge->orig_proto = BRNF_PROTO_PPPOE;

	/* Must drop socket now because of tproxy. */
	skb_orphan(skb);
	return skb->dev;
}

/* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
 * Replicate the checks that IPv4 does on packet reception.
 * Set skb->dev to the bridge device (i.e. parent of the
 * receiving device) to make netfilter happy, the REDIRECT
 * target in particular.  Save the original destination IP
 * address to be able to detect DNAT afterwards. */
static unsigned int br_nf_pre_routing(void *priv,
				      struct sk_buff *skb,
				      const struct nf_hook_state *state)
{
	struct nf_bridge_info *nf_bridge;
	struct net_bridge_port *p;
	struct net_bridge *br;
	__u32 len = nf_bridge_encap_header_len(skb);

	if (unlikely(!pskb_may_pull(skb, len)))
		return NF_DROP;

	p = br_port_get_rcu(state->in);
	if (p == NULL)
		return NF_DROP;
	br = p->br;

	if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) {
		if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
			return NF_ACCEPT;

		nf_bridge_pull_encap_header_rcsum(skb);
		return br_nf_pre_routing_ipv6(priv, skb, state);
	}

	if (!brnf_call_iptables && !br->nf_call_iptables)
		return NF_ACCEPT;

	if (!IS_IP(skb) && !IS_VLAN_IP(skb) && !IS_PPPOE_IP(skb))
		return NF_ACCEPT;

	nf_bridge_pull_encap_header_rcsum(skb);

	if (br_validate_ipv4(state->net, skb))
		return NF_DROP;

	nf_bridge_put(skb->nf_bridge);
	if (!nf_bridge_alloc(skb))
		return NF_DROP;
	if (!setup_pre_routing(skb))
		return NF_DROP;

	nf_bridge = nf_bridge_info_get(skb);
	nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;

	skb->protocol = htons(ETH_P_IP);

	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
		skb->dev, NULL,
		br_nf_pre_routing_finish);

	return NF_STOLEN;
}
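/* Once the IPv4 PRE_ROUTING hooks accept the packet, br_nf_pre_routing_finish()
 * runs as the okfn above: it compares the (possibly DNAT'ed) destination
 * against the ipv4_daddr saved here and decides whether the packet can still
 * be bridged or must be handed up for routing, then re-injects it into the
 * bridge PRE_ROUTING chain via br_nf_hook_thresh().  See the long comment
 * above br_nf_pre_routing_finish() for the details.
 */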


/* PF_BRIDGE/FORWARD *************************************************/
static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct net_device *in;

	if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {

		if (skb->protocol == htons(ETH_P_IP))
			nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;

		if (skb->protocol == htons(ETH_P_IPV6))
			nf_bridge->frag_max_size = IP6CB(skb)->frag_max_size;

		in = nf_bridge->physindev;
		if (nf_bridge->pkt_otherhost) {
			skb->pkt_type = PACKET_OTHERHOST;
			nf_bridge->pkt_otherhost = false;
		}
		nf_bridge_update_protocol(skb);
	} else {
		in = *((struct net_device **)(skb->cb));
	}
	nf_bridge_push_encap_header(skb);

	br_nf_hook_thresh(NF_BR_FORWARD, net, sk, skb, in, skb->dev,
			  br_forward_finish);
	return 0;
}


/* This is the 'purely bridged' case.  For IP, we pass the packet to
 * netfilter with indev and outdev set to the bridge device,
 * but we are still able to filter on the 'real' indev/outdev
 * because of the physdev module. For ARP, indev and outdev are the
 * bridge ports. */
static unsigned int br_nf_forward_ip(void *priv,
				     struct sk_buff *skb,
				     const struct nf_hook_state *state)
{
	struct nf_bridge_info *nf_bridge;
	struct net_device *parent;
	u_int8_t pf;

	if (!skb->nf_bridge)
		return NF_ACCEPT;

	/* Need exclusive nf_bridge_info since we might have multiple
	 * different physoutdevs. */
	if (!nf_bridge_unshare(skb))
		return NF_DROP;

	nf_bridge = nf_bridge_info_get(skb);
	if (!nf_bridge)
		return NF_DROP;

	parent = bridge_parent(state->out);
	if (!parent)
		return NF_DROP;

	if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
		pf = NFPROTO_IPV4;
	else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
		pf = NFPROTO_IPV6;
	else
		return NF_ACCEPT;

	nf_bridge_pull_encap_header(skb);

	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->pkt_otherhost = true;
	}

	if (pf == NFPROTO_IPV4) {
		if (br_validate_ipv4(state->net, skb))
			return NF_DROP;
		IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
	}

	if (pf == NFPROTO_IPV6) {
		if (br_validate_ipv6(state->net, skb))
			return NF_DROP;
		IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
	}

	nf_bridge->physoutdev = skb->dev;
	if (pf == NFPROTO_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_FORWARD, state->net, NULL, skb,
		brnf_get_logical_dev(skb, state->in),
		parent,	br_nf_forward_finish);

	return NF_STOLEN;
}

static unsigned int br_nf_forward_arp(void *priv,
				      struct sk_buff *skb,
				      const struct nf_hook_state *state)
{
	struct net_bridge_port *p;
	struct net_bridge *br;
	struct net_device **d = (struct net_device **)(skb->cb);

	p = br_port_get_rcu(state->out);
	if (p == NULL)
		return NF_ACCEPT;
	br = p->br;

	if (!brnf_call_arptables && !br->nf_call_arptables)
		return NF_ACCEPT;

	if (!IS_ARP(skb)) {
		if (!IS_VLAN_ARP(skb))
			return NF_ACCEPT;
		nf_bridge_pull_encap_header(skb);
	}

	if (arp_hdr(skb)->ar_pln != 4) {
		if (IS_VLAN_ARP(skb))
			nf_bridge_push_encap_header(skb);
		return NF_ACCEPT;
	}
	*d = state->in;
	NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, state->net, state->sk, skb,
		state->in, state->out, br_nf_forward_finish);

	return NF_STOLEN;
}
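/* For ARP the input bridge port is passed via skb->cb (stored above, read
 * back in br_nf_forward_finish()): ARP frames do not go through
 * setup_pre_routing(), so there is no physindev recorded for them.
 */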

static int br_nf_push_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct brnf_frag_data *data;
	int err;

	data = this_cpu_ptr(&brnf_frag_data_storage);
	err = skb_cow_head(skb, data->size);

	if (err) {
		kfree_skb(skb);
		return 0;
	}

	if (data->vlan_tci) {
		skb->vlan_tci = data->vlan_tci;
		skb->vlan_proto = data->vlan_proto;
	}

	skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size);
	__skb_push(skb, data->encap_size);

	nf_bridge_info_free(skb);
	return br_dev_queue_push_xmit(net, sk, skb);
}

static int
br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		  int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	unsigned int mtu = ip_skb_dst_mtu(sk, skb);
	struct iphdr *iph = ip_hdr(skb);

	if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
		     (IPCB(skb)->frag_max_size &&
		      IPCB(skb)->frag_max_size > mtu))) {
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return ip_do_fragment(net, sk, skb, output);
}

static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
{
	if (skb->nf_bridge->orig_proto == BRNF_PROTO_PPPOE)
		return PPPOE_SES_HLEN;
	return 0;
}
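/* If the frame originally carried a PPPoE session header, that header is
 * pushed back on before transmission, so br_nf_dev_queue_xmit() below has
 * to keep those 8 bytes of the device MTU in reserve when deciding whether
 * to fragment.
 */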

static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	unsigned int mtu, mtu_reserved;

	mtu_reserved = nf_bridge_mtu_reduction(skb);
	mtu = skb->dev->mtu;

	if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
		mtu = nf_bridge->frag_max_size;

	if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) {
		nf_bridge_info_free(skb);
		return br_dev_queue_push_xmit(net, sk, skb);
	}

	/* This is wrong! We should preserve the original fragment
	 * boundaries by preserving frag_list rather than refragmenting.
	 */
	if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) &&
	    skb->protocol == htons(ETH_P_IP)) {
		struct brnf_frag_data *data;

		if (br_validate_ipv4(net, skb))
			goto drop;

		IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;

		nf_bridge_update_protocol(skb);

		data = this_cpu_ptr(&brnf_frag_data_storage);

		data->vlan_tci = skb->vlan_tci;
		data->vlan_proto = skb->vlan_proto;
		data->encap_size = nf_bridge_encap_header_len(skb);
		data->size = ETH_HLEN + data->encap_size;

		skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
						 data->size);

		return br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit);
	}
	if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) &&
	    skb->protocol == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		struct brnf_frag_data *data;

		if (br_validate_ipv6(net, skb))
			goto drop;

		IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;

		nf_bridge_update_protocol(skb);

		data = this_cpu_ptr(&brnf_frag_data_storage);
		data->encap_size = nf_bridge_encap_header_len(skb);
		data->size = ETH_HLEN + data->encap_size;

		skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
						 data->size);

		if (v6ops)
			return v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit);

		kfree_skb(skb);
		return -EMSGSIZE;
	}
	nf_bridge_info_free(skb);
	return br_dev_queue_push_xmit(net, sk, skb);
 drop:
	kfree_skb(skb);
	return 0;
}

/* PF_BRIDGE/POST_ROUTING ********************************************/
static unsigned int br_nf_post_routing(void *priv,
				       struct sk_buff *skb,
				       const struct nf_hook_state *state)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct net_device *realoutdev = bridge_parent(skb->dev);
	u_int8_t pf;

	/* if nf_bridge is set, but ->physoutdev is NULL, this packet came in
	 * on a bridge, but was delivered locally and is now being routed:
	 *
	 * POST_ROUTING was already invoked from the ip stack.
	 */
	if (!nf_bridge || !nf_bridge->physoutdev)
		return NF_ACCEPT;

	if (!realoutdev)
		return NF_DROP;

	if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
		pf = NFPROTO_IPV4;
	else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
		pf = NFPROTO_IPV6;
	else
		return NF_ACCEPT;

	/* We assume any code from br_dev_queue_push_xmit onwards doesn't care
	 * about the value of skb->pkt_type. */
	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->pkt_otherhost = true;
	}

	nf_bridge_pull_encap_header(skb);
	if (pf == NFPROTO_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_POST_ROUTING, state->net, state->sk, skb,
		NULL, realoutdev,
		br_nf_dev_queue_xmit);

	return NF_STOLEN;
}

/* IP/SABOTAGE *****************************************************/
/* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
 * for the second time. */
static unsigned int ip_sabotage_in(void *priv,
				   struct sk_buff *skb,
				   const struct nf_hook_state *state)
{
	if (skb->nf_bridge && !skb->nf_bridge->in_prerouting &&
	    !netif_is_l3_master(skb->dev)) {
		state->okfn(state->net, state->sk, skb);
		return NF_STOLEN;
	}

	return NF_ACCEPT;
}

/* This is called when br_netfilter has called into iptables/netfilter,
 * and DNAT has taken place on a bridge-forwarded packet.
 *
 * neigh->output has created a new MAC header, with local br0 MAC
 * as saddr.
 *
 * This restores the original MAC saddr of the bridged packet
 * before invoking bridge forward logic to transmit the packet.
 */
static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

	skb_pull(skb, ETH_HLEN);
	nf_bridge->bridged_dnat = 0;

	BUILD_BUG_ON(sizeof(nf_bridge->neigh_header) != (ETH_HLEN - ETH_ALEN));

	skb_copy_to_linear_data_offset(skb, -(ETH_HLEN - ETH_ALEN),
				       nf_bridge->neigh_header,
				       ETH_HLEN - ETH_ALEN);
	skb->dev = nf_bridge->physindev;

	nf_bridge->physoutdev = NULL;
	br_handle_frame_finish(dev_net(skb->dev), NULL, skb);
}

static int br_nf_dev_xmit(struct sk_buff *skb)
{
	if (skb->nf_bridge && skb->nf_bridge->bridged_dnat) {
		br_nf_pre_routing_finish_bridge_slow(skb);
		return 1;
	}
	return 0;
}

static const struct nf_br_ops br_ops = {
	.br_dev_xmit_hook =	br_nf_dev_xmit,
};

void br_netfilter_enable(void)
{
}
EXPORT_SYMBOL_GPL(br_netfilter_enable);

/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
 * br_dev_queue_push_xmit is called afterwards */
static const struct nf_hook_ops br_nf_ops[] = {
	{
		.hook = br_nf_pre_routing,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_PRE_ROUTING,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_forward_ip,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_FORWARD,
		.priority = NF_BR_PRI_BRNF - 1,
	},
	{
		.hook = br_nf_forward_arp,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_FORWARD,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_post_routing,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_POST_ROUTING,
		.priority = NF_BR_PRI_LAST,
	},
	{
		.hook = ip_sabotage_in,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP_PRI_FIRST,
	},
	{
		.hook = ip_sabotage_in,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP6_PRI_FIRST,
	},
};
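/* The hooks above are registered per network namespace from
 * brnf_device_event() when the first bridge device appears in that
 * namespace, and removed again in brnf_exit_net().
 */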

static int brnf_device_event(struct notifier_block *unused, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct brnf_net *brnet;
	struct net *net;
	int ret;

	if (event != NETDEV_REGISTER || !(dev->priv_flags & IFF_EBRIDGE))
		return NOTIFY_DONE;

	ASSERT_RTNL();

	net = dev_net(dev);
	brnet = net_generic(net, brnf_net_id);
	if (brnet->enabled)
		return NOTIFY_OK;

	ret = nf_register_net_hooks(net, br_nf_ops, ARRAY_SIZE(br_nf_ops));
	if (ret)
		return NOTIFY_BAD;

	brnet->enabled = true;
	return NOTIFY_OK;
}

static void __net_exit brnf_exit_net(struct net *net)
{
	struct brnf_net *brnet = net_generic(net, brnf_net_id);

	if (!brnet->enabled)
		return;

	nf_unregister_net_hooks(net, br_nf_ops, ARRAY_SIZE(br_nf_ops));
	brnet->enabled = false;
}

static struct pernet_operations brnf_net_ops __read_mostly = {
	.exit = brnf_exit_net,
	.id   = &brnf_net_id,
	.size = sizeof(struct brnf_net),
};

static struct notifier_block brnf_notifier __read_mostly = {
	.notifier_call = brnf_device_event,
};

/* recursively invokes nf_hook_slow (again), skipping already-called
 * hooks (<= NF_BR_PRI_BRNF).
 *
 * Called with rcu read lock held.
 */
int br_nf_hook_thresh(unsigned int hook, struct net *net,
		      struct sock *sk, struct sk_buff *skb,
		      struct net_device *indev,
		      struct net_device *outdev,
		      int (*okfn)(struct net *, struct sock *,
				  struct sk_buff *))
{
	const struct nf_hook_entries *e;
	struct nf_hook_state state;
	struct nf_hook_ops **ops;
	unsigned int i;
	int ret;

	e = rcu_dereference(net->nf.hooks_bridge[hook]);
	if (!e)
		return okfn(net, sk, skb);

	ops = nf_hook_entries_get_hook_ops(e);
	for (i = 0; i < e->num_hook_entries &&
	      ops[i]->priority <= NF_BR_PRI_BRNF; i++)
		;

	nf_hook_state_init(&state, hook, NFPROTO_BRIDGE, indev, outdev,
			   sk, net, okfn);

	ret = nf_hook_slow(skb, &state, e, i);
	if (ret == 1)
		ret = okfn(net, sk, skb);

	return ret;
}

#ifdef CONFIG_SYSCTL
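/* proc handler shared by all bridge-nf sysctls below: any nonzero value
 * written is normalized to 1, so the knobs behave as plain booleans.
 */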
static
int brnf_sysctl_call_tables(struct ctl_table *ctl, int write,
			    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (write && *(int *)(ctl->data))
		*(int *)(ctl->data) = 1;
	return ret;
}

static struct ctl_table brnf_table[] = {
	{
		.procname	= "bridge-nf-call-arptables",
		.data		= &brnf_call_arptables,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-call-iptables",
		.data		= &brnf_call_iptables,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-call-ip6tables",
		.data		= &brnf_call_ip6tables,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-filter-vlan-tagged",
		.data		= &brnf_filter_vlan_tagged,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-filter-pppoe-tagged",
		.data		= &brnf_filter_pppoe_tagged,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-pass-vlan-input-dev",
		.data		= &brnf_pass_vlan_indev,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{ }
};
#endif
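/* These entries land under /proc/sys/net/bridge/.  For example (assuming
 * the module is loaded and sysctl support is built in):
 *
 *   sysctl -w net.bridge.bridge-nf-call-iptables=0
 *
 * turns off the global iptables call for bridged IPv4 traffic; a bridge's
 * own nf_call_iptables option can still enable it per device.
 */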

static int __init br_netfilter_init(void)
{
	int ret;

	ret = register_pernet_subsys(&brnf_net_ops);
	if (ret < 0)
		return ret;

	ret = register_netdevice_notifier(&brnf_notifier);
	if (ret < 0) {
		unregister_pernet_subsys(&brnf_net_ops);
		return ret;
	}

#ifdef CONFIG_SYSCTL
	brnf_sysctl_header = register_net_sysctl(&init_net, "net/bridge", brnf_table);
	if (brnf_sysctl_header == NULL) {
		printk(KERN_WARNING
		       "br_netfilter: can't register to sysctl.\n");
		unregister_netdevice_notifier(&brnf_notifier);
		unregister_pernet_subsys(&brnf_net_ops);
		return -ENOMEM;
	}
#endif
	RCU_INIT_POINTER(nf_br_ops, &br_ops);
	printk(KERN_NOTICE "Bridge firewalling registered\n");
	return 0;
}

static void __exit br_netfilter_fini(void)
{
	RCU_INIT_POINTER(nf_br_ops, NULL);
	unregister_netdevice_notifier(&brnf_notifier);
	unregister_pernet_subsys(&brnf_net_ops);
#ifdef CONFIG_SYSCTL
	unregister_net_sysctl_table(brnf_sysctl_header);
#endif
}

module_init(br_netfilter_init);
module_exit(br_netfilter_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lennert Buytenhek <buytenh@gnu.org>");
MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
MODULE_DESCRIPTION("Linux ethernet netfilter firewall bridge");