// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>

static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);

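/* Populate one direction of the flow tuple from the corresponding
 * conntrack tuple: addresses, L3/L4 protocol numbers and, for TCP and
 * UDP, the port pair (the conntrack union layout makes u.tcp.port
 * valid for UDP as well).
 */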
static void
flow_offload_fill_dir(struct flow_offload *flow,
		      enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
	struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple;

	ft->dir = dir;

	switch (ctt->src.l3num) {
	case NFPROTO_IPV4:
		ft->src_v4 = ctt->src.u3.in;
		ft->dst_v4 = ctt->dst.u3.in;
		break;
	case NFPROTO_IPV6:
		ft->src_v6 = ctt->src.u3.in6;
		ft->dst_v6 = ctt->dst.u3.in6;
		break;
	}

	ft->l3proto = ctt->src.l3num;
	ft->l4proto = ctt->dst.protonum;

	switch (ctt->dst.protonum) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		ft->src_port = ctt->src.u.tcp.port;
		ft->dst_port = ctt->dst.u.tcp.port;
		break;
	}
}

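/* Allocate a flow entry for the given conntrack entry and take a
 * reference on it; the reference is dropped in flow_offload_free().
 * GFP_ATOMIC is used since this may run from the packet path.
 */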
struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
{
	struct flow_offload *flow;

	if (unlikely(nf_ct_is_dying(ct)))
		return NULL;

	flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
	if (!flow)
		return NULL;

	refcount_inc(&ct->ct_general.use);
	flow->ct = ct;

	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);

	if (ct->status & IPS_SRC_NAT)
		__set_bit(NF_FLOW_SNAT, &flow->flags);
	if (ct->status & IPS_DST_NAT)
		__set_bit(NF_FLOW_DNAT, &flow->flags);

	return flow;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);

static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
{
	const struct rt6_info *rt;

	if (flow_tuple->l3proto == NFPROTO_IPV6) {
		rt = (const struct rt6_info *)flow_tuple->dst_cache;
		return rt6_get_cookie(rt);
	}

	return 0;
}

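/* Copy the route information for one direction into the flow tuple:
 * path MTU, ingress interface and encapsulation stack, plus the output
 * path. For the DIRECT xmit type the resolved L2 addresses and egress
 * interfaces are stored; for NEIGH/XFRM the dst_entry is cached along
 * with its cookie so that stale IPv6 routes can be detected later.
 */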
static int flow_offload_fill_route(struct flow_offload *flow,
				   const struct nf_flow_route *route,
				   enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
	struct dst_entry *dst = route->tuple[dir].dst;
	int i, j = 0;

	switch (flow_tuple->l3proto) {
	case NFPROTO_IPV4:
		flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true);
		break;
	case NFPROTO_IPV6:
		flow_tuple->mtu = ip6_dst_mtu_maybe_forward(dst, true);
		break;
	}

	flow_tuple->iifidx = route->tuple[dir].in.ifindex;
	for (i = route->tuple[dir].in.num_encaps - 1; i >= 0; i--) {
		flow_tuple->encap[j].id = route->tuple[dir].in.encap[i].id;
		flow_tuple->encap[j].proto = route->tuple[dir].in.encap[i].proto;
		if (route->tuple[dir].in.ingress_vlans & BIT(i))
			flow_tuple->in_vlan_ingress |= BIT(j);
		j++;
	}
	flow_tuple->encap_num = route->tuple[dir].in.num_encaps;

	switch (route->tuple[dir].xmit_type) {
	case FLOW_OFFLOAD_XMIT_DIRECT:
		memcpy(flow_tuple->out.h_dest, route->tuple[dir].out.h_dest,
		       ETH_ALEN);
		memcpy(flow_tuple->out.h_source, route->tuple[dir].out.h_source,
		       ETH_ALEN);
		flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
		flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex;
		break;
	case FLOW_OFFLOAD_XMIT_XFRM:
	case FLOW_OFFLOAD_XMIT_NEIGH:
		flow_tuple->dst_cache = dst;
		flow_tuple->dst_cookie = flow_offload_dst_cookie(flow_tuple);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	flow_tuple->xmit_type = route->tuple[dir].xmit_type;

	return 0;
}

static void nft_flow_dst_release(struct flow_offload *flow,
				 enum flow_offload_tuple_dir dir)
{
	if (flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
	    flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)
		dst_release(flow->tuplehash[dir].tuple.dst_cache);
}

void flow_offload_route_init(struct flow_offload *flow,
			     const struct nf_flow_route *route)
{
	flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
	flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
	flow->type = NF_FLOW_OFFLOAD_ROUTE;
}
EXPORT_SYMBOL_GPL(flow_offload_route_init);

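/* While a connection is offloaded, conntrack does not see its packets,
 * so the tracked TCP window state is stale. Clearing td_maxwin makes
 * conntrack re-initialize window tracking from the next packets it
 * sees once the flow returns to the slow path.
 */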
static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
	tcp->seen[0].td_maxwin = 0;
	tcp->seen[1].td_maxwin = 0;
}

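/* On teardown, rearm the conntrack timeout as if the entry had just
 * left the regular per-state timeout path: state timeout minus the
 * offload extension, clamped at zero. ct->timeout is only ever lowered
 * here, never extended.
 */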
static void flow_offload_fixup_ct(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	int l4num = nf_ct_protonum(ct);
	s32 timeout;

	if (l4num == IPPROTO_TCP) {
		struct nf_tcp_net *tn = nf_tcp_pernet(net);

		flow_offload_fixup_tcp(&ct->proto.tcp);

		timeout = tn->timeouts[ct->proto.tcp.state];
		timeout -= tn->offload_timeout;
	} else if (l4num == IPPROTO_UDP) {
		struct nf_udp_net *tn = nf_udp_pernet(net);
		enum udp_conntrack state =
			test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ?
			UDP_CT_REPLIED : UDP_CT_UNREPLIED;

		timeout = tn->timeouts[state];
		timeout -= tn->offload_timeout;
	} else {
		return;
	}

	if (timeout < 0)
		timeout = 0;

	if (nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout)
		WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
}

static void flow_offload_route_release(struct flow_offload *flow)
{
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_REPLY);
}

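/* Release the cached dsts (for route-type flows), drop the conntrack
 * reference taken in flow_offload_alloc() and free the entry after an
 * RCU grace period, since lookups run under RCU.
 */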
void flow_offload_free(struct flow_offload *flow)
{
	switch (flow->type) {
	case NF_FLOW_OFFLOAD_ROUTE:
		flow_offload_route_release(flow);
		break;
	default:
		break;
	}
	nf_ct_put(flow->ct);
	kfree_rcu(flow, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);

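/* Hash and compare only the lookup key, i.e. the leading part of
 * struct flow_offload_tuple up to the __hash marker (addresses, ports,
 * iifidx, protocols, encap); everything after it is per-entry offload
 * state, not key material.
 */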
static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple *tuple = data;

	return jhash(tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple_rhash *tuplehash = data;

	return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
				 const void *ptr)
{
	const struct flow_offload_tuple *tuple = arg->key;
	const struct flow_offload_tuple_rhash *x = ptr;

	if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, __hash)))
		return 1;

	return 0;
}

static const struct rhashtable_params nf_flow_offload_rhash_params = {
	.head_offset = offsetof(struct flow_offload_tuple_rhash, node),
	.hashfn = flow_offload_hash,
	.obj_hashfn = flow_offload_hash_obj,
	.obj_cmpfn = flow_offload_hash_cmp,
	.automatic_shrinking = true,
};

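/* Flow entries use the per-netns TCP/UDP offload timeout when the
 * protocol defines one; other protocols fall back to the generic
 * NF_FLOW_TIMEOUT.
 */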
unsigned long flow_offload_get_timeout(struct flow_offload *flow)
{
	unsigned long timeout = NF_FLOW_TIMEOUT;
	struct net *net = nf_ct_net(flow->ct);
	int l4num = nf_ct_protonum(flow->ct);

	if (l4num == IPPROTO_TCP) {
		struct nf_tcp_net *tn = nf_tcp_pernet(net);

		timeout = tn->offload_timeout;
	} else if (l4num == IPPROTO_UDP) {
		struct nf_udp_net *tn = nf_udp_pernet(net);

		timeout = tn->offload_timeout;
	}

	return timeout;
}

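/* Insert both directions of the flow into the table and, if the
 * flowtable supports hardware offload, schedule the hardware add.
 * If the second insertion fails, the first one is backed out and the
 * caller keeps ownership of @flow.
 *
 * Typical caller sequence (a sketch, not code from this file; see the
 * nft flow_offload expression for a real user):
 *
 *	flow = flow_offload_alloc(ct);
 *	flow_offload_route_init(flow, &route);
 *	if (flow_offload_add(flowtable, flow) < 0)
 *		flow_offload_free(flow);
 */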
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
	int err;

	flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[0].node,
				     nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[1].node,
				     nf_flow_offload_rhash_params);
	if (err < 0) {
		rhashtable_remove_fast(&flow_table->rhashtable,
				       &flow->tuplehash[0].node,
				       nf_flow_offload_rhash_params);
		return err;
	}

	nf_ct_offload_timeout(flow->ct);

	if (nf_flowtable_hw_offload(flow_table)) {
		__set_bit(NF_FLOW_HW, &flow->flags);
		nf_flow_offload_add(flow_table, flow);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);

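/* Called from the packet path for packets matching the flow: bump the
 * flow timeout, rate-limiting the write to roughly once per second
 * unless @force is set, and re-push the entry to hardware if the
 * flowtable does hardware offload.
 */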
void flow_offload_refresh(struct nf_flowtable *flow_table,
			  struct flow_offload *flow, bool force)
{
	u32 timeout;

	timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
	if (force || timeout - READ_ONCE(flow->timeout) > HZ)
		WRITE_ONCE(flow->timeout, timeout);
	else
		return;

	if (likely(!nf_flowtable_hw_offload(flow_table)))
		return;

	nf_flow_offload_add(flow_table, flow);
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);

static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
	return nf_flow_timeout_delta(flow->timeout) <= 0;
}

static void flow_offload_del(struct nf_flowtable *flow_table,
			     struct flow_offload *flow)
{
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
			       nf_flow_offload_rhash_params);
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
			       nf_flow_offload_rhash_params);
	flow_offload_free(flow);
}

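/* Mark the flow for removal and hand the connection back to conntrack:
 * clear IPS_OFFLOAD so conntrack resumes managing the entry, and fix
 * up its timeout. The flow itself is reclaimed by the garbage
 * collector.
 */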
void flow_offload_teardown(struct flow_offload *flow)
{
	clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);
	set_bit(NF_FLOW_TEARDOWN, &flow->flags);
	flow_offload_fixup_ct(flow->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);

struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
		    struct flow_offload_tuple *tuple)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload *flow;
	int dir;

	tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
				      nf_flow_offload_rhash_params);
	if (!tuplehash)
		return NULL;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
		return NULL;

	if (unlikely(nf_ct_is_dying(flow->ct)))
		return NULL;

	return tuplehash;
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);

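/* Walk all flow entries. The rhashtable walk may run concurrently with
 * insertions and deletions, so @iter must tolerate seeing an entry
 * twice during a resize; only the ORIGINAL direction is passed to
 * @iter, so each flow is visited once per walk.
 */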
static int
nf_flow_table_iterate(struct nf_flowtable *flow_table,
		      void (*iter)(struct nf_flowtable *flowtable,
				   struct flow_offload *flow, void *data),
		      void *data)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct rhashtable_iter hti;
	struct flow_offload *flow;
	int err = 0;

	rhashtable_walk_enter(&flow_table->rhashtable, &hti);
	rhashtable_walk_start(&hti);

	while ((tuplehash = rhashtable_walk_next(&hti))) {
		if (IS_ERR(tuplehash)) {
			if (PTR_ERR(tuplehash) != -EAGAIN) {
				err = PTR_ERR(tuplehash);
				break;
			}
			continue;
		}
		if (tuplehash->tuple.dir)
			continue;

		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

		iter(flow_table, flow, data);
	}
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	return err;
}

static bool nf_flow_custom_gc(struct nf_flowtable *flow_table,
			      const struct flow_offload *flow)
{
	return flow_table->type->gc && flow_table->type->gc(flow);
}

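/* One GC pass over a flow: tear down expired, dying or custom-GC'ed
 * flows, then walk the hardware offload state machine. A hardware
 * offloaded flow must be deleted from hardware (NF_FLOW_HW_DYING, then
 * NF_FLOW_HW_DEAD) before the software entry can be freed. Flows that
 * stay alive get their hardware stats refreshed.
 */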
static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
				    struct flow_offload *flow, void *data)
{
	if (nf_flow_has_expired(flow) ||
	    nf_ct_is_dying(flow->ct) ||
	    nf_flow_custom_gc(flow_table, flow))
		flow_offload_teardown(flow);

	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
		if (test_bit(NF_FLOW_HW, &flow->flags)) {
			if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
				nf_flow_offload_del(flow_table, flow);
			else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags))
				flow_offload_del(flow_table, flow);
		} else {
			flow_offload_del(flow_table, flow);
		}
	} else if (test_bit(NF_FLOW_HW, &flow->flags)) {
		nf_flow_offload_stats(flow_table, flow);
	}
}

void nf_flow_table_gc_run(struct nf_flowtable *flow_table)
{
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
}

static void nf_flow_offload_work_gc(struct work_struct *work)
{
	struct nf_flowtable *flow_table;

	flow_table = container_of(work, struct nf_flowtable, gc_work.work);
	nf_flow_table_gc_run(flow_table);
	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}

static void nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
}

static void nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace2(&udph->check, skb, port,
					 new_port, false);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
			     u8 protocol, __be16 port, __be16 new_port)
{
	switch (protocol) {
	case IPPROTO_TCP:
		nf_flow_nat_port_tcp(skb, thoff, port, new_port);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_port_udp(skb, thoff, port, new_port);
		break;
	}
}

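/* Rewrite the TCP/UDP port for a packet on a NAT'ed flow and fix up
 * the transport checksum. struct flow_ports overlays the first four
 * bytes of the TCP/UDP header, so source/dest can be mangled without
 * protocol-specific parsing.
 */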
void nf_flow_snat_port(const struct flow_offload *flow,
		       struct sk_buff *skb, unsigned int thoff,
		       u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
		hdr->source = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
		hdr->dest = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_snat_port);

void nf_flow_dnat_port(const struct flow_offload *flow, struct sk_buff *skb,
		       unsigned int thoff, u8 protocol,
		       enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
		hdr->dest = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
		hdr->source = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);

int nf_flow_table_init(struct nf_flowtable *flowtable)
{
	int err;

	INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
	flow_block_init(&flowtable->flow_block);
	init_rwsem(&flowtable->flow_block_lock);

	err = rhashtable_init(&flowtable->rhashtable,
			      &nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	queue_delayed_work(system_power_efficient_wq,
			   &flowtable->gc_work, HZ);

	mutex_lock(&flowtable_lock);
	list_add(&flowtable->list, &flowtables);
	mutex_unlock(&flowtable_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_table_init);

static void nf_flow_table_do_cleanup(struct nf_flowtable *flow_table,
				     struct flow_offload *flow, void *data)
{
	struct net_device *dev = data;

	if (!dev) {
		flow_offload_teardown(flow);
		return;
	}

	if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
	    (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
	     flow->tuplehash[1].tuple.iifidx == dev->ifindex))
		flow_offload_teardown(flow);
}

void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
			      struct net_device *dev)
{
	nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
	flush_delayed_work(&flowtable->gc_work);
	nf_flow_table_offload_flush(flowtable);
}

void nf_flow_table_cleanup(struct net_device *dev)
{
	struct nf_flowtable *flowtable;

	mutex_lock(&flowtable_lock);
	list_for_each_entry(flowtable, &flowtables, list)
		nf_flow_table_gc_cleanup(flowtable, dev);
	mutex_unlock(&flowtable_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);

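/* Flowtable removal: unlink it, stop the GC worker, flush pending
 * offload work, then tear down and free every remaining flow before
 * destroying the rhashtable. A second flush runs after the final GC
 * pass since tearing down flows can queue new hardware del work.
 */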
void nf_flow_table_free(struct nf_flowtable *flow_table)
{
	mutex_lock(&flowtable_lock);
	list_del(&flow_table->list);
	mutex_unlock(&flowtable_lock);

	cancel_delayed_work_sync(&flow_table->gc_work);
	nf_flow_table_offload_flush(flow_table);
	/* ... no more pending work after this stage ... */
	nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
	nf_flow_table_gc_run(flow_table);
	nf_flow_table_offload_flush_cleanup(flow_table);
	rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);

static int nf_flow_table_init_net(struct net *net)
{
	net->ft.stat = alloc_percpu(struct nf_flow_table_stat);
	return net->ft.stat ? 0 : -ENOMEM;
}

static void nf_flow_table_fini_net(struct net *net)
{
	free_percpu(net->ft.stat);
}

static int nf_flow_table_pernet_init(struct net *net)
{
	int ret;

	ret = nf_flow_table_init_net(net);
	if (ret < 0)
		return ret;

	ret = nf_flow_table_init_proc(net);
	if (ret < 0)
		goto out_proc;

	return 0;

out_proc:
	nf_flow_table_fini_net(net);
	return ret;
}

static void nf_flow_table_pernet_exit(struct list_head *net_exit_list)
{
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		nf_flow_table_fini_proc(net);
		nf_flow_table_fini_net(net);
	}
}

static struct pernet_operations nf_flow_table_net_ops = {
	.init = nf_flow_table_pernet_init,
	.exit_batch = nf_flow_table_pernet_exit,
};

static int __init nf_flow_table_module_init(void)
{
	int ret;

	ret = register_pernet_subsys(&nf_flow_table_net_ops);
	if (ret < 0)
		return ret;

	ret = nf_flow_table_offload_init();
	if (ret)
		goto out_offload;

	return 0;

out_offload:
	unregister_pernet_subsys(&nf_flow_table_net_ops);
	return ret;
}

static void __exit nf_flow_table_module_exit(void)
{
	nf_flow_table_offload_exit();
	unregister_pernet_subsys(&nf_flow_table_net_ops);
}

module_init(nf_flow_table_module_init);
module_exit(nf_flow_table_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("Netfilter flow table module");