/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/flow_offload.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/pkt_cls.h>

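/*
 * Allocate a flow rule with room for @num_actions actions and point the
 * flow_rule match at the embedded nft_flow_match dissector/mask/key.
 */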
static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
{
	struct nft_flow_rule *flow;

	flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL);
	if (!flow)
		return NULL;

	flow->rule = flow_rule_alloc(num_actions);
	if (!flow->rule) {
		kfree(flow);
		return NULL;
	}

	flow->rule->match.dissector	= &flow->match.dissector;
	flow->rule->match.mask		= &flow->match.mask;
	flow->rule->match.key		= &flow->match.key;

	return flow;
}

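/*
 * Set the control key address type unless a control key has already been
 * populated for this flow rule.
 */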
void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
				 enum flow_dissector_key_id addr_type)
{
	struct nft_flow_match *match = &flow->match;
	struct nft_flow_key *mask = &match->mask;
	struct nft_flow_key *key = &match->key;

	if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL))
		return;

	key->control.addr_type = addr_type;
	mask->control.addr_type = 0xffff;
	match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
	match->dissector.offset[FLOW_DISSECTOR_KEY_CONTROL] =
		offsetof(struct nft_flow_key, control);
}

struct nft_offload_ethertype {
	__be16 value;
	__be16 mask;
};

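/*
 * The flow dissector expects the innermost ethertype in the basic key and
 * the tag protocol identifiers in the vlan/cvlan keys. Rotate the values
 * collected from the nft expressions into that layout, enabling the CVLAN
 * key when a second (QinQ) tag is being matched.
 */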
static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
					struct nft_flow_rule *flow)
{
	struct nft_flow_match *match = &flow->match;
	struct nft_offload_ethertype ethertype = {
		.value	= match->key.basic.n_proto,
		.mask	= match->mask.basic.n_proto,
	};

	if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_VLAN) &&
	    (match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) ||
	     match->key.vlan.vlan_tpid == htons(ETH_P_8021AD))) {
		match->key.basic.n_proto = match->key.cvlan.vlan_tpid;
		match->mask.basic.n_proto = match->mask.cvlan.vlan_tpid;
		match->key.cvlan.vlan_tpid = match->key.vlan.vlan_tpid;
		match->mask.cvlan.vlan_tpid = match->mask.vlan.vlan_tpid;
		match->key.vlan.vlan_tpid = ethertype.value;
		match->mask.vlan.vlan_tpid = ethertype.mask;
		match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] =
			offsetof(struct nft_flow_key, cvlan);
		match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);
	} else if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC) &&
		   (match->key.basic.n_proto == htons(ETH_P_8021Q) ||
		    match->key.basic.n_proto == htons(ETH_P_8021AD))) {
		match->key.basic.n_proto = match->key.vlan.vlan_tpid;
		match->mask.basic.n_proto = match->mask.vlan.vlan_tpid;
		match->key.vlan.vlan_tpid = ethertype.value;
		match->mask.vlan.vlan_tpid = ethertype.mask;
		match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] =
			offsetof(struct nft_flow_key, vlan);
		match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
	}
}

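/*
 * Translate an nft rule into a flow_rule: count the expressions that map to
 * flow actions, then let each expression's ->offload() callback fill in the
 * match and action entries. Returns an ERR_PTR() if any expression cannot
 * be offloaded.
 */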
struct nft_flow_rule *nft_flow_rule_create(struct net *net,
					   const struct nft_rule *rule)
{
	struct nft_offload_ctx *ctx;
	struct nft_flow_rule *flow;
	int num_actions = 0, err;
	struct nft_expr *expr;

	expr = nft_expr_first(rule);
	while (nft_expr_more(rule, expr)) {
		if (expr->ops->offload_flags & NFT_OFFLOAD_F_ACTION)
			num_actions++;

		expr = nft_expr_next(expr);
	}

	if (num_actions == 0)
		return ERR_PTR(-EOPNOTSUPP);

	flow = nft_flow_rule_alloc(num_actions);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	expr = nft_expr_first(rule);

	ctx = kzalloc(sizeof(struct nft_offload_ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto err_out;
	}
	ctx->net = net;
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;

	while (nft_expr_more(rule, expr)) {
		if (!expr->ops->offload) {
			err = -EOPNOTSUPP;
			goto err_out;
		}
		err = expr->ops->offload(ctx, flow, expr);
		if (err < 0)
			goto err_out;

		expr = nft_expr_next(expr);
	}
	nft_flow_rule_transfer_vlan(ctx, flow);

	flow->proto = ctx->dep.l3num;
	kfree(ctx);

	return flow;
err_out:
	kfree(ctx);
	nft_flow_rule_destroy(flow);

	return ERR_PTR(err);
}

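/* Release the device references held by redirect/mirred actions. */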
void nft_flow_rule_destroy(struct nft_flow_rule *flow)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, &flow->rule->action) {
		switch (entry->id) {
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED:
			dev_put(entry->dev);
			break;
		default:
			break;
		}
	}
	kfree(flow->rule);
	kfree(flow);
}

void nft_offload_set_dependency(struct nft_offload_ctx *ctx,
				enum nft_offload_dep_type type)
{
	ctx->dep.type = type;
}

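/*
 * Record the layer 3 or layer 4 protocol number that the previously set
 * dependency refers to, then clear the dependency type.
 */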
void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
				   const void *data, u32 len)
{
	switch (ctx->dep.type) {
	case NFT_OFFLOAD_DEP_NETWORK:
		WARN_ON(len != sizeof(__u16));
		memcpy(&ctx->dep.l3num, data, sizeof(__u16));
		break;
	case NFT_OFFLOAD_DEP_TRANSPORT:
		WARN_ON(len != sizeof(__u8));
		memcpy(&ctx->dep.protonum, data, sizeof(__u8));
		break;
	default:
		break;
	}
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
}

static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
					 __be16 proto, int priority,
					 struct netlink_ext_ack *extack)
{
	common->protocol = proto;
	common->prio = priority;
	common->extack = extack;
}

static int nft_setup_cb_call(enum tc_setup_type type, void *type_data,
			     struct list_head *cb_list)
{
	struct flow_block_cb *block_cb;
	int err;

	list_for_each_entry(block_cb, cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err < 0)
			return err;
	}
	return 0;
}

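/* Hardware offload requires a chain priority in the range 1..USHRT_MAX. */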
int nft_chain_offload_priority(struct nft_base_chain *basechain)
{
	if (basechain->ops.priority <= 0 ||
	    basechain->ops.priority > USHRT_MAX)
		return -1;

	return 0;
}

static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow,
				       const struct nft_base_chain *basechain,
				       const struct nft_rule *rule,
				       const struct nft_flow_rule *flow,
				       struct netlink_ext_ack *extack,
				       enum flow_cls_command command)
{
	__be16 proto = ETH_P_ALL;

	memset(cls_flow, 0, sizeof(*cls_flow));

	if (flow)
		proto = flow->proto;

	nft_flow_offload_common_init(&cls_flow->common, proto,
				     basechain->ops.priority, extack);
	cls_flow->command = command;
	cls_flow->cookie = (unsigned long) rule;
	if (flow)
		cls_flow->rule = flow->rule;
}

static int nft_flow_offload_cmd(const struct nft_chain *chain,
				const struct nft_rule *rule,
				struct nft_flow_rule *flow,
				enum flow_cls_command command,
				struct flow_cls_offload *cls_flow)
{
	struct netlink_ext_ack extack = {};
	struct nft_base_chain *basechain;

	if (!nft_is_base_chain(chain))
		return -EOPNOTSUPP;

	basechain = nft_base_chain(chain);
	nft_flow_cls_offload_setup(cls_flow, basechain, rule, flow, &extack,
				   command);

	return nft_setup_cb_call(TC_SETUP_CLSFLOWER, cls_flow,
				 &basechain->flow_block.cb_list);
}

static int nft_flow_offload_rule(const struct nft_chain *chain,
				 struct nft_rule *rule,
				 struct nft_flow_rule *flow,
				 enum flow_cls_command command)
{
	struct flow_cls_offload cls_flow;

	return nft_flow_offload_cmd(chain, rule, flow, command, &cls_flow);
}

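/*
 * Query hardware counters via FLOW_CLS_STATS and hand the result to every
 * expression that implements ->offload_stats().
 */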
int nft_flow_rule_stats(const struct nft_chain *chain,
			const struct nft_rule *rule)
{
	struct flow_cls_offload cls_flow = {};
	struct nft_expr *expr, *next;
	int err;

	err = nft_flow_offload_cmd(chain, rule, NULL, FLOW_CLS_STATS,
				   &cls_flow);
	if (err < 0)
		return err;

	nft_rule_for_each_expr(expr, next, rule) {
		if (expr->ops->offload_stats)
			expr->ops->offload_stats(expr, &cls_flow.stats);
	}

	return 0;
}

static int nft_flow_offload_bind(struct flow_block_offload *bo,
				 struct nft_base_chain *basechain)
{
	list_splice(&bo->cb_list, &basechain->flow_block.cb_list);
	return 0;
}

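/*
 * On unbind, ask the driver to destroy the offloaded rules of this chain
 * before releasing the flow block callbacks.
 */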
static int nft_flow_offload_unbind(struct flow_block_offload *bo,
				   struct nft_base_chain *basechain)
{
	struct flow_block_cb *block_cb, *next;
	struct flow_cls_offload cls_flow;
	struct netlink_ext_ack extack;
	struct nft_chain *chain;
	struct nft_rule *rule;

	chain = &basechain->chain;
	list_for_each_entry(rule, &chain->rules, list) {
		memset(&extack, 0, sizeof(extack));
		nft_flow_cls_offload_setup(&cls_flow, basechain, rule, NULL,
					   &extack, FLOW_CLS_DESTROY);
		nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow, &bo->cb_list);
	}

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	}

	return 0;
}

static int nft_block_setup(struct nft_base_chain *basechain,
			   struct flow_block_offload *bo,
			   enum flow_block_command cmd)
{
	int err;

	switch (cmd) {
	case FLOW_BLOCK_BIND:
		err = nft_flow_offload_bind(bo, basechain);
		break;
	case FLOW_BLOCK_UNBIND:
		err = nft_flow_offload_unbind(bo, basechain);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

static void nft_flow_block_offload_init(struct flow_block_offload *bo,
					struct net *net,
					enum flow_block_command cmd,
					struct nft_base_chain *basechain,
					struct netlink_ext_ack *extack)
{
	memset(bo, 0, sizeof(*bo));
	bo->net		= net;
	bo->block	= &basechain->flow_block;
	bo->command	= cmd;
	bo->binder_type	= FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	bo->extack	= extack;
	bo->cb_list_head = &basechain->flow_block.cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}

static int nft_block_offload_cmd(struct nft_base_chain *chain,
				 struct net_device *dev,
				 enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo;
	int err;

	nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0)
		return err;

	return nft_block_setup(chain, &bo, cmd);
}

static void nft_indr_block_cleanup(struct flow_block_cb *block_cb)
{
	struct nft_base_chain *basechain = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct netlink_ext_ack extack = {};
	struct nftables_pernet *nft_net;
	struct net *net = dev_net(dev);
	struct flow_block_offload bo;

	nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND,
				    basechain, &extack);
	nft_net = nft_pernet(net);
	mutex_lock(&nft_net->commit_mutex);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	nft_flow_offload_unbind(&bo, basechain);
	mutex_unlock(&nft_net->commit_mutex);
}

static int nft_indr_block_offload_cmd(struct nft_base_chain *basechain,
				      struct net_device *dev,
				      enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo;
	int err;

	nft_flow_block_offload_init(&bo, dev_net(dev), cmd, basechain, &extack);

	err = flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_BLOCK, basechain, &bo,
					  nft_indr_block_cleanup);
	if (err < 0)
		return err;

	if (list_empty(&bo.cb_list))
		return -EOPNOTSUPP;

	return nft_block_setup(basechain, &bo, cmd);
}

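/*
 * Use ndo_setup_tc() when the device implements it, otherwise fall back to
 * the indirect flow block infrastructure.
 */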
static int nft_chain_offload_cmd(struct nft_base_chain *basechain,
				 struct net_device *dev,
				 enum flow_block_command cmd)
{
	int err;

	if (dev->netdev_ops->ndo_setup_tc)
		err = nft_block_offload_cmd(basechain, dev, cmd);
	else
		err = nft_indr_block_offload_cmd(basechain, dev, cmd);

	return err;
}

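/*
 * Bind or unbind the flow block on every hook device of the basechain, or
 * only on @this_dev if given. A failed bind over all devices is unwound by
 * unbinding the devices that were already set up.
 */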
static int nft_flow_block_chain(struct nft_base_chain *basechain,
				const struct net_device *this_dev,
				enum flow_block_command cmd)
{
	struct net_device *dev;
	struct nft_hook *hook;
	int err, i = 0;

	list_for_each_entry(hook, &basechain->hook_list, list) {
		dev = hook->ops.dev;
		if (this_dev && this_dev != dev)
			continue;

		err = nft_chain_offload_cmd(basechain, dev, cmd);
		if (err < 0 && cmd == FLOW_BLOCK_BIND) {
			if (!this_dev)
				goto err_flow_block;

			return err;
		}
		i++;
	}

	return 0;

err_flow_block:
	list_for_each_entry(hook, &basechain->hook_list, list) {
		if (i-- <= 0)
			break;

		dev = hook->ops.dev;
		nft_chain_offload_cmd(basechain, dev, FLOW_BLOCK_UNBIND);
	}
	return err;
}

static int nft_flow_offload_chain(struct nft_chain *chain, u8 *ppolicy,
				  enum flow_block_command cmd)
{
	struct nft_base_chain *basechain;
	u8 policy;

	if (!nft_is_base_chain(chain))
		return -EOPNOTSUPP;

	basechain = nft_base_chain(chain);
	policy = ppolicy ? *ppolicy : basechain->policy;

	/* Only default policy to accept is supported for now. */
	if (cmd == FLOW_BLOCK_BIND && policy == NF_DROP)
		return -EOPNOTSUPP;

	return nft_flow_block_chain(basechain, NULL, cmd);
}

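/*
 * Undo the offload requests of the transactions already handled by
 * nft_flow_rule_offload_commit(), walking the commit list backwards from
 * the failing transaction.
 */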
static void nft_flow_rule_offload_abort(struct net *net,
					struct nft_trans *trans)
{
	struct nftables_pernet *nft_net = nft_pernet(net);
	int err = 0;

	list_for_each_entry_continue_reverse(trans, &nft_net->commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
			    nft_trans_chain_update(trans))
				continue;

			err = nft_flow_offload_chain(trans->ctx.chain, NULL,
						     FLOW_BLOCK_UNBIND);
			break;
		case NFT_MSG_DELCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_chain(trans->ctx.chain, NULL,
						     FLOW_BLOCK_BIND);
			break;
		case NFT_MSG_NEWRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    NULL, FLOW_CLS_DESTROY);
			break;
		case NFT_MSG_DELRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_REPLACE);
			break;
		}

		if (WARN_ON_ONCE(err))
			break;
	}
}

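/*
 * Walk the transaction list and push chain bind/unbind and rule add/delete
 * requests for hardware-offloaded netdev chains. On error, abort the
 * offload requests issued so far.
 */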
int nft_flow_rule_offload_commit(struct net *net)
{
	struct nftables_pernet *nft_net = nft_pernet(net);
	struct nft_trans *trans;
	int err = 0;
	u8 policy;

	list_for_each_entry(trans, &nft_net->commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
			    nft_trans_chain_update(trans))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(trans->ctx.chain, &policy,
						     FLOW_BLOCK_BIND);
			break;
		case NFT_MSG_DELCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(trans->ctx.chain, &policy,
						     FLOW_BLOCK_UNBIND);
			break;
		case NFT_MSG_NEWRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			if (trans->ctx.flags & NLM_F_REPLACE ||
			    !(trans->ctx.flags & NLM_F_APPEND)) {
				err = -EOPNOTSUPP;
				break;
			}
			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_REPLACE);
			break;
		case NFT_MSG_DELRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    NULL, FLOW_CLS_DESTROY);
			break;
		}

		if (err) {
			nft_flow_rule_offload_abort(net, trans);
			break;
		}
	}

	return err;
}

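/*
 * Find a hardware-offloaded netdev basechain that hooks into @dev. Called
 * with the commit mutex held.
 */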
static struct nft_chain *__nft_offload_get_chain(const struct nftables_pernet *nft_net,
						 struct net_device *dev)
{
	struct nft_base_chain *basechain;
	struct nft_hook *hook, *found;
	const struct nft_table *table;
	struct nft_chain *chain;

	list_for_each_entry(table, &nft_net->tables, list) {
		if (table->family != NFPROTO_NETDEV)
			continue;

		list_for_each_entry(chain, &table->chains, list) {
			if (!nft_is_base_chain(chain) ||
			    !(chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			found = NULL;
			basechain = nft_base_chain(chain);
			list_for_each_entry(hook, &basechain->hook_list, list) {
				if (hook->ops.dev != dev)
					continue;

				found = hook;
				break;
			}
			if (!found)
				continue;

			return chain;
		}
	}

	return NULL;
}

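/*
 * Netdevice notifier: when a hooked device is unregistered, unbind its
 * flow block so no offloaded rules are left behind.
 */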
static int nft_offload_netdev_event(struct notifier_block *this,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct nftables_pernet *nft_net;
	struct net *net = dev_net(dev);
	struct nft_chain *chain;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	nft_net = nft_pernet(net);
	mutex_lock(&nft_net->commit_mutex);
	chain = __nft_offload_get_chain(nft_net, dev);
	if (chain)
		nft_flow_block_chain(nft_base_chain(chain), dev,
				     FLOW_BLOCK_UNBIND);

	mutex_unlock(&nft_net->commit_mutex);

	return NOTIFY_DONE;
}

static struct notifier_block nft_offload_netdev_notifier = {
	.notifier_call	= nft_offload_netdev_event,
};

int nft_offload_init(void)
{
	return register_netdevice_notifier(&nft_offload_netdev_notifier);
}

void nft_offload_exit(void)
{
	unregister_netdevice_notifier(&nft_offload_netdev_notifier);
}