/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>

/* TC action not accessible from user space */
#define TC_ACT_REINSERT		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
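
/*
 * Illustrative sketch (not part of this header): a classifier's ->walk()
 * implementation typically iterates its filters, honouring the skip/count
 * bookkeeping and setting stop when fn() reports an error. The "my_*"
 * names and the flist list are hypothetical.
 *
 *	static void my_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 *	{
 *		struct my_head *head = rtnl_dereference(tp->root);
 *		struct my_filter *f;
 *
 *		list_for_each_entry(f, &head->flist, link) {
 *			if (arg->count < arg->skip)
 *				goto skip;
 *			if (arg->fn(tp, f, arg) < 0) {
 *				arg->stop = 1;
 *				break;
 *			}
 *	skip:
 *			arg->count++;
 *		}
 *	}
 */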

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

enum tcf_block_binder_type {
	TCF_BLOCK_BINDER_TYPE_UNSPEC,
	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

struct tcf_block_ext_info {
	enum tcf_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return tcf_block_q(block)->dev_queue->dev;
}

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident);
void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack);
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack);
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident);
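
/*
 * Illustrative sketch: a driver usually registers a block callback while
 * handling TC_SETUP_BLOCK in its ->ndo_setup_tc(). The "my_*" names are
 * hypothetical; struct tc_block_offload is defined later in this file.
 *
 *	static int my_setup_tc_block(struct net_device *dev,
 *				     struct tc_block_offload *f)
 *	{
 *		if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
 *			return -EOPNOTSUPP;
 *
 *		switch (f->command) {
 *		case TC_BLOCK_BIND:
 *			return tcf_block_cb_register(f->block, my_block_cb,
 *						     dev, dev, f->extack);
 *		case TC_BLOCK_UNBIND:
 *			tcf_block_cb_unregister(f->block, my_block_cb, dev);
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */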

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);

#else
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return NULL;
}

static inline
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	return NULL;
}

static inline
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
}

static inline
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return 0;
}

static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack)
{
	return NULL;
}

static inline
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb)
{
}

static inline
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	if (!q)
		return;
	cl = __cls_set_class(&r->class, 0);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int	nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = NULL;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
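
/*
 * Illustrative sketch: a classifier initialises its extensions with the
 * TLV types it exports to the generic extensions API, e.g. as cls_basic
 * does for a filter "f":
 *
 *	err = tcf_exts_init(&f->exts, TCA_BASIC_ACT, TCA_BASIC_POLICE);
 *	if (err < 0)
 *		goto errout;
 */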

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case; otherwise they may race
 * with tc_action_net_exit(). Return true in all other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}
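
/*
 * Illustrative sketch of the intended calling convention: defer freeing
 * through a work item only while the netns is alive, otherwise free
 * synchronously. __my_delete_filter() and my_delete_filter_work() are
 * hypothetical helpers; the deferred path must call tcf_exts_put_net()
 * once it is done.
 *
 *	if (tcf_exts_get_net(&f->exts))
 *		tcf_queue_work(&f->rwork, my_delete_filter_work);
 *	else
 *		__my_delete_filter(f);
 */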

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}

#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif
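
/*
 * Illustrative sketch: offload code walks the configured actions with
 * this helper, e.g. when translating a filter into a hardware rule.
 * my_add_drop_rule() is a hypothetical driver function.
 *
 *	struct tc_action *a;
 *	int i;
 *
 *	tcf_exts_for_each_action(i, a, exts) {
 *		if (is_tcf_gact_shot(a))
 *			return my_add_drop_rule(priv);
 *		return -EOPNOTSUPP;
 *	}
 */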

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse);
	}

	preempt_enable();
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_has_one_action - check if exactly one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if exactly one action is present.
 */
static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions == 1;
#else
	return false;
#endif
}

static inline struct tc_action *tcf_exts_first_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->actions[0];
#else
	return NULL;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
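
/*
 * Illustrative sketch of a ->classify() implementation using the result
 * convention above (adapted from the cls_basic pattern): a negative
 * return is treated as "unmatched" and the search continues.
 *
 *	list_for_each_entry_rcu(f, &head->flist, link) {
 *		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *			continue;
 *		*res = f->res;
 *		r = tcf_exts_exec(skb, &f->exts, res);
 *		if (r < 0)
 *			continue;
 *		return r;
 *	}
 *	return -1;
 */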

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 * @ptr: start of the pkt data
 * @nexthdr: offset of the next header
 */
struct tcf_pkt_info {
	unsigned char *ptr;
	int nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace
 */
struct tcf_ematch {
	struct tcf_ematch_ops *ops;
	unsigned long data;
	unsigned int datalen;
	u16 matchid;
	u16 flags;
	struct net *net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int kind;
	int datalen;
	int (*change)(struct net *net, void *,
		      int, struct tcf_ematch *);
	int (*match)(struct sk_buff *, struct tcf_ematch *,
		     struct tcf_pkt_info *);
	void (*destroy)(struct tcf_ematch *);
	int (*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module *owner;
	struct list_head link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);
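
/*
 * Illustrative sketch of the ematch tree lifecycle in a classifier
 * (following the cls_basic pattern): validate the tree from a netlink
 * attribute in ->change(), match against it at classify time, and
 * destroy it when the filter goes away.
 *
 *	struct tcf_ematch_tree t;
 *
 *	err = tcf_em_tree_validate(tp, tb[TCA_BASIC_EMATCHES], &t);
 *	if (err < 0)
 *		return err;
 *	...
 *	tcf_em_tree_destroy(&t);
 */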

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations, returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, if no ematches are
 * configured, or if ematch is not enabled in the kernel; otherwise 0
 * is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while (0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char *tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
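
/*
 * Illustrative sketch (cls_u32 pattern): resolve the indev attribute to
 * an ifindex at configuration time, then compare at classify time.
 *
 *	ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
 *	if (ret < 0)
 *		return ret;
 *	n->ifindex = ret;
 *	...
 *	if (!tcf_match_indev(skb, n->ifindex))
 *		continue;
 */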
#endif /* CONFIG_NET_CLS_IND */

int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
		     enum tc_setup_type type, void *type_data, bool err_stop);

enum tc_block_command {
	TC_BLOCK_BIND,
	TC_BLOCK_UNBIND,
};

struct tc_block_offload {
	enum tc_block_command command;
	enum tcf_block_binder_type binder_type;
	struct tcf_block *block;
	struct netlink_ext_ack *extack;
};

struct tc_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	struct netlink_ext_ack *extack;
};

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct tc_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}
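
/*
 * Illustrative sketch: drivers typically gate each classifier offload
 * request on this check, e.g. in a flower callback ("my_*" names are
 * hypothetical):
 *
 *	static int my_setup_tc_cls_flower(struct my_priv *priv,
 *					  struct tc_cls_flower_offload *f)
 *	{
 *		if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
 *			return -EOPNOTSUPP;
 *		...
 *	}
 */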

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}
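
/*
 * For example: tc_flags_valid(0) and tc_flags_valid(TCA_CLS_FLAGS_SKIP_HW)
 * are true, while tc_flags_valid(TCA_CLS_FLAGS_SKIP_HW |
 * TCA_CLS_FLAGS_SKIP_SW) is false, as is any value with a bit outside
 * the three known flags.
 */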

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}
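
/*
 * Illustrative sketch of the usual offload call sequence in a classifier
 * (following the cls_flower pattern):
 *
 *	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
 *	cls_flower.command = TC_CLSFLOWER_REPLACE;
 *	cls_flower.cookie = (unsigned long) f;
 *	...
 *	err = tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
 *			       &cls_flower, skip_sw);
 */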

enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
	TC_CLSFLOWER_TMPLT_CREATE,
	TC_CLSFLOWER_TMPLT_DESTROY,
};

struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
	u32 classid;
};

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};
/* This structure holds the cookie that is passed from user space
 * to the kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	struct tc_qopt_offload_stats stats;
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	bool is_ecn;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* If a prio qdisc was offloaded and is then changed to a
	 * non-offloadable configuration, it needs to update the backlog
	 * and qlen values to negate the hardware backlog and qlen values
	 * (and only those).
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

#endif