1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * net/sched/act_police.c Input police filter
4 *
5 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6 * J Hadi Salim (action changes)
7 */
8
9 #include <linux/module.h>
10 #include <linux/types.h>
11 #include <linux/kernel.h>
12 #include <linux/string.h>
13 #include <linux/errno.h>
14 #include <linux/skbuff.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/init.h>
17 #include <linux/slab.h>
18 #include <net/act_api.h>
19 #include <net/netlink.h>
20 #include <net/pkt_cls.h>
21 #include <net/tc_act/tc_police.h>
22
23 /* Each policer is serialized by its individual spinlock */
24
25 static struct tc_action_ops act_police_ops;
26
/* Netlink attribute validation policy for TCA_POLICE_* attributes.
 * RATE/PEAKRATE carry legacy fixed-size rate tables; the *64 variants
 * extend the 32-bit legacy fields, and the PKT* pair configures
 * packet-per-second policing (both must be >= 1 when present).
 */
static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
	[TCA_POLICE_RATE]	= { .len = TC_RTAB_SIZE },
	[TCA_POLICE_PEAKRATE]	= { .len = TC_RTAB_SIZE },
	[TCA_POLICE_AVRATE]	= { .type = NLA_U32 },
	[TCA_POLICE_RESULT]	= { .type = NLA_U32 },
	[TCA_POLICE_RATE64]	= { .type = NLA_U64 },
	[TCA_POLICE_PEAKRATE64]	= { .type = NLA_U64 },
	[TCA_POLICE_PKTRATE64]	= { .type = NLA_U64, .min = 1 },
	[TCA_POLICE_PKTBURST64]	= { .type = NLA_U64, .min = 1 },
};
37
/* tcf_police_init - parse netlink attributes and create or update a policer
 * @net: namespace the action lives in
 * @nla: nested TCA_POLICE_* attributes
 * @est: optional rate estimator configuration
 * @a: in/out pointer to the bound/created action instance
 * @tp: classifier proto, used to validate a goto_chain control action
 * @flags: TCA_ACT_FLAGS_* (BIND, REPLACE, ...)
 * @extack: extended ack for error reporting
 *
 * Returns ACT_P_CREATED when a new instance was allocated, 0 on a
 * successful bind/replace, or a negative errno.  On failure any IDR slot,
 * rate tables and goto_chain reference taken along the way are released.
 */
static int tcf_police_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	int ret = 0, tcfp_result = TC_ACT_OK, err, size;
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_POLICE_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_police *parm;
	struct tcf_police *police;
	struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
	struct tc_action_net *tn = net_generic(net, act_police_ops.net_id);
	struct tcf_police_params *new;
	bool exists = false;
	u32 index;
	u64 rate64, prate64;
	u64 pps, ppsburst;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_POLICE_MAX, nla,
					  police_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_POLICE_TBF] == NULL)
		return -EINVAL;
	/* Accept both the current tc_police layout and the old compat one. */
	size = nla_len(tb[TCA_POLICE_TBF]);
	if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
		return -EINVAL;

	parm = nla_data(tb[TCA_POLICE_TBF]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	if (!exists) {
		ret = tcf_idr_create(tn, index, NULL, a,
				     &act_police_ops, bind, true, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
		spin_lock_init(&(to_police(*a)->tcfp_lock));
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		/* Instance exists but caller did not ask to replace it. */
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	police = to_police(*a);
	if (parm->rate.rate) {
		err = -ENOMEM;
		R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE], NULL);
		if (R_tab == NULL)
			goto failure;

		/* A peak rate only makes sense on top of a byte rate. */
		if (parm->peakrate.rate) {
			P_tab = qdisc_get_rtab(&parm->peakrate,
					       tb[TCA_POLICE_PEAKRATE], NULL);
			if (P_tab == NULL)
				goto failure;
		}
	}

	if (est) {
		err = gen_replace_estimator(&police->tcf_bstats,
					    police->common.cpu_bstats,
					    &police->tcf_rate_est,
					    &police->tcf_lock,
					    false, est);
		if (err)
			goto failure;
	} else if (tb[TCA_POLICE_AVRATE] &&
		   (ret == ACT_P_CREATED ||
		    !gen_estimator_active(&police->tcf_rate_est))) {
		/* AVRATE policing needs a live estimator to compare against. */
		err = -EINVAL;
		goto failure;
	}

	if (tb[TCA_POLICE_RESULT]) {
		tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
		if (TC_ACT_EXT_CMP(tcfp_result, TC_ACT_GOTO_CHAIN)) {
			NL_SET_ERR_MSG(extack,
				       "goto chain not allowed on fallback");
			err = -EINVAL;
			goto failure;
		}
	}

	/* Packet-per-second rate and burst must be given together ... */
	if ((tb[TCA_POLICE_PKTRATE64] && !tb[TCA_POLICE_PKTBURST64]) ||
	    (!tb[TCA_POLICE_PKTRATE64] && tb[TCA_POLICE_PKTBURST64])) {
		NL_SET_ERR_MSG(extack,
			       "Both or neither packet-per-second burst and rate must be provided");
		err = -EINVAL;
		goto failure;
	}

	/* ... and are mutually exclusive with a byte-per-second rate. */
	if (tb[TCA_POLICE_PKTRATE64] && R_tab) {
		NL_SET_ERR_MSG(extack,
			       "packet-per-second and byte-per-second rate limits not allowed in same action");
		err = -EINVAL;
		goto failure;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (unlikely(!new)) {
		err = -ENOMEM;
		goto failure;
	}

	/* No failure allowed after this point */
	new->tcfp_result = tcfp_result;
	new->tcfp_mtu = parm->mtu;
	if (!new->tcfp_mtu) {
		/* No MTU given: effectively unlimited, or derived from the
		 * rate table's cell size when a byte rate is configured.
		 */
		new->tcfp_mtu = ~0;
		if (R_tab)
			new->tcfp_mtu = 255 << R_tab->rate.cell_log;
	}
	if (R_tab) {
		new->rate_present = true;
		/* 64-bit attribute, when present, overrides the legacy
		 * 32-bit rate in parm.
		 */
		rate64 = tb[TCA_POLICE_RATE64] ?
			 nla_get_u64(tb[TCA_POLICE_RATE64]) : 0;
		psched_ratecfg_precompute(&new->rate, &R_tab->rate, rate64);
		qdisc_put_rtab(R_tab);
	} else {
		new->rate_present = false;
	}
	if (P_tab) {
		new->peak_present = true;
		prate64 = tb[TCA_POLICE_PEAKRATE64] ?
			 nla_get_u64(tb[TCA_POLICE_PEAKRATE64]) : 0;
		psched_ratecfg_precompute(&new->peak, &P_tab->rate, prate64);
		qdisc_put_rtab(P_tab);
	} else {
		new->peak_present = false;
	}

	new->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
	if (new->peak_present)
		new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak,
							 new->tcfp_mtu);

	if (tb[TCA_POLICE_AVRATE])
		new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);

	if (tb[TCA_POLICE_PKTRATE64]) {
		/* PKTBURST64 presence was validated together with PKTRATE64
		 * above, so it is safe to read here.
		 */
		pps = nla_get_u64(tb[TCA_POLICE_PKTRATE64]);
		ppsburst = nla_get_u64(tb[TCA_POLICE_PKTBURST64]);
		new->pps_present = true;
		new->tcfp_pkt_burst = PSCHED_TICKS2NS(ppsburst);
		psched_ppscfg_precompute(&new->ppsrate, pps);
	}

	/* Publish the new parameters.  Lock nesting: tcf_lock protects
	 * tcf_action and the params pointer, the inner tcfp_lock the
	 * token-bucket counters used by the fast path.
	 */
	spin_lock_bh(&police->tcf_lock);
	spin_lock_bh(&police->tcfp_lock);
	police->tcfp_t_c = ktime_get_ns();
	police->tcfp_toks = new->tcfp_burst;
	if (new->peak_present)
		police->tcfp_ptoks = new->tcfp_mtu_ptoks;
	spin_unlock_bh(&police->tcfp_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	new = rcu_replace_pointer(police->params,
				  new,
				  lockdep_is_held(&police->tcf_lock));
	spin_unlock_bh(&police->tcf_lock);

	/* Drop the displaced goto_chain ref and free the old params after
	 * a grace period; RCU readers may still be using them.
	 */
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (new)
		kfree_rcu(new, rcu);

	return ret;

failure:
	qdisc_put_rtab(P_tab);
	qdisc_put_rtab(R_tab);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}
230
tcf_police_mtu_check(struct sk_buff * skb,u32 limit)231 static bool tcf_police_mtu_check(struct sk_buff *skb, u32 limit)
232 {
233 u32 len;
234
235 if (skb_is_gso(skb))
236 return skb_gso_validate_mac_len(skb, limit);
237
238 len = qdisc_pkt_len(skb);
239 if (skb_at_tc_ingress(skb))
240 len += skb->mac_len;
241
242 return len <= limit;
243 }
244
/* tcf_police_act - per-packet policing decision (fast path, RCU/BH context)
 *
 * Implements a nanosecond-resolution token bucket: tcfp_toks (byte rate),
 * tcfp_ptoks (peak rate) and tcfp_pkttoks (packet rate) hold time credit
 * accumulated since the last conforming packet (tcfp_t_c).  A packet
 * conforms when every configured bucket stays non-negative after charging
 * the packet's transmission time.
 *
 * Returns the configured conform result (tcfp_result, default TC_ACT_OK)
 * for conforming packets, otherwise the action's exceed verdict
 * (tcf_action).
 */
static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_police *police = to_police(a);
	s64 now, toks, ppstoks = 0, ptoks = 0;
	struct tcf_police_params *p;
	int ret;

	tcf_lastuse_update(&police->tcf_tm);
	bstats_update(this_cpu_ptr(police->common.cpu_bstats), skb);

	/* Default verdict: the exceed action configured by the user. */
	ret = READ_ONCE(police->tcf_action);
	p = rcu_dereference_bh(police->params);

	if (p->tcfp_ewma_rate) {
		struct gnet_stats_rate_est64 sample;

		/* Average-rate policing: exceed when the estimator cannot
		 * be read or the sampled bps reaches the configured ceiling.
		 */
		if (!gen_estimator_read(&police->tcf_rate_est, &sample) ||
		    sample.bps >= p->tcfp_ewma_rate)
			goto inc_overlimits;
	}

	if (tcf_police_mtu_check(skb, p->tcfp_mtu)) {
		if (!p->rate_present && !p->pps_present) {
			/* MTU-only policer: packet fits, so it conforms. */
			ret = p->tcfp_result;
			goto end;
		}

		now = ktime_get_ns();
		spin_lock_bh(&police->tcfp_lock);
		/* Credit earned since the last conforming packet, capped at
		 * the configured burst.
		 */
		toks = min_t(s64, now - police->tcfp_t_c, p->tcfp_burst);
		if (p->peak_present) {
			/* Peak bucket is capped at one MTU worth of tokens. */
			ptoks = toks + police->tcfp_ptoks;
			if (ptoks > p->tcfp_mtu_ptoks)
				ptoks = p->tcfp_mtu_ptoks;
			ptoks -= (s64)psched_l2t_ns(&p->peak,
						    qdisc_pkt_len(skb));
		}
		if (p->rate_present) {
			toks += police->tcfp_toks;
			if (toks > p->tcfp_burst)
				toks = p->tcfp_burst;
			toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
		} else if (p->pps_present) {
			/* Packet-per-second mode: charge exactly one packet. */
			ppstoks = min_t(s64, now - police->tcfp_t_c, p->tcfp_pkt_burst);
			ppstoks += police->tcfp_pkttoks;
			if (ppstoks > p->tcfp_pkt_burst)
				ppstoks = p->tcfp_pkt_burst;
			ppstoks -= (s64)psched_pkt2t_ns(&p->ppsrate, 1);
		}
		/* OR of the sign bits: non-negative iff all buckets are
		 * non-negative, i.e. the packet conforms; commit the state.
		 */
		if ((toks | ptoks | ppstoks) >= 0) {
			police->tcfp_t_c = now;
			police->tcfp_toks = toks;
			police->tcfp_ptoks = ptoks;
			police->tcfp_pkttoks = ppstoks;
			spin_unlock_bh(&police->tcfp_lock);
			ret = p->tcfp_result;
			goto inc_drops;
		}
		spin_unlock_bh(&police->tcfp_lock);
	}

inc_overlimits:
	qstats_overlimit_inc(this_cpu_ptr(police->common.cpu_qstats));
inc_drops:
	/* Only count a drop when the final verdict actually drops. */
	if (ret == TC_ACT_SHOT)
		qstats_drop_inc(this_cpu_ptr(police->common.cpu_qstats));
end:
	return ret;
}
315
tcf_police_cleanup(struct tc_action * a)316 static void tcf_police_cleanup(struct tc_action *a)
317 {
318 struct tcf_police *police = to_police(a);
319 struct tcf_police_params *p;
320
321 p = rcu_dereference_protected(police->params, 1);
322 if (p)
323 kfree_rcu(p, rcu);
324 }
325
/* Fold hardware-offload counters into the action's software stats and
 * advance the last-use timestamp (monotonic: never moves backwards).
 */
static void tcf_police_stats_update(struct tc_action *a,
				    u64 bytes, u64 packets, u64 drops,
				    u64 lastuse, bool hw)
{
	struct tcf_police *police = to_police(a);

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	police->tcf_tm.lastuse = max_t(u64, police->tcf_tm.lastuse, lastuse);
}
336
/* tcf_police_dump - serialize the policer's configuration to netlink
 *
 * Runs under tcf_lock so the params snapshot stays consistent with
 * tcf_action.  Returns the skb length on success, or -1 on attribute
 * overflow with the partially written attributes trimmed off.
 */
static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_police *police = to_police(a);
	struct tcf_police_params *p;
	struct tc_police opt = {
		.index = police->tcf_index,
		.refcnt = refcount_read(&police->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&police->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&police->tcf_lock);
	opt.action = police->tcf_action;
	p = rcu_dereference_protected(police->params,
				      lockdep_is_held(&police->tcf_lock));
	opt.mtu = p->tcfp_mtu;
	opt.burst = PSCHED_NS2TICKS(p->tcfp_burst);
	if (p->rate_present) {
		psched_ratecfg_getrate(&opt.rate, &p->rate);
		/* The legacy opt.rate field is 32-bit; emit the 64-bit
		 * attribute only when the rate does not fit in it.
		 */
		if ((police->params->rate.rate_bytes_ps >= (1ULL << 32)) &&
		    nla_put_u64_64bit(skb, TCA_POLICE_RATE64,
				      police->params->rate.rate_bytes_ps,
				      TCA_POLICE_PAD))
			goto nla_put_failure;
	}
	if (p->peak_present) {
		psched_ratecfg_getrate(&opt.peakrate, &p->peak);
		if ((police->params->peak.rate_bytes_ps >= (1ULL << 32)) &&
		    nla_put_u64_64bit(skb, TCA_POLICE_PEAKRATE64,
				      police->params->peak.rate_bytes_ps,
				      TCA_POLICE_PAD))
			goto nla_put_failure;
	}
	if (p->pps_present) {
		/* Packet-per-second config has no legacy 32-bit fields;
		 * always dumped via the 64-bit attributes.
		 */
		if (nla_put_u64_64bit(skb, TCA_POLICE_PKTRATE64,
				      police->params->ppsrate.rate_pkts_ps,
				      TCA_POLICE_PAD))
			goto nla_put_failure;
		if (nla_put_u64_64bit(skb, TCA_POLICE_PKTBURST64,
				      PSCHED_NS2TICKS(p->tcfp_pkt_burst),
				      TCA_POLICE_PAD))
			goto nla_put_failure;
	}
	if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
		goto nla_put_failure;
	if (p->tcfp_result &&
	    nla_put_u32(skb, TCA_POLICE_RESULT, p->tcfp_result))
		goto nla_put_failure;
	if (p->tcfp_ewma_rate &&
	    nla_put_u32(skb, TCA_POLICE_AVRATE, p->tcfp_ewma_rate))
		goto nla_put_failure;

	tcf_tm_dump(&t, &police->tcf_tm);
	if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&police->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&police->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}
403
/* Translate a TC action verdict into a flow_action id for offload.
 * For goto_chain/jump verdicts the embedded value (chain index / jump
 * offset) is stored through @extval.  Returns a FLOW_ACTION_* id, or
 * -EOPNOTSUPP (with an extack message) for verdicts that cannot be
 * offloaded.
 */
static int tcf_police_act_to_flow_act(int tc_act, u32 *extval,
				      struct netlink_ext_ack *extack)
{
	int act_id = -EOPNOTSUPP;

	if (!TC_ACT_EXT_OPCODE(tc_act)) {
		switch (tc_act) {
		case TC_ACT_OK:
			act_id = FLOW_ACTION_ACCEPT;
			break;
		case TC_ACT_SHOT:
			act_id = FLOW_ACTION_DROP;
			break;
		case TC_ACT_PIPE:
			act_id = FLOW_ACTION_PIPE;
			break;
		case TC_ACT_RECLASSIFY:
			NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform/exceed action is \"reclassify\"");
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported conform/exceed action offload");
			break;
		}
	} else if (TC_ACT_EXT_CMP(tc_act, TC_ACT_GOTO_CHAIN)) {
		act_id = FLOW_ACTION_GOTO;
		*extval = tc_act & TC_ACT_EXT_VAL_MASK;
	} else if (TC_ACT_EXT_CMP(tc_act, TC_ACT_JUMP)) {
		act_id = FLOW_ACTION_JUMP;
		*extval = tc_act & TC_ACT_EXT_VAL_MASK;
	} else if (tc_act == TC_ACT_UNSPEC) {
		act_id = FLOW_ACTION_CONTINUE;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported conform/exceed action offload");
	}

	return act_id;
}
434
/* Fill a flow offload descriptor for this policer.
 *
 * With @bind set, populate a full flow_action_entry (rates, bursts, MTU
 * and the translated exceed/notexceed verdicts) and bump @index_inc;
 * otherwise only stamp the action id on the flow_offload_action.
 * Returns 0 on success or a negative errno when a verdict cannot be
 * translated for offload.
 */
static int tcf_police_offload_act_setup(struct tc_action *act, void *entry_data,
					u32 *index_inc, bool bind,
					struct netlink_ext_ack *extack)
{
	struct flow_action_entry *entry;
	struct tcf_police *police;
	struct tcf_police_params *p;
	int act_id;

	if (!bind) {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_POLICE;
		return 0;
	}

	police = to_police(act);
	p = rcu_dereference_protected(police->params,
				      lockdep_is_held(&police->tcf_lock));

	entry = entry_data;
	entry->id = FLOW_ACTION_POLICE;
	entry->police.burst = tcf_police_burst(act);
	entry->police.rate_bytes_ps = tcf_police_rate_bytes_ps(act);
	entry->police.peakrate_bytes_ps = tcf_police_peakrate_bytes_ps(act);
	entry->police.avrate = tcf_police_tcfp_ewma_rate(act);
	entry->police.overhead = tcf_police_rate_overhead(act);
	entry->police.burst_pkt = tcf_police_burst_pkt(act);
	entry->police.rate_pkt_ps = tcf_police_rate_pkt_ps(act);
	entry->police.mtu = tcf_police_tcfp_mtu(act);

	/* Exceed verdict comes from the action itself ... */
	act_id = tcf_police_act_to_flow_act(police->tcf_action,
					    &entry->police.exceed.extval,
					    extack);
	if (act_id < 0)
		return act_id;
	entry->police.exceed.act_id = act_id;

	/* ... the conform (notexceed) verdict from the params. */
	act_id = tcf_police_act_to_flow_act(p->tcfp_result,
					    &entry->police.notexceed.extval,
					    extack);
	if (act_id < 0)
		return act_id;
	entry->police.notexceed.act_id = act_id;

	*index_inc = 1;
	return 0;
}
485
/* Module metadata. */
MODULE_AUTHOR("Alexey Kuznetsov");
MODULE_DESCRIPTION("Policing actions");
MODULE_LICENSE("GPL");
489
/* Callbacks wiring the police action into the tc action core. */
static struct tc_action_ops act_police_ops = {
	.kind		=	"police",
	.id		=	TCA_ID_POLICE,
	.owner		=	THIS_MODULE,
	.stats_update	=	tcf_police_stats_update,
	.act		=	tcf_police_act,
	.dump		=	tcf_police_dump,
	.init		=	tcf_police_init,
	.cleanup	=	tcf_police_cleanup,
	.offload_act_setup =	tcf_police_offload_act_setup,
	.size		=	sizeof(struct tcf_police),
};
502
police_init_net(struct net * net)503 static __net_init int police_init_net(struct net *net)
504 {
505 struct tc_action_net *tn = net_generic(net, act_police_ops.net_id);
506
507 return tc_action_net_init(net, tn, &act_police_ops);
508 }
509
/* Per-netns batched teardown of the police action tables. */
static void __net_exit police_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_police_ops.net_id);
}
514
/* Per-network-namespace registration for the police action. */
static struct pernet_operations police_net_ops = {
	.init = police_init_net,
	.exit_batch = police_exit_net,
	.id   = &act_police_ops.net_id,
	.size = sizeof(struct tc_action_net),
};
521
/* Module entry point: register the action ops and pernet hooks. */
static int __init police_init_module(void)
{
	return tcf_register_action(&act_police_ops, &police_net_ops);
}
526
/* Module exit point: unregister the action ops and pernet hooks. */
static void __exit police_cleanup_module(void)
{
	tcf_unregister_action(&act_police_ops, &police_net_ops);
}
531
532 module_init(police_init_module);
533 module_exit(police_cleanup_module);
534