// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>

#include "../nfpcore/nfp_cpp.h"
#include "../nfp_app.h"
#include "../nfp_net_repr.h"
#include "main.h"

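/* struct nfp_abm_u32_match - state of an offloaded u32 DSCP-to-band filter
 * @handle:	handle of the u32 knode this entry was created for
 * @band:	band (classid) packets matching @mask/@val are mapped to
 * @mask:	mask applied to the ToS / Traffic Class byte
 * @val:	value the masked bits must equal
 * @list:	entry on the link's dscp_map list
 */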
struct nfp_abm_u32_match {
	u32 handle;
	u32 band;
	u8 mask;
	u8 val;
	struct list_head list;
};

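/* Check that the u32 knode is something the firmware DSCP map can express:
 * a single, terminal, non-hashing key matching only the high DSCP bits of
 * the IPv4 ToS / IPv6 Traffic Class byte, classifying into a valid band.
 */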
static bool
nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode,
			__be16 proto, struct netlink_ext_ack *extack)
{
	struct tc_u32_key *k;
	unsigned int tos_off;

	if (knode->exts && tcf_exts_has_actions(knode->exts)) {
		NL_SET_ERR_MSG_MOD(extack, "action offload not supported");
		return false;
	}
	if (knode->link_handle) {
		NL_SET_ERR_MSG_MOD(extack, "linking not supported");
		return false;
	}
	if (knode->sel->flags != TC_U32_TERMINAL) {
		NL_SET_ERR_MSG_MOD(extack,
				   "flags must be equal to TC_U32_TERMINAL");
		return false;
	}
	if (knode->sel->off || knode->sel->offshift || knode->sel->offmask ||
	    knode->sel->offoff || knode->fshift) {
		NL_SET_ERR_MSG_MOD(extack, "variable offsetting not supported");
		return false;
	}
	if (knode->sel->hoff || knode->sel->hmask) {
		NL_SET_ERR_MSG_MOD(extack, "hashing not supported");
		return false;
	}
	if (knode->val || knode->mask) {
		NL_SET_ERR_MSG_MOD(extack, "matching on mark not supported");
		return false;
	}
	if (knode->res && knode->res->class) {
		NL_SET_ERR_MSG_MOD(extack, "setting non-0 class not supported");
		return false;
	}
	if (knode->res && knode->res->classid >= abm->num_bands) {
		NL_SET_ERR_MSG_MOD(extack,
				   "classid higher than number of bands");
		return false;
	}
	if (knode->sel->nkeys != 1) {
		NL_SET_ERR_MSG_MOD(extack, "exactly one key required");
		return false;
	}

	/* Shift which brings the ToS (IPv4) / Traffic Class (IPv6) byte down
	 * to the low bits of the first 32-bit word of the IP header.
	 */
	switch (proto) {
	case htons(ETH_P_IP):
		tos_off = 16;
		break;
	case htons(ETH_P_IPV6):
		tos_off = 20;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "only IP and IPv6 supported as filter protocol");
		return false;
	}

	k = &knode->sel->keys[0];
	if (k->offmask) {
		NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offsetting not supported");
		return false;
	}
	if (k->off) {
		NL_SET_ERR_MSG_MOD(extack, "only DSCP fields can be matched");
		return false;
	}
	if (k->val & ~k->mask) {
		NL_SET_ERR_MSG_MOD(extack, "mask does not cover the key");
		return false;
	}
	if (be32_to_cpu(k->mask) >> tos_off & ~abm->dscp_mask) {
		NL_SET_ERR_MSG_MOD(extack, "only high DSCP class selector bits can be used");
		nfp_err(abm->app->cpp,
			"u32 offload: requested mask %x FW can support only %x\n",
			be32_to_cpu(k->mask) >> tos_off, abm->dscp_mask);
		return false;
	}

	return true;
}

/* This filter list -> map conversion is O(n * m), we expect single digit or
 * low double digit number of prios and likewise for the filters.  Also u32
 * doesn't report stats, so it's really only setup time cost.
 */
static unsigned int
nfp_abm_find_band_for_prio(struct nfp_abm_link *alink, unsigned int prio)
{
	struct nfp_abm_u32_match *iter;

	list_for_each_entry(iter, &alink->dscp_map, list)
		if ((prio & iter->mask) == iter->val)
			return iter->band;

	return alink->def_band;
}

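/* Rebuild the firmware priority map from the offloaded filter list.  Each
 * priority gets a power-of-two-sized bit field holding its band, so fields
 * never straddle a 32-bit word; e.g. with 4 bands every field is 2 bits
 * wide and one word covers 16 priorities.
 */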
static int nfp_abm_update_band_map(struct nfp_abm_link *alink)
{
	unsigned int i, bits_per_prio, prios_per_word, base_shift;
	struct nfp_abm *abm = alink->abm;
	u32 field_mask;

	alink->has_prio = !list_empty(&alink->dscp_map);

	bits_per_prio = roundup_pow_of_two(order_base_2(abm->num_bands));
	field_mask = (1 << bits_per_prio) - 1;
	prios_per_word = sizeof(u32) * BITS_PER_BYTE / bits_per_prio;

	/* FW mask applies from top bits */
	base_shift = 8 - order_base_2(abm->num_prios);

	for (i = 0; i < abm->num_prios; i++) {
		unsigned int offset;
		u32 *word;
		u8 band;

		word = &alink->prio_map[i / prios_per_word];
		offset = (i % prios_per_word) * bits_per_prio;

		band = nfp_abm_find_band_for_prio(alink, i << base_shift);

		*word &= ~(field_mask << offset);
		*word |= band << offset;
	}

	/* Qdisc offload status may change if has_prio changed */
	nfp_abm_qdisc_offload_update(alink);

	return nfp_abm_ctrl_prio_map_update(alink, alink->prio_map);
}

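/* Drop the offloaded entry matching @knode's handle, if any, and resync the
 * firmware band map.
 */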
static void
nfp_abm_u32_knode_delete(struct nfp_abm_link *alink,
			 struct tc_cls_u32_knode *knode)
{
	struct nfp_abm_u32_match *iter;

	list_for_each_entry(iter, &alink->dscp_map, list)
		if (iter->handle == knode->handle) {
			list_del(&iter->list);
			kfree(iter);
			nfp_abm_update_band_map(alink);
			return;
		}
}

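/* Add or update the DSCP-to-band mapping for @knode.  Any failure removes
 * the existing entry for this handle, so a rejected replace does not leave
 * a stale mapping in the firmware map.
 */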
static int
nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
			  struct tc_cls_u32_knode *knode,
			  __be16 proto, struct netlink_ext_ack *extack)
{
	struct nfp_abm_u32_match *match = NULL, *iter;
	unsigned int tos_off;
	u8 mask, val;
	int err;

	if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack)) {
		err = -EOPNOTSUPP;
		goto err_delete;
	}

	tos_off = proto == htons(ETH_P_IP) ? 16 : 20;

	/* Extract the DSCP Class Selector bits */
	val = be32_to_cpu(knode->sel->keys[0].val) >> tos_off & 0xff;
	mask = be32_to_cpu(knode->sel->keys[0].mask) >> tos_off & 0xff;

	/* Check if there is no conflicting mapping and find match by handle */
	list_for_each_entry(iter, &alink->dscp_map, list) {
		u32 cmask;

		if (iter->handle == knode->handle) {
			match = iter;
			continue;
		}

		cmask = iter->mask & mask;
		if ((iter->val & cmask) == (val & cmask) &&
		    iter->band != knode->res->classid) {
			NL_SET_ERR_MSG_MOD(extack, "conflict with already offloaded filter");
			err = -EOPNOTSUPP;
			goto err_delete;
		}
	}

	if (!match) {
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match) {
			err = -ENOMEM;
			goto err_delete;
		}

		list_add(&match->list, &alink->dscp_map);
	}
	match->handle = knode->handle;
	match->band = knode->res->classid;
	match->mask = mask;
	match->val = val;

	err = nfp_abm_update_band_map(alink);
	if (err)
		goto err_delete;

	return 0;

err_delete:
	nfp_abm_u32_knode_delete(alink, knode);
	return err;
}

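/* Block callback registered for the representor; only chain-0 u32 filters
 * on IPv4/IPv6 are accepted and translated into band map updates.
 */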
static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type,
				     void *type_data, void *cb_priv)
{
	struct tc_cls_u32_offload *cls_u32 = type_data;
	struct nfp_repr *repr = cb_priv;
	struct nfp_abm_link *alink;

	alink = repr->app_priv;

	if (type != TC_SETUP_CLSU32) {
		NL_SET_ERR_MSG_MOD(cls_u32->common.extack,
				   "only offload of u32 classifier supported");
		return -EOPNOTSUPP;
	}
	if (!tc_cls_can_offload_and_chain0(repr->netdev, &cls_u32->common))
		return -EOPNOTSUPP;

	if (cls_u32->common.protocol != htons(ETH_P_IP) &&
	    cls_u32->common.protocol != htons(ETH_P_IPV6)) {
		NL_SET_ERR_MSG_MOD(cls_u32->common.extack,
				   "only IP and IPv6 supported as filter protocol");
		return -EOPNOTSUPP;
	}

	switch (cls_u32->command) {
	case TC_CLSU32_NEW_KNODE:
	case TC_CLSU32_REPLACE_KNODE:
		return nfp_abm_u32_knode_replace(alink, &cls_u32->knode,
						 cls_u32->common.protocol,
						 cls_u32->common.extack);
	case TC_CLSU32_DELETE_KNODE:
		nfp_abm_u32_knode_delete(alink, &cls_u32->knode);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(nfp_abm_block_cb_list);

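/* Bind or unbind the callback above to the TC block via the common
 * flow_block helper; the representor serves as both callback identity and
 * private data.
 */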
int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
			    struct flow_block_offload *f)
{
	return flow_block_cb_setup_simple(f, &nfp_abm_block_cb_list,
					  nfp_abm_setup_tc_block_cb,
					  repr, repr, true);
}