1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
3
4 #include "prestera.h"
5 #include "prestera_acl.h"
6 #include "prestera_flower.h"
7
prestera_flower_parse_actions(struct prestera_flow_block * block,struct prestera_acl_rule * rule,struct flow_action * flow_action,struct netlink_ext_ack * extack)8 static int prestera_flower_parse_actions(struct prestera_flow_block *block,
9 struct prestera_acl_rule *rule,
10 struct flow_action *flow_action,
11 struct netlink_ext_ack *extack)
12 {
13 struct prestera_acl_rule_action_entry a_entry;
14 const struct flow_action_entry *act;
15 int err, i;
16
17 if (!flow_action_has_entries(flow_action))
18 return 0;
19
20 flow_action_for_each(i, act, flow_action) {
21 memset(&a_entry, 0, sizeof(a_entry));
22
23 switch (act->id) {
24 case FLOW_ACTION_ACCEPT:
25 a_entry.id = PRESTERA_ACL_RULE_ACTION_ACCEPT;
26 break;
27 case FLOW_ACTION_DROP:
28 a_entry.id = PRESTERA_ACL_RULE_ACTION_DROP;
29 break;
30 case FLOW_ACTION_TRAP:
31 a_entry.id = PRESTERA_ACL_RULE_ACTION_TRAP;
32 break;
33 default:
34 NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
35 pr_err("Unsupported action\n");
36 return -EOPNOTSUPP;
37 }
38
39 err = prestera_acl_rule_action_add(rule, &a_entry);
40 if (err)
41 return err;
42 }
43
44 return 0;
45 }
46
prestera_flower_parse_meta(struct prestera_acl_rule * rule,struct flow_cls_offload * f,struct prestera_flow_block * block)47 static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
48 struct flow_cls_offload *f,
49 struct prestera_flow_block *block)
50 {
51 struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
52 struct prestera_acl_rule_match_entry m_entry = {0};
53 struct net_device *ingress_dev;
54 struct flow_match_meta match;
55 struct prestera_port *port;
56
57 flow_rule_match_meta(f_rule, &match);
58 if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
59 NL_SET_ERR_MSG_MOD(f->common.extack,
60 "Unsupported ingress ifindex mask");
61 return -EINVAL;
62 }
63
64 ingress_dev = __dev_get_by_index(prestera_acl_block_net(block),
65 match.key->ingress_ifindex);
66 if (!ingress_dev) {
67 NL_SET_ERR_MSG_MOD(f->common.extack,
68 "Can't find specified ingress port to match on");
69 return -EINVAL;
70 }
71
72 if (!prestera_netdev_check(ingress_dev)) {
73 NL_SET_ERR_MSG_MOD(f->common.extack,
74 "Can't match on switchdev ingress port");
75 return -EINVAL;
76 }
77 port = netdev_priv(ingress_dev);
78
79 m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT;
80 m_entry.keymask.u64.key = port->hw_id | ((u64)port->dev_id << 32);
81 m_entry.keymask.u64.mask = ~(u64)0;
82
83 return prestera_acl_rule_match_add(rule, &m_entry);
84 }
85
/* Translate a flower classifier offload request into Prestera ACL rule
 * match entries and actions.
 *
 * Each supported dissector key is converted into one or more
 * prestera_acl_rule_match_entry records appended to @rule, in a fixed
 * order (eth type, ip proto, MACs, IPv4 addrs, L4 ports, VLAN, ICMP),
 * then the actions are parsed last. Returns 0 on success or a negative
 * errno, reporting details through f->common.extack.
 *
 * NOTE(review): FLOW_DISSECTOR_KEY_IPV6_ADDRS is accepted in the
 * used_keys check below but never parsed — an IPv6-address match
 * appears to be silently ignored. Confirm whether this is intentional.
 */
static int prestera_flower_parse(struct prestera_flow_block *block,
				 struct prestera_acl_rule *rule,
				 struct flow_cls_offload *f)
{
	struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = f_rule->match.dissector;
	struct prestera_acl_rule_match_entry m_entry;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	/* Reject the request up front if it uses any dissector key the
	 * hardware cannot match on.
	 */
	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ICMP) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	/* tc filter priority maps directly onto ACL rule priority. */
	prestera_acl_rule_priority_set(rule, f->common.prio);

	/* META key: match on the ingress port. */
	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_META)) {
		err = prestera_flower_parse_meta(rule, f, block);
		if (err)
			return err;
	}

	/* CONTROL key only tells us which address family (if any) the
	 * address keys below use; it adds no match entry itself.
	 */
	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(f_rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(f_rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		/* ETH_P_ALL means "any protocol": use a wildcard match. */
		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}

		/* add eth type key,mask */
		memset(&m_entry, 0, sizeof(m_entry));
		m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE;
		m_entry.keymask.u16.key = n_proto_key;
		m_entry.keymask.u16.mask = n_proto_mask;
		err = prestera_acl_rule_match_add(rule, &m_entry);
		if (err)
			return err;

		/* add ip proto key,mask */
		memset(&m_entry, 0, sizeof(m_entry));
		m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO;
		m_entry.keymask.u8.key = match.key->ip_proto;
		m_entry.keymask.u8.mask = match.mask->ip_proto;
		err = prestera_acl_rule_match_add(rule, &m_entry);
		if (err)
			return err;

		/* Remember the protocol for the L4 PORTS sanity check. */
		ip_proto = match.key->ip_proto;
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(f_rule, &match);

		/* add ethernet dst key,mask */
		memset(&m_entry, 0, sizeof(m_entry));
		m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC;
		memcpy(&m_entry.keymask.mac.key,
		       &match.key->dst, sizeof(match.key->dst));
		memcpy(&m_entry.keymask.mac.mask,
		       &match.mask->dst, sizeof(match.mask->dst));
		err = prestera_acl_rule_match_add(rule, &m_entry);
		if (err)
			return err;

		/* add ethernet src key,mask */
		memset(&m_entry, 0, sizeof(m_entry));
		m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC;
		memcpy(&m_entry.keymask.mac.key,
		       &match.key->src, sizeof(match.key->src));
		memcpy(&m_entry.keymask.mac.mask,
		       &match.mask->src, sizeof(match.mask->src));
		err = prestera_acl_rule_match_add(rule, &m_entry);
		if (err)
			return err;
	}

	/* IPv4 source/destination addresses (only when CONTROL said so). */
	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(f_rule, &match);

		memset(&m_entry, 0, sizeof(m_entry));
		m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC;
		memcpy(&m_entry.keymask.u32.key,
		       &match.key->src, sizeof(match.key->src));
		memcpy(&m_entry.keymask.u32.mask,
		       &match.mask->src, sizeof(match.mask->src));
		err = prestera_acl_rule_match_add(rule, &m_entry);
		if (err)
			return err;

		memset(&m_entry, 0, sizeof(m_entry));
		m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST;
		memcpy(&m_entry.keymask.u32.key,
		       &match.key->dst, sizeof(match.key->dst));
		memcpy(&m_entry.keymask.u32.mask,
		       &match.mask->dst, sizeof(match.mask->dst));
		err = prestera_acl_rule_match_add(rule, &m_entry);
		if (err)
			return err;
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		/* L4 ports only make sense for TCP/UDP traffic. */
		if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
			NL_SET_ERR_MSG_MOD
				(f->common.extack,
				 "Only UDP and TCP keys are supported");
			return -EINVAL;
		}

		flow_rule_match_ports(f_rule, &match);

		memset(&m_entry, 0, sizeof(m_entry));
		m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC;
		m_entry.keymask.u16.key = ntohs(match.key->src);
		m_entry.keymask.u16.mask = ntohs(match.mask->src);
		err = prestera_acl_rule_match_add(rule, &m_entry);
		if (err)
			return err;

		memset(&m_entry, 0, sizeof(m_entry));
		m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST;
		m_entry.keymask.u16.key = ntohs(match.key->dst);
		m_entry.keymask.u16.mask = ntohs(match.mask->dst);
		err = prestera_acl_rule_match_add(rule, &m_entry);
		if (err)
			return err;
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(f_rule, &match);

		/* VLAN ID is only added when actually matched on; the
		 * TPID entry below is added unconditionally.
		 */
		if (match.mask->vlan_id != 0) {
			memset(&m_entry, 0, sizeof(m_entry));
			m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID;
			m_entry.keymask.u16.key = match.key->vlan_id;
			m_entry.keymask.u16.mask = match.mask->vlan_id;
			err = prestera_acl_rule_match_add(rule, &m_entry);
			if (err)
				return err;
		}

		memset(&m_entry, 0, sizeof(m_entry));
		m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID;
		m_entry.keymask.u16.key = ntohs(match.key->vlan_tpid);
		m_entry.keymask.u16.mask = ntohs(match.mask->vlan_tpid);
		err = prestera_acl_rule_match_add(rule, &m_entry);
		if (err)
			return err;
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_match_icmp match;

		flow_rule_match_icmp(f_rule, &match);

		/* ICMP type key,mask */
		memset(&m_entry, 0, sizeof(m_entry));
		m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE;
		m_entry.keymask.u8.key = match.key->type;
		m_entry.keymask.u8.mask = match.mask->type;
		err = prestera_acl_rule_match_add(rule, &m_entry);
		if (err)
			return err;

		/* ICMP code key,mask */
		memset(&m_entry, 0, sizeof(m_entry));
		m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE;
		m_entry.keymask.u8.key = match.key->code;
		m_entry.keymask.u8.mask = match.mask->code;
		err = prestera_acl_rule_match_add(rule, &m_entry);
		if (err)
			return err;
	}

	/* Finally, translate the classifier's actions. */
	return prestera_flower_parse_actions(block, rule,
					     &f->rule->action,
					     f->common.extack);
}
294
prestera_flower_replace(struct prestera_flow_block * block,struct flow_cls_offload * f)295 int prestera_flower_replace(struct prestera_flow_block *block,
296 struct flow_cls_offload *f)
297 {
298 struct prestera_switch *sw = prestera_acl_block_sw(block);
299 struct prestera_acl_rule *rule;
300 int err;
301
302 rule = prestera_acl_rule_create(block, f->cookie);
303 if (IS_ERR(rule))
304 return PTR_ERR(rule);
305
306 err = prestera_flower_parse(block, rule, f);
307 if (err)
308 goto err_flower_parse;
309
310 err = prestera_acl_rule_add(sw, rule);
311 if (err)
312 goto err_rule_add;
313
314 return 0;
315
316 err_rule_add:
317 err_flower_parse:
318 prestera_acl_rule_destroy(rule);
319 return err;
320 }
321
prestera_flower_destroy(struct prestera_flow_block * block,struct flow_cls_offload * f)322 void prestera_flower_destroy(struct prestera_flow_block *block,
323 struct flow_cls_offload *f)
324 {
325 struct prestera_acl_rule *rule;
326 struct prestera_switch *sw;
327
328 rule = prestera_acl_rule_lookup(prestera_acl_block_ruleset_get(block),
329 f->cookie);
330 if (rule) {
331 sw = prestera_acl_block_sw(block);
332 prestera_acl_rule_del(sw, rule);
333 prestera_acl_rule_destroy(rule);
334 }
335 }
336
prestera_flower_stats(struct prestera_flow_block * block,struct flow_cls_offload * f)337 int prestera_flower_stats(struct prestera_flow_block *block,
338 struct flow_cls_offload *f)
339 {
340 struct prestera_switch *sw = prestera_acl_block_sw(block);
341 struct prestera_acl_rule *rule;
342 u64 packets;
343 u64 lastuse;
344 u64 bytes;
345 int err;
346
347 rule = prestera_acl_rule_lookup(prestera_acl_block_ruleset_get(block),
348 f->cookie);
349 if (!rule)
350 return -EINVAL;
351
352 err = prestera_acl_rule_get_stats(sw, rule, &packets, &bytes, &lastuse);
353 if (err)
354 return err;
355
356 flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
357 FLOW_ACTION_HW_STATS_IMMEDIATE);
358 return 0;
359 }
360