// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies */

#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
#include "mlx5dr.h"
#include "fs_dr.h"

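/* Termination tables are created and managed through FW commands rather
 * than SW steering, so their operations are routed to the FW command set.
 */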
static bool mlx5_dr_is_fw_table(u32 flags)
{
	if (flags & MLX5_FLOW_TABLE_TERMINATION)
		return true;

	return false;
}

static int mlx5_cmd_dr_update_root_ft(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      u32 underlay_qpn,
				      bool disconnect)
{
	return mlx5_fs_cmd_get_fw_cmds()->update_root_ft(ns, ft, underlay_qpn,
							 disconnect);
}

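/* Point the table's miss action at next_ft (or clear it when next_ft is
 * NULL) and release the previously installed miss action, if any.
 */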
static int set_miss_action(struct mlx5_flow_root_namespace *ns,
			   struct mlx5_flow_table *ft,
			   struct mlx5_flow_table *next_ft)
{
	struct mlx5dr_action *old_miss_action;
	struct mlx5dr_action *action = NULL;
	struct mlx5dr_table *next_tbl;
	int err;

	next_tbl = next_ft ? next_ft->fs_dr_table.dr_table : NULL;
	if (next_tbl) {
		action = mlx5dr_action_create_dest_table(next_tbl);
		if (!action)
			return -EINVAL;
	}
	old_miss_action = ft->fs_dr_table.miss_action;
	err = mlx5dr_table_set_miss_action(ft->fs_dr_table.dr_table, action);
	if (err && action) {
		err = mlx5dr_action_destroy(action);
		if (err) {
			action = NULL;
			mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n",
				      err);
		}
	}
	ft->fs_dr_table.miss_action = action;
	if (old_miss_action) {
		err = mlx5dr_action_destroy(old_miss_action);
		if (err)
			mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n",
				      err);
	}

	return err;
}

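/* FW-managed (termination) tables are created via FW commands; all other
 * tables are created as SW-steering tables, with their miss path wired to
 * next_ft when one is provided.
 */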
static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
					 struct mlx5_flow_table *ft,
					 unsigned int log_size,
					 struct mlx5_flow_table *next_ft)
{
	struct mlx5dr_table *tbl;
	int err;

	if (mlx5_dr_is_fw_table(ft->flags))
		return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
								    log_size,
								    next_ft);

	tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain,
				  ft->level);
	if (!tbl) {
		mlx5_core_err(ns->dev, "Failed creating dr flow_table\n");
		return -EINVAL;
	}

	ft->fs_dr_table.dr_table = tbl;
	ft->id = mlx5dr_table_get_id(tbl);

	if (next_ft) {
		err = set_miss_action(ns, ft, next_ft);
		if (err) {
			mlx5dr_table_destroy(tbl);
			ft->fs_dr_table.dr_table = NULL;
			return err;
		}
	}

	return 0;
}

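/* Destroy the SW-steering table and the miss action attached to it,
 * falling back to FW commands for FW-managed tables.
 */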
static int mlx5_cmd_dr_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
					  struct mlx5_flow_table *ft)
{
	struct mlx5dr_action *action = ft->fs_dr_table.miss_action;
	int err;

	if (mlx5_dr_is_fw_table(ft->flags))
		return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);

	err = mlx5dr_table_destroy(ft->fs_dr_table.dr_table);
	if (err) {
		mlx5_core_err(ns->dev, "Failed to destroy flow_table (%d)\n",
			      err);
		return err;
	}
	if (action) {
		err = mlx5dr_action_destroy(action);
		if (err) {
			mlx5_core_err(ns->dev, "Failed to destroy action(%d)\n",
				      err);
			return err;
		}
	}

	return err;
}

static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns,
					 struct mlx5_flow_table *ft,
					 struct mlx5_flow_table *next_ft)
{
	return set_miss_action(ns, ft, next_ft);
}

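/* A flow group maps to a SW-steering matcher: the group's start flow index
 * is used as the matcher priority and its match criteria become the mask.
 */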
static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns,
					 struct mlx5_flow_table *ft,
					 u32 *in,
					 struct mlx5_flow_group *fg)
{
	struct mlx5dr_matcher *matcher;
	u16 priority = MLX5_GET(create_flow_group_in, in,
				start_flow_index);
	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
					    in,
					    match_criteria_enable);
	struct mlx5dr_match_parameters mask;

	if (mlx5_dr_is_fw_table(ft->flags))
		return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in,
								    fg);

	mask.match_buf = MLX5_ADDR_OF(create_flow_group_in,
				      in, match_criteria);
	mask.match_sz = sizeof(fg->mask.match_criteria);

	matcher = mlx5dr_matcher_create(ft->fs_dr_table.dr_table,
					priority,
					match_criteria_enable,
					&mask);
	if (!matcher) {
		mlx5_core_err(ns->dev, "Failed creating matcher\n");
		return -EINVAL;
	}

	fg->fs_dr_matcher.dr_matcher = matcher;
	return 0;
}

static int mlx5_cmd_dr_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
					  struct mlx5_flow_table *ft,
					  struct mlx5_flow_group *fg)
{
	if (mlx5_dr_is_fw_table(ft->flags))
		return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);

	return mlx5dr_matcher_destroy(fg->fs_dr_matcher.dr_matcher);
}

static struct mlx5dr_action *create_vport_action(struct mlx5dr_domain *domain,
						 struct mlx5_flow_rule *dst)
{
	struct mlx5_flow_destination *dest_attr = &dst->dest_attr;

	return mlx5dr_action_create_dest_vport(domain, dest_attr->vport.num,
					       dest_attr->vport.flags &
					       MLX5_FLOW_DEST_VPORT_VHCA_ID,
					       dest_attr->vport.vhca_id);
}

static struct mlx5dr_action *create_ft_action(struct mlx5_core_dev *dev,
					      struct mlx5_flow_rule *dst)
{
	struct mlx5_flow_table *dest_ft = dst->dest_attr.ft;

	if (mlx5_dr_is_fw_table(dest_ft->flags))
		return mlx5dr_create_action_dest_flow_fw_table(dest_ft, dev);
	return mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
}

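/* Pack ethertype, PCP and VID into the 32-bit VLAN header expected by the
 * push-vlan action (converted to network byte order).
 */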
static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domain,
						     struct mlx5_fs_vlan *vlan)
{
	u16 n_ethtype = vlan->ethtype;
	u8 prio = vlan->prio;
	u16 vid = vlan->vid;
	u32 vlan_hdr;

	vlan_hdr = (u32)n_ethtype << 16 | (u32)(prio) << 12 | (u32)vid;
	return mlx5dr_action_create_push_vlan(domain, htonl(vlan_hdr));
}

#define MLX5_FLOW_CONTEXT_ACTION_MAX 20
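/* Translate an fs_core FTE into an ordered array of SW-steering actions and
 * create the corresponding rule on the group's matcher. Actions created here
 * (rather than reused from the FTE) are also tracked in fs_dr_actions so they
 * can be destroyed together with the rule.
 */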
static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
				  struct mlx5_flow_table *ft,
				  struct mlx5_flow_group *group,
				  struct fs_fte *fte)
{
	struct mlx5dr_domain *domain = ns->fs_dr_domain.dr_domain;
	struct mlx5dr_action *term_action = NULL;
	struct mlx5dr_match_parameters params;
	struct mlx5_core_dev *dev = ns->dev;
	struct mlx5dr_action **fs_dr_actions;
	struct mlx5dr_action *tmp_action;
	struct mlx5dr_action **actions;
	bool delay_encap_set = false;
	struct mlx5dr_rule *rule;
	struct mlx5_flow_rule *dst;
	int fs_dr_num_actions = 0;
	int num_actions = 0;
	size_t match_sz;
	int err = 0;
	int i;

	if (mlx5_dr_is_fw_table(ft->flags))
		return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);

	actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions),
			  GFP_KERNEL);
	if (!actions)
		return -ENOMEM;

	fs_dr_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
				sizeof(*fs_dr_actions), GFP_KERNEL);
	if (!fs_dr_actions) {
		kfree(actions);
		return -ENOMEM;
	}

	match_sz = sizeof(fte->val);

	/* The order of the actions must be kept; only the following
	 * order is supported by SW steering:
	 * TX: push vlan -> modify header -> encap
	 * RX: decap -> pop vlan -> modify header
	 */
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]);
		if (!tmp_action) {
			err = -ENOMEM;
			goto free_actions;
		}
		fs_dr_actions[fs_dr_num_actions++] = tmp_action;
		actions[num_actions++] = tmp_action;
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
		tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]);
		if (!tmp_action) {
			err = -ENOMEM;
			goto free_actions;
		}
		fs_dr_actions[fs_dr_num_actions++] = tmp_action;
		actions[num_actions++] = tmp_action;
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
		enum mlx5dr_action_reformat_type decap_type =
			DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2;

		tmp_action = mlx5dr_action_create_packet_reformat(domain,
								  decap_type, 0,
								  NULL);
		if (!tmp_action) {
			err = -ENOMEM;
			goto free_actions;
		}
		fs_dr_actions[fs_dr_num_actions++] = tmp_action;
		actions[num_actions++] = tmp_action;
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
		bool is_decap = fte->action.pkt_reformat->reformat_type ==
			MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;

		if (is_decap)
			actions[num_actions++] =
				fte->action.pkt_reformat->action.dr_action;
		else
			delay_encap_set = true;
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
		tmp_action =
			mlx5dr_action_create_pop_vlan();
		if (!tmp_action) {
			err = -ENOMEM;
			goto free_actions;
		}
		fs_dr_actions[fs_dr_num_actions++] = tmp_action;
		actions[num_actions++] = tmp_action;
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
		tmp_action =
			mlx5dr_action_create_pop_vlan();
		if (!tmp_action) {
			err = -ENOMEM;
			goto free_actions;
		}
		fs_dr_actions[fs_dr_num_actions++] = tmp_action;
		actions[num_actions++] = tmp_action;
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		actions[num_actions++] =
			fte->action.modify_hdr->action.dr_action;

	if (delay_encap_set)
		actions[num_actions++] =
			fte->action.pkt_reformat->action.dr_action;

	/* The order of the actions below is not important */

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
		tmp_action = mlx5dr_action_create_drop();
		if (!tmp_action) {
			err = -ENOMEM;
			goto free_actions;
		}
		fs_dr_actions[fs_dr_num_actions++] = tmp_action;
		term_action = tmp_action;
	}

	if (fte->flow_context.flow_tag) {
		tmp_action =
			mlx5dr_action_create_tag(fte->flow_context.flow_tag);
		if (!tmp_action) {
			err = -ENOMEM;
			goto free_actions;
		}
		fs_dr_actions[fs_dr_num_actions++] = tmp_action;
		actions[num_actions++] = tmp_action;
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		list_for_each_entry(dst, &fte->node.children, node.list) {
			enum mlx5_flow_destination_type type = dst->dest_attr.type;
			u32 id;

			if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
				err = -ENOSPC;
				goto free_actions;
			}

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
				id = dst->dest_attr.counter_id;

				tmp_action =
					mlx5dr_action_create_flow_counter(id);
				if (!tmp_action) {
					err = -ENOMEM;
					goto free_actions;
				}
				fs_dr_actions[fs_dr_num_actions++] = tmp_action;
				actions[num_actions++] = tmp_action;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				tmp_action = create_ft_action(dev, dst);
				if (!tmp_action) {
					err = -ENOMEM;
					goto free_actions;
				}
				fs_dr_actions[fs_dr_num_actions++] = tmp_action;
				term_action = tmp_action;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				tmp_action = create_vport_action(domain, dst);
				if (!tmp_action) {
					err = -ENOMEM;
					goto free_actions;
				}
				fs_dr_actions[fs_dr_num_actions++] = tmp_action;
				term_action = tmp_action;
				break;
			default:
				err = -EOPNOTSUPP;
				goto free_actions;
			}
		}
	}

	params.match_sz = match_sz;
	params.match_buf = (u64 *)fte->val;

	if (term_action)
		actions[num_actions++] = term_action;

	rule = mlx5dr_rule_create(group->fs_dr_matcher.dr_matcher,
				  &params,
				  num_actions,
				  actions);
	if (!rule) {
		err = -EINVAL;
		goto free_actions;
	}

	kfree(actions);
	fte->fs_dr_rule.dr_rule = rule;
	fte->fs_dr_rule.num_actions = fs_dr_num_actions;
	fte->fs_dr_rule.dr_actions = fs_dr_actions;

	return 0;

free_actions:
	for (i = 0; i < fs_dr_num_actions; i++)
		if (!IS_ERR_OR_NULL(fs_dr_actions[i]))
			mlx5dr_action_destroy(fs_dr_actions[i]);

	mlx5_core_err(dev, "Failed to create dr rule err(%d)\n", err);
	kfree(actions);
	kfree(fs_dr_actions);
	return err;
}

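/* Map the FW packet-reformat type to its SW-steering equivalent and create
 * the reformat action that FTEs will later reference.
 */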
static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					     int reformat_type,
					     size_t size,
					     void *reformat_data,
					     enum mlx5_flow_namespace_type namespace,
					     struct mlx5_pkt_reformat *pkt_reformat)
{
	struct mlx5dr_domain *dr_domain = ns->fs_dr_domain.dr_domain;
	struct mlx5dr_action *action;
	int dr_reformat;

	switch (reformat_type) {
	case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
	case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
	case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
		dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2;
		break;
	case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
		dr_reformat = DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2;
		break;
	case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
		dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3;
		break;
	default:
		mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
			      reformat_type);
		return -EOPNOTSUPP;
	}

	action = mlx5dr_action_create_packet_reformat(dr_domain,
						      dr_reformat,
						      size,
						      reformat_data);
	if (!action) {
		mlx5_core_err(ns->dev, "Failed allocating packet-reformat action\n");
		return -EINVAL;
	}

	pkt_reformat->action.dr_action = action;

	return 0;
}

static void mlx5_cmd_dr_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
						struct mlx5_pkt_reformat *pkt_reformat)
{
	mlx5dr_action_destroy(pkt_reformat->action.dr_action);
}

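/* Create a SW-steering modify-header action from the caller-provided array
 * of num_actions modify-header commands.
 */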
static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					   u8 namespace, u8 num_actions,
					   void *modify_actions,
					   struct mlx5_modify_hdr *modify_hdr)
{
	struct mlx5dr_domain *dr_domain = ns->fs_dr_domain.dr_domain;
	struct mlx5dr_action *action;
	size_t actions_sz;

	actions_sz = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) *
		num_actions;
	action = mlx5dr_action_create_modify_header(dr_domain, 0,
						    actions_sz,
						    modify_actions);
	if (!action) {
		mlx5_core_err(ns->dev, "Failed allocating modify-header action\n");
		return -EINVAL;
	}

	modify_hdr->action.dr_action = action;

	return 0;
}

static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
					      struct mlx5_modify_hdr *modify_hdr)
{
	mlx5dr_action_destroy(modify_hdr->action.dr_action);
}

static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
				  struct mlx5_flow_table *ft,
				  struct mlx5_flow_group *group,
				  int modify_mask,
				  struct fs_fte *fte)
{
	return -EOPNOTSUPP;
}

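/* Destroy the SW-steering rule for this FTE along with any actions that
 * were created on its behalf in mlx5_cmd_dr_create_fte().
 */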
static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
				  struct mlx5_flow_table *ft,
				  struct fs_fte *fte)
{
	struct mlx5_fs_dr_rule *rule = &fte->fs_dr_rule;
	int err;
	int i;

	if (mlx5_dr_is_fw_table(ft->flags))
		return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);

	err = mlx5dr_rule_destroy(rule->dr_rule);
	if (err)
		return err;

	for (i = 0; i < rule->num_actions; i++)
		if (!IS_ERR_OR_NULL(rule->dr_actions[i]))
			mlx5dr_action_destroy(rule->dr_actions[i]);

	kfree(rule->dr_actions);
	return 0;
}

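/* Pair this namespace's SW-steering domain with the peer namespace's domain,
 * or clear the pairing when peer_ns is NULL.
 */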
static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns,
				struct mlx5_flow_root_namespace *peer_ns)
{
	struct mlx5dr_domain *peer_domain = NULL;

	if (peer_ns)
		peer_domain = peer_ns->fs_dr_domain.dr_domain;
	mlx5dr_domain_set_peer(ns->fs_dr_domain.dr_domain,
			       peer_domain);
	return 0;
}

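/* Create the SW-steering FDB domain backing this root namespace. */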
static int mlx5_cmd_dr_create_ns(struct mlx5_flow_root_namespace *ns)
{
	ns->fs_dr_domain.dr_domain =
		mlx5dr_domain_create(ns->dev,
				     MLX5DR_DOMAIN_TYPE_FDB);
	if (!ns->fs_dr_domain.dr_domain) {
		mlx5_core_err(ns->dev, "Failed to create dr flow namespace\n");
		return -EOPNOTSUPP;
	}
	return 0;
}

static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns)
{
	return mlx5dr_domain_destroy(ns->fs_dr_domain.dr_domain);
}

bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
{
	return mlx5dr_is_supported(dev);
}

static const struct mlx5_flow_cmds mlx5_flow_cmds_dr = {
	.create_flow_table = mlx5_cmd_dr_create_flow_table,
	.destroy_flow_table = mlx5_cmd_dr_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_dr_modify_flow_table,
	.create_flow_group = mlx5_cmd_dr_create_flow_group,
	.destroy_flow_group = mlx5_cmd_dr_destroy_flow_group,
	.create_fte = mlx5_cmd_dr_create_fte,
	.update_fte = mlx5_cmd_dr_update_fte,
	.delete_fte = mlx5_cmd_dr_delete_fte,
	.update_root_ft = mlx5_cmd_dr_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_dr_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_dr_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_dr_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_dr_modify_header_dealloc,
	.set_peer = mlx5_cmd_dr_set_peer,
	.create_ns = mlx5_cmd_dr_create_ns,
	.destroy_ns = mlx5_cmd_dr_destroy_ns,
};

const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
{
	return &mlx5_flow_cmds_dr;
}