/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

#include "fs_core.h"
#include "fs_cmd.h"
#include "mlx5_core.h"
#include "eswitch.h"

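/*
 * The mlx5_cmd_stub_* handlers below are no-op implementations of the flow
 * steering command interface.  They are handed out by
 * mlx5_fs_cmd_get_stub_cmds() (see mlx5_fs_cmd_get_default() at the end of
 * this file) for table types that are not programmed through firmware
 * commands here, so fs_core can keep its software bookkeeping without
 * issuing device commands.
 */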
static int mlx5_cmd_stub_update_root_ft(struct mlx5_core_dev *dev,
					struct mlx5_flow_table *ft,
					u32 underlay_qpn,
					bool disconnect)
{
	return 0;
}

static int mlx5_cmd_stub_create_flow_table(struct mlx5_core_dev *dev,
					   u16 vport,
					   enum fs_flow_table_op_mod op_mod,
					   enum fs_flow_table_type type,
					   unsigned int level,
					   unsigned int log_size,
					   struct mlx5_flow_table *next_ft,
					   unsigned int *table_id, u32 flags)
{
	return 0;
}

static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_core_dev *dev,
					    struct mlx5_flow_table *ft)
{
	return 0;
}

static int mlx5_cmd_stub_modify_flow_table(struct mlx5_core_dev *dev,
					   struct mlx5_flow_table *ft,
					   struct mlx5_flow_table *next_ft)
{
	return 0;
}

static int mlx5_cmd_stub_create_flow_group(struct mlx5_core_dev *dev,
					   struct mlx5_flow_table *ft,
					   u32 *in,
					   unsigned int *group_id)
{
	return 0;
}

static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_core_dev *dev,
					    struct mlx5_flow_table *ft,
					    unsigned int group_id)
{
	return 0;
}

static int mlx5_cmd_stub_create_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    struct fs_fte *fte)
{
	return 0;
}

static int mlx5_cmd_stub_update_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    unsigned int group_id,
				    int modify_mask,
				    struct fs_fte *fte)
{
	return -EOPNOTSUPP;
}

static int mlx5_cmd_stub_delete_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte)
{
	return 0;
}

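/*
 * Point the device at a new root flow table via SET_FLOW_TABLE_ROOT, or
 * disconnect the current root when @disconnect is set.  On IB ports the
 * command is skipped when no underlay QPN has been supplied.
 */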
static int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
				   struct mlx5_flow_table *ft, u32 underlay_qpn,
				   bool disconnect)
{
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};

	if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	    underlay_qpn == 0)
		return 0;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);

	if (disconnect) {
		MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
		MLX5_SET(set_flow_table_root_in, in, table_id, 0);
	} else {
		MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
		MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
	}

	MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
	if (ft->vport) {
		MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
		MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

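/*
 * Create a flow table in firmware.  The op_mod selects between a normal
 * table (optionally chained to @next_ft through the miss action) and a LAG
 * demux table; @flags may request encap/decap support on the table.  On
 * success the firmware-assigned table id is returned in *table_id.
 */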
static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
				      u16 vport,
				      enum fs_flow_table_op_mod op_mod,
				      enum fs_flow_table_type type,
				      unsigned int level,
				      unsigned int log_size,
				      struct mlx5_flow_table *next_ft,
				      unsigned int *table_id, u32 flags)
{
	int en_encap_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN);
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)]   = {0};
	int err;

	MLX5_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);

	MLX5_SET(create_flow_table_in, in, table_type, type);
	MLX5_SET(create_flow_table_in, in, flow_table_context.level, level);
	MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
	if (vport) {
		MLX5_SET(create_flow_table_in, in, vport_number, vport);
		MLX5_SET(create_flow_table_in, in, other_vport, 1);
	}

	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
		 en_encap_decap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.encap_en,
		 en_encap_decap);

	switch (op_mod) {
	case FS_FT_OP_MOD_NORMAL:
		if (next_ft) {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action, 1);
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_id, next_ft->id);
		}
		break;

	case FS_FT_OP_MOD_LAG_DEMUX:
		MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
		if (next_ft)
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id,
				 next_ft->id);
		break;
	}

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*table_id = MLX5_GET(create_flow_table_out, out,
				     table_id);
	return err;
}

static int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
				       struct mlx5_flow_table *ft)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};

	MLX5_SET(destroy_flow_table_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
	MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
	if (ft->vport) {
		MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
		MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

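/*
 * Update a flow table's "next table" linkage: for LAG demux tables this
 * rewrites lag_master_next_table_id, for all other tables it sets (or
 * clears) the miss action that forwards unmatched packets to @next_ft.
 */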
static int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_table *next_ft)
{
	u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};

	MLX5_SET(modify_flow_table_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
	MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
	MLX5_SET(modify_flow_table_in, in, table_id, ft->id);

	if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, 0);
		}
	} else {
		if (ft->vport) {
			MLX5_SET(modify_flow_table_in, in, vport_number,
				 ft->vport);
			MLX5_SET(modify_flow_table_in, in, other_vport, 1);
		}
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action, 1);
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_id,
				 next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action, 0);
		}
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
				      struct mlx5_flow_table *ft,
				      u32 *in,
				      unsigned int *group_id)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	MLX5_SET(create_flow_group_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_type, ft->type);
	MLX5_SET(create_flow_group_in, in, table_id, ft->id);
	if (ft->vport) {
		MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
		MLX5_SET(create_flow_group_in, in, other_vport, 1);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (!err)
		*group_id = MLX5_GET(create_flow_group_out, out,
				     group_id);
	return err;
}

static int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
				       struct mlx5_flow_table *ft,
				       unsigned int group_id)
{
	u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)]   = {0};

	MLX5_SET(destroy_flow_group_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
	if (ft->vport) {
		MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
		MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

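/*
 * Build and execute a SET_FLOW_TABLE_ENTRY command for @fte.  The command
 * carries the flow context (flow tag, action bits, encap/modify-header ids,
 * push VLAN headers, match value) followed by a variable-length destination
 * list: forwarding destinations first, then flow counters when the COUNT
 * action is set.  Called with opmod 0 to create an FTE and opmod 1 to modify
 * an existing one (see mlx5_cmd_create_fte()/mlx5_cmd_update_fte() below).
 */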
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
			    int opmod, int modify_mask,
			    struct mlx5_flow_table *ft,
			    unsigned int group_id,
			    struct fs_fte *fte)
{
	unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
		fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
	struct mlx5_flow_rule *dst;
	void *in_flow_context, *vlan;
	void *in_match_value;
	void *in_dests;
	u32 *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id,   ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	if (ft->vport) {
		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
		MLX5_SET(set_fte_in, in, other_vport, 1);
	}

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag);
	MLX5_SET(flow_context, in_flow_context, action, fte->action.action);
	MLX5_SET(flow_context, in_flow_context, encap_id, fte->action.encap_id);
	MLX5_SET(flow_context, in_flow_context, modify_header_id,
		 fte->action.modify_id);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, &fte->val, sizeof(fte->val));

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			unsigned int id, type = dst->dest_attr.type;

			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				id = dst->dest_attr.ft_num;
				type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = dst->dest_attr.ft->id;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				id = dst->dest_attr.vport.num;
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id_valid,
					 dst->dest_attr.vport.vhca_id_valid);
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 dst->dest_attr.vport.vhca_id);
				break;
			default:
				id = dst->dest_attr.tir_num;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			if (dst->dest_attr.type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 dst->dest_attr.counter->id);
			in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
			list_size++;
		}
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}

static int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
			       struct mlx5_flow_table *ft,
			       struct mlx5_flow_group *group,
			       struct fs_fte *fte)
{
	unsigned int group_id = group->id;

	return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}

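/*
 * Modify an existing FTE in place.  This is only possible when the device
 * advertises flow_modify_en in the NIC receive flow table properties;
 * otherwise -EOPNOTSUPP is returned.
 */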
static int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
			       struct mlx5_flow_table *ft,
			       unsigned int group_id,
			       int modify_mask,
			       struct fs_fte *fte)
{
	int opmod;
	int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
						flow_table_properties_nic_receive.
						flow_modify_en);
	if (!atomic_mod_cap)
		return -EOPNOTSUPP;
	opmod = 1;

	return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
}

static int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
			       struct mlx5_flow_table *ft,
			       struct fs_fte *fte)
{
	u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)]   = {0};

	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_type, ft->type);
	MLX5_SET(delete_fte_in, in, table_id, ft->id);
	MLX5_SET(delete_fte_in, in, flow_index, fte->index);
	if (ft->vport) {
		MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
		MLX5_SET(delete_fte_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
{
	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
	int err;

	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return err;
}

int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};

	MLX5_SET(dealloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
	MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
		      u64 *packets, u64 *bytes)
{
	u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter)]   = {0};
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
	void *stats;
	int err = 0;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);
	return 0;
}

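/*
 * Bulk flow-counter query support: mlx5_cmd_fc_bulk_alloc() sizes an output
 * buffer for @num consecutive counters starting at @id,
 * mlx5_cmd_fc_bulk_query() fills it with a single QUERY_FLOW_COUNTER
 * command, and mlx5_cmd_fc_bulk_get() extracts the packet/byte totals for
 * one counter from that buffer.
 */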
struct mlx5_cmd_fc_bulk {
	u32 id;
	int num;
	int outlen;
	u32 out[0];
};

struct mlx5_cmd_fc_bulk *
mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num)
{
	struct mlx5_cmd_fc_bulk *b;
	int outlen =
		MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter) * num;

	b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
	if (!b)
		return NULL;

	b->id = id;
	b->num = num;
	b->outlen = outlen;

	return b;
}

void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
{
	kfree(b);
}

int
mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
{
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
	MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
	return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
}

void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
			  struct mlx5_cmd_fc_bulk *b, u32 id,
			  u64 *packets, u64 *bytes)
{
	int index = id - b->id;
	void *stats;

	if (index < 0 || index >= b->num) {
		mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
			       id, b->id, b->id + b->num - 1);
		return;
	}

	stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
			     flow_statistics[index]);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);
}

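/*
 * Allocate an encap header context in firmware.  The caller-provided header
 * blob is copied after the fixed part of ALLOC_ENCAP_HEADER and must not
 * exceed the max_encap_header_size eswitch capability; the returned
 * encap_id is later referenced from flow contexts.
 */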
int mlx5_encap_alloc(struct mlx5_core_dev *dev,
		     int header_type,
		     size_t size,
		     void *encap_header,
		     u32 *encap_id)
{
	int max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
	u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)];
	void *encap_header_in;
	void *header;
	int inlen;
	int err;
	u32 *in;

	if (size > max_encap_size) {
		mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
			       size, max_encap_size);
		return -EINVAL;
	}

	in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + size,
		     GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in, encap_header);
	header = MLX5_ADDR_OF(encap_header_in, encap_header_in, encap_header);
	inlen = header - (void *)in + size;

	memset(in, 0, inlen);
	MLX5_SET(alloc_encap_header_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_ENCAP_HEADER);
	MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size);
	MLX5_SET(encap_header_in, encap_header_in, header_type, header_type);
	memcpy(header, encap_header, size);

	memset(out, 0, sizeof(out));
	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	*encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id);
	kfree(in);
	return err;
}

void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)];
	u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)];

	memset(in, 0, sizeof(in));
	MLX5_SET(dealloc_encap_header_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
	MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id);

	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

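/*
 * Allocate a modify-header (header rewrite) context holding @num_actions
 * actions.  Only the FDB and NIC RX (kernel) namespaces are handled here,
 * each bounded by its own max_modify_header_actions capability; the
 * returned id is later referenced from flow contexts as modify_header_id.
 */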
int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
			     u8 namespace, u8 num_actions,
			     void *modify_actions, u32 *modify_header_id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)];
	int max_actions, actions_size, inlen, err;
	void *actions_in;
	u8 table_type;
	u32 *in;

	switch (namespace) {
	case MLX5_FLOW_NAMESPACE_FDB:
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
		table_type = FS_FT_FDB;
		break;
	case MLX5_FLOW_NAMESPACE_KERNEL:
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
		table_type = FS_FT_NIC_RX;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (num_actions > max_actions) {
		mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
			       num_actions, max_actions);
		return -EOPNOTSUPP;
	}

	actions_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) * num_actions;
	inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
	MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);

	actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
	memcpy(actions_in, modify_actions, actions_size);

	memset(out, 0, sizeof(out));
	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	*modify_header_id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
	kfree(in);
	return err;
}

void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)];
	u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)];

	memset(in, 0, sizeof(in));
	MLX5_SET(dealloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
		 modify_header_id);

	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static const struct mlx5_flow_cmds mlx5_flow_cmds = {
	.create_flow_table = mlx5_cmd_create_flow_table,
	.destroy_flow_table = mlx5_cmd_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_modify_flow_table,
	.create_flow_group = mlx5_cmd_create_flow_group,
	.destroy_flow_group = mlx5_cmd_destroy_flow_group,
	.create_fte = mlx5_cmd_create_fte,
	.update_fte = mlx5_cmd_update_fte,
	.delete_fte = mlx5_cmd_delete_fte,
	.update_root_ft = mlx5_cmd_update_root_ft,
};

static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
	.create_flow_table = mlx5_cmd_stub_create_flow_table,
	.destroy_flow_table = mlx5_cmd_stub_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_stub_modify_flow_table,
	.create_flow_group = mlx5_cmd_stub_create_flow_group,
	.destroy_flow_group = mlx5_cmd_stub_destroy_flow_group,
	.create_fte = mlx5_cmd_stub_create_fte,
	.update_fte = mlx5_cmd_stub_update_fte,
	.delete_fte = mlx5_cmd_stub_delete_fte,
	.update_root_ft = mlx5_cmd_stub_update_root_ft,
};

static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
{
	return &mlx5_flow_cmds;
}

static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void)
{
	return &mlx5_flow_cmd_stubs;
}

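/*
 * Pick the command backend for a flow table type: types programmed through
 * firmware commands get mlx5_flow_cmds, anything else (currently including
 * FS_FT_NIC_TX) falls back to the no-op stubs.
 */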
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type)
{
	switch (type) {
	case FS_FT_NIC_RX:
	case FS_FT_ESW_EGRESS_ACL:
	case FS_FT_ESW_INGRESS_ACL:
	case FS_FT_FDB:
	case FS_FT_SNIFFER_RX:
	case FS_FT_SNIFFER_TX:
		return mlx5_fs_cmd_get_fw_cmds();
	case FS_FT_NIC_TX:
	default:
		return mlx5_fs_cmd_get_stub_cmds();
	}
}