1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_lib.h"
5 #include "ice_switch.h"
6 
7 #define ICE_ETH_DA_OFFSET		0
8 #define ICE_ETH_ETHTYPE_OFFSET		12
9 #define ICE_ETH_VLAN_TCI_OFFSET		14
10 #define ICE_MAX_VLAN_ID			0xFFF
11 
12 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
13  * struct to configure any switch filter rules.
14  * {DA (6 bytes), SA(6 bytes),
15  * Ether type (2 bytes for header without VLAN tag) OR
16  * VLAN tag (4 bytes for header with VLAN tag) }
17  *
18  * Word on Hardcoded values
19  * byte 0 = 0x2: to identify it as locally administered DA MAC
20  * byte 6 = 0x2: to identify it as locally administered SA MAC
21  * byte 12 = 0x81 & byte 13 = 0x00:
22  *	In case of a VLAN filter the first two bytes define the ether type
23  *	(0x8100) and the remaining two bytes are a placeholder for programming a given VLAN ID;
24  *	in case of an Ether type filter it is treated as a header without a VLAN tag
25  *	and bytes 12 and 13 are used to program a given Ether type instead
26  */
27 #define DUMMY_ETH_HDR_LEN		16
28 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
29 							0x2, 0, 0, 0, 0, 0,
30 							0x81, 0, 0, 0};
31 
32 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
33 	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
34 	 (DUMMY_ETH_HDR_LEN * \
35 	  sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
36 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
37 	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
38 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
39 	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
40 	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
41 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
42 	(offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
43 	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
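
/* Sizing note (illustrative, not from the original source): callers below use
 * these macros to size the admin-queue buffers they submit. For example, a
 * single lookup rule that carries the dummy Ethernet header is allocated as
 *
 *	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
 *			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
 *
 * while a VSI list rule referencing n VSIs uses ICE_SW_RULE_VSI_LIST_SIZE(n)
 * and a large-action rule with n actions uses ICE_SW_RULE_LG_ACT_SIZE(n).
 */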
44 
45 /**
46  * ice_init_def_sw_recp - initialize the recipe book keeping tables
47  * @hw: pointer to the HW struct
48  *
49  * Allocate memory for the entire recipe table and initialize the structures/
50  * entries corresponding to basic recipes.
51  */
52 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
53 {
54 	struct ice_sw_recipe *recps;
55 	u8 i;
56 
57 	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
58 			     sizeof(*recps), GFP_KERNEL);
59 	if (!recps)
60 		return ICE_ERR_NO_MEMORY;
61 
62 	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
63 		recps[i].root_rid = i;
64 		INIT_LIST_HEAD(&recps[i].filt_rules);
65 		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
66 		mutex_init(&recps[i].filt_rule_lock);
67 	}
68 
69 	hw->switch_info->recp_list = recps;
70 
71 	return 0;
72 }
73 
74 /**
75  * ice_aq_get_sw_cfg - get switch configuration
76  * @hw: pointer to the hardware structure
77  * @buf: pointer to the result buffer
78  * @buf_size: length of the buffer available for response
79  * @req_desc: pointer to requested descriptor
80  * @num_elems: pointer to number of elements
81  * @cd: pointer to command details structure or NULL
82  *
83  * Get switch configuration (0x0200) to be placed in buf.
84  * This admin command returns information such as initial VSI/port number
85  * and switch ID it belongs to.
86  *
87  * NOTE: *req_desc is both an input/output parameter.
88  * The caller of this function first calls it with *req_desc set
89  * to 0. If the response from f/w has *req_desc set to 0, all the switch
90  * configuration information has been returned; if non-zero (meaning not all
91  * the information was returned), the caller should call this function again
92  * with *req_desc set to the previous value returned by f/w to get the
93  * next block of switch configuration information.
94  *
95  * *num_elems is an output-only parameter. It reflects the number of elements
96  * in the response buffer. The caller of this function should use *num_elems
97  * while parsing the response buffer.
98  */
99 static enum ice_status
100 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
101 		  u16 buf_size, u16 *req_desc, u16 *num_elems,
102 		  struct ice_sq_cd *cd)
103 {
104 	struct ice_aqc_get_sw_cfg *cmd;
105 	struct ice_aq_desc desc;
106 	enum ice_status status;
107 
108 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
109 	cmd = &desc.params.get_sw_conf;
110 	cmd->element = cpu_to_le16(*req_desc);
111 
112 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
113 	if (!status) {
114 		*req_desc = le16_to_cpu(cmd->element);
115 		*num_elems = le16_to_cpu(cmd->num_elems);
116 	}
117 
118 	return status;
119 }
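
/* Usage sketch (illustrative only): the *req_desc paging protocol described
 * above is typically driven by a loop like the one in ice_get_initial_sw_cfg()
 * further below:
 *
 *	u16 req_desc = 0, num_elems;
 *
 *	do {
 *		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
 *					   &req_desc, &num_elems, NULL);
 *		if (status)
 *			break;
 *		... parse num_elems entries from rbuf ...
 *	} while (req_desc);
 */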
120 
121 /**
122  * ice_aq_add_vsi
123  * @hw: pointer to the HW struct
124  * @vsi_ctx: pointer to a VSI context struct
125  * @cd: pointer to command details structure or NULL
126  *
127  * Add a VSI context to the hardware (0x0210)
128  */
129 static enum ice_status
130 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
131 	       struct ice_sq_cd *cd)
132 {
133 	struct ice_aqc_add_update_free_vsi_resp *res;
134 	struct ice_aqc_add_get_update_free_vsi *cmd;
135 	struct ice_aq_desc desc;
136 	enum ice_status status;
137 
138 	cmd = &desc.params.vsi_cmd;
139 	res = &desc.params.add_update_free_vsi_res;
140 
141 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
142 
143 	if (!vsi_ctx->alloc_from_pool)
144 		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
145 					   ICE_AQ_VSI_IS_VALID);
146 	cmd->vf_id = vsi_ctx->vf_num;
147 
148 	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
149 
150 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
151 
152 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
153 				 sizeof(vsi_ctx->info), cd);
154 
155 	if (!status) {
156 		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
157 		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
158 		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
159 	}
160 
161 	return status;
162 }
163 
164 /**
165  * ice_aq_free_vsi
166  * @hw: pointer to the HW struct
167  * @vsi_ctx: pointer to a VSI context struct
168  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
169  * @cd: pointer to command details structure or NULL
170  *
171  * Free VSI context info from hardware (0x0213)
172  */
173 static enum ice_status
174 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
175 		bool keep_vsi_alloc, struct ice_sq_cd *cd)
176 {
177 	struct ice_aqc_add_update_free_vsi_resp *resp;
178 	struct ice_aqc_add_get_update_free_vsi *cmd;
179 	struct ice_aq_desc desc;
180 	enum ice_status status;
181 
182 	cmd = &desc.params.vsi_cmd;
183 	resp = &desc.params.add_update_free_vsi_res;
184 
185 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
186 
187 	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
188 	if (keep_vsi_alloc)
189 		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
190 
191 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
192 	if (!status) {
193 		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
194 		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
195 	}
196 
197 	return status;
198 }
199 
200 /**
201  * ice_aq_update_vsi
202  * @hw: pointer to the HW struct
203  * @vsi_ctx: pointer to a VSI context struct
204  * @cd: pointer to command details structure or NULL
205  *
206  * Update VSI context in the hardware (0x0211)
207  */
208 static enum ice_status
209 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
210 		  struct ice_sq_cd *cd)
211 {
212 	struct ice_aqc_add_update_free_vsi_resp *resp;
213 	struct ice_aqc_add_get_update_free_vsi *cmd;
214 	struct ice_aq_desc desc;
215 	enum ice_status status;
216 
217 	cmd = &desc.params.vsi_cmd;
218 	resp = &desc.params.add_update_free_vsi_res;
219 
220 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
221 
222 	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
223 
224 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
225 
226 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
227 				 sizeof(vsi_ctx->info), cd);
228 
229 	if (!status) {
230 		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
231 		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
232 	}
233 
234 	return status;
235 }
236 
237 /**
238  * ice_is_vsi_valid - check whether the VSI is valid or not
239  * @hw: pointer to the HW struct
240  * @vsi_handle: VSI handle
241  *
242  * check whether the VSI is valid or not
243  */
244 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
245 {
246 	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
247 }
248 
249 /**
250  * ice_get_hw_vsi_num - return the HW VSI number
251  * @hw: pointer to the HW struct
252  * @vsi_handle: VSI handle
253  *
254  * return the HW VSI number
255  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
256  */
257 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
258 {
259 	return hw->vsi_ctx[vsi_handle]->vsi_num;
260 }
261 
262 /**
263  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
264  * @hw: pointer to the HW struct
265  * @vsi_handle: VSI handle
266  *
267  * return the VSI context entry for a given VSI handle
268  */
269 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
270 {
271 	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
272 }
273 
274 /**
275  * ice_save_vsi_ctx - save the VSI context for a given VSI handle
276  * @hw: pointer to the HW struct
277  * @vsi_handle: VSI handle
278  * @vsi: VSI context pointer
279  *
280  * save the VSI context entry for a given VSI handle
281  */
282 static void
283 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
284 {
285 	hw->vsi_ctx[vsi_handle] = vsi;
286 }
287 
288 /**
289  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
290  * @hw: pointer to the HW struct
291  * @vsi_handle: VSI handle
292  */
293 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
294 {
295 	struct ice_vsi_ctx *vsi;
296 	u8 i;
297 
298 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
299 	if (!vsi)
300 		return;
301 	ice_for_each_traffic_class(i) {
302 		if (vsi->lan_q_ctx[i]) {
303 			devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
304 			vsi->lan_q_ctx[i] = NULL;
305 		}
306 		if (vsi->rdma_q_ctx[i]) {
307 			devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
308 			vsi->rdma_q_ctx[i] = NULL;
309 		}
310 	}
311 }
312 
313 /**
314  * ice_clear_vsi_ctx - clear the VSI context entry
315  * @hw: pointer to the HW struct
316  * @vsi_handle: VSI handle
317  *
318  * clear the VSI context entry
319  */
320 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
321 {
322 	struct ice_vsi_ctx *vsi;
323 
324 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
325 	if (vsi) {
326 		ice_clear_vsi_q_ctx(hw, vsi_handle);
327 		devm_kfree(ice_hw_to_dev(hw), vsi);
328 		hw->vsi_ctx[vsi_handle] = NULL;
329 	}
330 }
331 
332 /**
333  * ice_clear_all_vsi_ctx - clear all the VSI context entries
334  * @hw: pointer to the HW struct
335  */
336 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
337 {
338 	u16 i;
339 
340 	for (i = 0; i < ICE_MAX_VSI; i++)
341 		ice_clear_vsi_ctx(hw, i);
342 }
343 
344 /**
345  * ice_add_vsi - add VSI context to the hardware and VSI handle list
346  * @hw: pointer to the HW struct
347  * @vsi_handle: unique VSI handle provided by drivers
348  * @vsi_ctx: pointer to a VSI context struct
349  * @cd: pointer to command details structure or NULL
350  *
351  * Add a VSI context to the hardware and also add it to the VSI handle list.
352  * If this function gets called after reset for existing VSIs then update
353  * with the new HW VSI number in the corresponding VSI handle list entry.
354  */
355 enum ice_status
356 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
357 	    struct ice_sq_cd *cd)
358 {
359 	struct ice_vsi_ctx *tmp_vsi_ctx;
360 	enum ice_status status;
361 
362 	if (vsi_handle >= ICE_MAX_VSI)
363 		return ICE_ERR_PARAM;
364 	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
365 	if (status)
366 		return status;
367 	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
368 	if (!tmp_vsi_ctx) {
369 		/* Create a new VSI context */
370 		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
371 					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
372 		if (!tmp_vsi_ctx) {
373 			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
374 			return ICE_ERR_NO_MEMORY;
375 		}
376 		*tmp_vsi_ctx = *vsi_ctx;
377 		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
378 	} else {
379 		/* update with new HW VSI num */
380 		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
381 	}
382 
383 	return 0;
384 }
385 
386 /**
387  * ice_free_vsi - free VSI context from hardware and VSI handle list
388  * @hw: pointer to the HW struct
389  * @vsi_handle: unique VSI handle
390  * @vsi_ctx: pointer to a VSI context struct
391  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
392  * @cd: pointer to command details structure or NULL
393  *
394  * Free VSI context info from hardware as well as from VSI handle list
395  */
396 enum ice_status
397 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
398 	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
399 {
400 	enum ice_status status;
401 
402 	if (!ice_is_vsi_valid(hw, vsi_handle))
403 		return ICE_ERR_PARAM;
404 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
405 	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
406 	if (!status)
407 		ice_clear_vsi_ctx(hw, vsi_handle);
408 	return status;
409 }
410 
411 /**
412  * ice_update_vsi
413  * @hw: pointer to the HW struct
414  * @vsi_handle: unique VSI handle
415  * @vsi_ctx: pointer to a VSI context struct
416  * @cd: pointer to command details structure or NULL
417  *
418  * Update VSI context in the hardware
419  */
420 enum ice_status
421 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
422 	       struct ice_sq_cd *cd)
423 {
424 	if (!ice_is_vsi_valid(hw, vsi_handle))
425 		return ICE_ERR_PARAM;
426 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
427 	return ice_aq_update_vsi(hw, vsi_ctx, cd);
428 }
429 
430 /**
431  * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
432  * @hw: pointer to HW struct
433  * @vsi_handle: VSI SW index
434  * @enable: boolean for enable/disable
435  */
436 int
437 ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
438 {
439 	struct ice_vsi_ctx *ctx;
440 
441 	ctx = ice_get_vsi_ctx(hw, vsi_handle);
442 	if (!ctx)
443 		return -EIO;
444 
445 	if (enable)
446 		ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
447 	else
448 		ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
449 
450 	return ice_status_to_errno(ice_update_vsi(hw, vsi_handle, ctx, NULL));
451 }
452 
453 /**
454  * ice_aq_alloc_free_vsi_list
455  * @hw: pointer to the HW struct
456  * @vsi_list_id: VSI list ID returned or used for lookup
457  * @lkup_type: switch rule filter lookup type
458  * @opc: switch rules population command type - pass in the command opcode
459  *
460  * allocates or frees a VSI list resource
461  */
462 static enum ice_status
463 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
464 			   enum ice_sw_lkup_type lkup_type,
465 			   enum ice_adminq_opc opc)
466 {
467 	struct ice_aqc_alloc_free_res_elem *sw_buf;
468 	struct ice_aqc_res_elem *vsi_ele;
469 	enum ice_status status;
470 	u16 buf_len;
471 
472 	buf_len = struct_size(sw_buf, elem, 1);
473 	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
474 	if (!sw_buf)
475 		return ICE_ERR_NO_MEMORY;
476 	sw_buf->num_elems = cpu_to_le16(1);
477 
478 	if (lkup_type == ICE_SW_LKUP_MAC ||
479 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
480 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
481 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
482 	    lkup_type == ICE_SW_LKUP_PROMISC ||
483 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
484 		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
485 	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
486 		sw_buf->res_type =
487 			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
488 	} else {
489 		status = ICE_ERR_PARAM;
490 		goto ice_aq_alloc_free_vsi_list_exit;
491 	}
492 
493 	if (opc == ice_aqc_opc_free_res)
494 		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
495 
496 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
497 	if (status)
498 		goto ice_aq_alloc_free_vsi_list_exit;
499 
500 	if (opc == ice_aqc_opc_alloc_res) {
501 		vsi_ele = &sw_buf->elem[0];
502 		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
503 	}
504 
505 ice_aq_alloc_free_vsi_list_exit:
506 	devm_kfree(ice_hw_to_dev(hw), sw_buf);
507 	return status;
508 }
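
/* Note on the alloc/free pairing (illustrative, not from the original source):
 * for ice_aqc_opc_alloc_res the newly allocated list ID is written back
 * through *vsi_list_id, while for ice_aqc_opc_free_res the caller passes the
 * ID to release in *vsi_list_id, e.g.
 *
 *	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
 *					    ice_aqc_opc_alloc_res);
 *	...
 *	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
 *					    ice_aqc_opc_free_res);
 *
 * as done by ice_create_vsi_list_rule() and ice_remove_vsi_list_rule() below.
 */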
509 
510 /**
511  * ice_aq_sw_rules - add/update/remove switch rules
512  * @hw: pointer to the HW struct
513  * @rule_list: pointer to switch rule population list
514  * @rule_list_sz: total size of the rule list in bytes
515  * @num_rules: number of switch rules in the rule_list
516  * @opc: switch rules population command type - pass in the command opcode
517  * @cd: pointer to command details structure or NULL
518  *
519  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
520  */
521 static enum ice_status
522 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
523 		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
524 {
525 	struct ice_aq_desc desc;
526 	enum ice_status status;
527 
528 	if (opc != ice_aqc_opc_add_sw_rules &&
529 	    opc != ice_aqc_opc_update_sw_rules &&
530 	    opc != ice_aqc_opc_remove_sw_rules)
531 		return ICE_ERR_PARAM;
532 
533 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
534 
535 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
536 	desc.params.sw_rules.num_rules_fltr_entry_index =
537 		cpu_to_le16(num_rules);
538 	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
539 	if (opc != ice_aqc_opc_add_sw_rules &&
540 	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
541 		status = ICE_ERR_DOES_NOT_EXIST;
542 
543 	return status;
544 }
545 
546 /* ice_init_port_info - Initialize port_info with switch configuration data
547  * @pi: pointer to port_info
548  * @vsi_port_num: VSI number or port number
549  * @type: Type of switch element (port or VSI)
550  * @swid: switch ID of the switch the element is attached to
551  * @pf_vf_num: PF or VF number
552  * @is_vf: true if the element is a VF, false otherwise
553  */
554 static void
555 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
556 		   u16 swid, u16 pf_vf_num, bool is_vf)
557 {
558 	switch (type) {
559 	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
560 		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
561 		pi->sw_id = swid;
562 		pi->pf_vf_num = pf_vf_num;
563 		pi->is_vf = is_vf;
564 		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
565 		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
566 		break;
567 	default:
568 		ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
569 		break;
570 	}
571 }
572 
573 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
574  * @hw: pointer to the hardware structure
575  */
576 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
577 {
578 	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
579 	enum ice_status status;
580 	u16 req_desc = 0;
581 	u16 num_elems;
582 	u16 i;
583 
584 	rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
585 			    GFP_KERNEL);
586 
587 	if (!rbuf)
588 		return ICE_ERR_NO_MEMORY;
589 
590 	/* Multiple calls to ice_aq_get_sw_cfg may be required
591 	 * to get all the switch configuration information. The need
592 	 * for additional calls is indicated by ice_aq_get_sw_cfg
593 	 * writing a non-zero value in req_desc
594 	 */
595 	do {
596 		struct ice_aqc_get_sw_cfg_resp_elem *ele;
597 
598 		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
599 					   &req_desc, &num_elems, NULL);
600 
601 		if (status)
602 			break;
603 
604 		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
605 			u16 pf_vf_num, swid, vsi_port_num;
606 			bool is_vf = false;
607 			u8 res_type;
608 
609 			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
610 				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
611 
612 			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
613 				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
614 
615 			swid = le16_to_cpu(ele->swid);
616 
617 			if (le16_to_cpu(ele->pf_vf_num) &
618 			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
619 				is_vf = true;
620 
621 			res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
622 					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
623 
624 			if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
625 				/* FW VSI is not needed. Just continue. */
626 				continue;
627 			}
628 
629 			ice_init_port_info(hw->port_info, vsi_port_num,
630 					   res_type, swid, pf_vf_num, is_vf);
631 		}
632 	} while (req_desc && !status);
633 
634 	devm_kfree(ice_hw_to_dev(hw), rbuf);
635 	return status;
636 }
637 
638 /**
639  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
640  * @hw: pointer to the hardware structure
641  * @fi: filter info structure to fill/update
642  *
643  * This helper function populates the lb_en and lan_en elements of the provided
644  * ice_fltr_info struct using the switch's type and characteristics of the
645  * switch rule being configured.
646  */
647 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
648 {
649 	fi->lb_en = false;
650 	fi->lan_en = false;
651 	if ((fi->flag & ICE_FLTR_TX) &&
652 	    (fi->fltr_act == ICE_FWD_TO_VSI ||
653 	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
654 	     fi->fltr_act == ICE_FWD_TO_Q ||
655 	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
656 		/* Setting LB for prune actions will result in replicated
657 		 * packets to the internal switch that will be dropped.
658 		 */
659 		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
660 			fi->lb_en = true;
661 
662 		/* Set lan_en to TRUE if
663 		 * 1. The switch is a VEB AND
664 		 * 2. Any one of the following is true:
665 		 * 2.1 The lookup is a directional lookup like ethertype,
666 		 * promiscuous, ethertype-MAC, promiscuous-VLAN
667 		 * and default-port OR
668 		 * 2.2 The lookup is VLAN, OR
669 		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
670 		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
671 		 *
672 		 * OR
673 		 *
674 		 * The switch is a VEPA.
675 		 *
676 		 * In all other cases, the LAN enable has to be set to false.
677 		 */
678 		if (hw->evb_veb) {
679 			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
680 			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
681 			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
682 			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
683 			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
684 			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
685 			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
686 			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
687 			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
688 			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
689 				fi->lan_en = true;
690 		} else {
691 			fi->lan_en = true;
692 		}
693 	}
694 }
695 
696 /**
697  * ice_fill_sw_rule - Helper function to fill switch rule structure
698  * @hw: pointer to the hardware structure
699  * @f_info: entry containing packet forwarding information
700  * @s_rule: switch rule structure to be filled in based on mac_entry
701  * @opc: switch rules population command type - pass in the command opcode
702  */
703 static void
704 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
705 		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
706 {
707 	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
708 	void *daddr = NULL;
709 	u16 eth_hdr_sz;
710 	u8 *eth_hdr;
711 	u32 act = 0;
712 	__be16 *off;
713 	u8 q_rgn;
714 
715 	if (opc == ice_aqc_opc_remove_sw_rules) {
716 		s_rule->pdata.lkup_tx_rx.act = 0;
717 		s_rule->pdata.lkup_tx_rx.index =
718 			cpu_to_le16(f_info->fltr_rule_id);
719 		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
720 		return;
721 	}
722 
723 	eth_hdr_sz = sizeof(dummy_eth_header);
724 	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
725 
726 	/* initialize the ether header with a dummy header */
727 	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
728 	ice_fill_sw_info(hw, f_info);
729 
730 	switch (f_info->fltr_act) {
731 	case ICE_FWD_TO_VSI:
732 		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
733 			ICE_SINGLE_ACT_VSI_ID_M;
734 		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
735 			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
736 				ICE_SINGLE_ACT_VALID_BIT;
737 		break;
738 	case ICE_FWD_TO_VSI_LIST:
739 		act |= ICE_SINGLE_ACT_VSI_LIST;
740 		act |= (f_info->fwd_id.vsi_list_id <<
741 			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
742 			ICE_SINGLE_ACT_VSI_LIST_ID_M;
743 		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
744 			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
745 				ICE_SINGLE_ACT_VALID_BIT;
746 		break;
747 	case ICE_FWD_TO_Q:
748 		act |= ICE_SINGLE_ACT_TO_Q;
749 		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
750 			ICE_SINGLE_ACT_Q_INDEX_M;
751 		break;
752 	case ICE_DROP_PACKET:
753 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
754 			ICE_SINGLE_ACT_VALID_BIT;
755 		break;
756 	case ICE_FWD_TO_QGRP:
757 		q_rgn = f_info->qgrp_size > 0 ?
758 			(u8)ilog2(f_info->qgrp_size) : 0;
759 		act |= ICE_SINGLE_ACT_TO_Q;
760 		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
761 			ICE_SINGLE_ACT_Q_INDEX_M;
762 		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
763 			ICE_SINGLE_ACT_Q_REGION_M;
764 		break;
765 	default:
766 		return;
767 	}
768 
769 	if (f_info->lb_en)
770 		act |= ICE_SINGLE_ACT_LB_ENABLE;
771 	if (f_info->lan_en)
772 		act |= ICE_SINGLE_ACT_LAN_ENABLE;
773 
774 	switch (f_info->lkup_type) {
775 	case ICE_SW_LKUP_MAC:
776 		daddr = f_info->l_data.mac.mac_addr;
777 		break;
778 	case ICE_SW_LKUP_VLAN:
779 		vlan_id = f_info->l_data.vlan.vlan_id;
780 		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
781 		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
782 			act |= ICE_SINGLE_ACT_PRUNE;
783 			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
784 		}
785 		break;
786 	case ICE_SW_LKUP_ETHERTYPE_MAC:
787 		daddr = f_info->l_data.ethertype_mac.mac_addr;
788 		fallthrough;
789 	case ICE_SW_LKUP_ETHERTYPE:
790 		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
791 		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
792 		break;
793 	case ICE_SW_LKUP_MAC_VLAN:
794 		daddr = f_info->l_data.mac_vlan.mac_addr;
795 		vlan_id = f_info->l_data.mac_vlan.vlan_id;
796 		break;
797 	case ICE_SW_LKUP_PROMISC_VLAN:
798 		vlan_id = f_info->l_data.mac_vlan.vlan_id;
799 		fallthrough;
800 	case ICE_SW_LKUP_PROMISC:
801 		daddr = f_info->l_data.mac_vlan.mac_addr;
802 		break;
803 	default:
804 		break;
805 	}
806 
807 	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
808 		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
809 		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
810 
811 	/* Recipe set depending on lookup type */
812 	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
813 	s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
814 	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
815 
816 	if (daddr)
817 		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
818 
819 	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
820 		off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
821 		*off = cpu_to_be16(vlan_id);
822 	}
823 
824 	/* Create the switch rule with the final dummy Ethernet header */
825 	if (opc != ice_aqc_opc_update_sw_rules)
826 		s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
827 }
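
/* Illustrative example (not part of the original source): a minimal
 * ice_fltr_info for a unicast MAC rule forwarding to a VSI could be set up
 * roughly as below before being handed to ice_fill_sw_rule(); hw_vsi_id and
 * mac stand in for the caller's values:
 *
 *	struct ice_fltr_info f_info = {};
 *
 *	f_info.lkup_type = ICE_SW_LKUP_MAC;
 *	f_info.fltr_act = ICE_FWD_TO_VSI;
 *	f_info.fwd_id.hw_vsi_id = hw_vsi_id;
 *	f_info.flag = ICE_FLTR_TX;
 *	f_info.src = hw_vsi_id;
 *	ether_addr_copy(f_info.l_data.mac.mac_addr, mac);
 *
 * ice_fill_sw_rule() then copies the dummy Ethernet header, writes the MAC at
 * ICE_ETH_DA_OFFSET and encodes the forwarding action bits shown above.
 */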
828 
829 /**
830  * ice_add_marker_act
831  * @hw: pointer to the hardware structure
832  * @m_ent: the management entry for which sw marker needs to be added
833  * @sw_marker: sw marker to tag the Rx descriptor with
834  * @l_id: large action resource ID
835  *
836  * Create a large action to hold the software marker and update the switch
837  * rule entry pointed to by m_ent with the newly created large action
838  */
839 static enum ice_status
840 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
841 		   u16 sw_marker, u16 l_id)
842 {
843 	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
844 	/* For software marker we need 3 large actions
845 	 * 1. FWD action: FWD TO VSI or VSI LIST
846 	 * 2. GENERIC VALUE action to hold the profile ID
847 	 * 3. GENERIC VALUE action to hold the software marker ID
848 	 */
849 	const u16 num_lg_acts = 3;
850 	enum ice_status status;
851 	u16 lg_act_size;
852 	u16 rules_size;
853 	u32 act;
854 	u16 id;
855 
856 	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
857 		return ICE_ERR_PARAM;
858 
859 	/* Create two back-to-back switch rules and submit them to the HW using
860 	 * one memory buffer:
861 	 *    1. Large Action
862 	 *    2. Look up Tx Rx
863 	 */
864 	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
865 	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
866 	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
867 	if (!lg_act)
868 		return ICE_ERR_NO_MEMORY;
869 
870 	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
871 
872 	/* Fill in the first switch rule i.e. large action */
873 	lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
874 	lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
875 	lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);
876 
877 	/* First action VSI forwarding or VSI list forwarding depending on how
878 	 * many VSIs
879 	 */
880 	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
881 		m_ent->fltr_info.fwd_id.hw_vsi_id;
882 
883 	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
884 	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
885 	if (m_ent->vsi_count > 1)
886 		act |= ICE_LG_ACT_VSI_LIST;
887 	lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);
888 
889 	/* Second action descriptor type */
890 	act = ICE_LG_ACT_GENERIC;
891 
892 	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
893 	lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);
894 
895 	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
896 	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
897 
898 	/* Third action Marker value */
899 	act |= ICE_LG_ACT_GENERIC;
900 	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
901 		ICE_LG_ACT_GENERIC_VALUE_M;
902 
903 	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
904 
905 	/* call the fill switch rule to fill the lookup Tx Rx structure */
906 	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
907 			 ice_aqc_opc_update_sw_rules);
908 
909 	/* Update the action to point to the large action ID */
910 	rx_tx->pdata.lkup_tx_rx.act =
911 		cpu_to_le32(ICE_SINGLE_ACT_PTR |
912 			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
913 			     ICE_SINGLE_ACT_PTR_VAL_M));
914 
915 	/* Use the filter rule ID of the previously created rule with single
916 	 * act. Once the update happens, hardware will treat this as large
917 	 * action
918 	 */
919 	rx_tx->pdata.lkup_tx_rx.index =
920 		cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
921 
922 	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
923 				 ice_aqc_opc_update_sw_rules, NULL);
924 	if (!status) {
925 		m_ent->lg_act_idx = l_id;
926 		m_ent->sw_marker_id = sw_marker;
927 	}
928 
929 	devm_kfree(ice_hw_to_dev(hw), lg_act);
930 	return status;
931 }
932 
933 /**
934  * ice_create_vsi_list_map
935  * @hw: pointer to the hardware structure
936  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
937  * @num_vsi: number of VSI handles in the array
938  * @vsi_list_id: VSI list ID generated as part of allocate resource
939  *
940  * Helper function to create a new entry of VSI list ID to VSI mapping
941  * using the given VSI list ID
942  */
943 static struct ice_vsi_list_map_info *
944 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
945 			u16 vsi_list_id)
946 {
947 	struct ice_switch_info *sw = hw->switch_info;
948 	struct ice_vsi_list_map_info *v_map;
949 	int i;
950 
951 	v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
952 	if (!v_map)
953 		return NULL;
954 
955 	v_map->vsi_list_id = vsi_list_id;
956 	v_map->ref_cnt = 1;
957 	for (i = 0; i < num_vsi; i++)
958 		set_bit(vsi_handle_arr[i], v_map->vsi_map);
959 
960 	list_add(&v_map->list_entry, &sw->vsi_list_map_head);
961 	return v_map;
962 }
963 
964 /**
965  * ice_update_vsi_list_rule
966  * @hw: pointer to the hardware structure
967  * @vsi_handle_arr: array of VSI handles to form a VSI list
968  * @num_vsi: number of VSI handles in the array
969  * @vsi_list_id: VSI list ID generated as part of allocate resource
970  * @remove: Boolean value to indicate if this is a remove action
971  * @opc: switch rules population command type - pass in the command opcode
972  * @lkup_type: lookup type of the filter
973  *
974  * Call AQ command to add a new switch rule or update existing switch rule
975  * using the given VSI list ID
976  */
977 static enum ice_status
978 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
979 			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
980 			 enum ice_sw_lkup_type lkup_type)
981 {
982 	struct ice_aqc_sw_rules_elem *s_rule;
983 	enum ice_status status;
984 	u16 s_rule_size;
985 	u16 rule_type;
986 	int i;
987 
988 	if (!num_vsi)
989 		return ICE_ERR_PARAM;
990 
991 	if (lkup_type == ICE_SW_LKUP_MAC ||
992 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
993 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
994 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
995 	    lkup_type == ICE_SW_LKUP_PROMISC ||
996 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
997 		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
998 			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
999 	else if (lkup_type == ICE_SW_LKUP_VLAN)
1000 		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
1001 			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
1002 	else
1003 		return ICE_ERR_PARAM;
1004 
1005 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
1006 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
1007 	if (!s_rule)
1008 		return ICE_ERR_NO_MEMORY;
1009 	for (i = 0; i < num_vsi; i++) {
1010 		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
1011 			status = ICE_ERR_PARAM;
1012 			goto exit;
1013 		}
1014 		/* AQ call requires hw_vsi_id(s) */
1015 		s_rule->pdata.vsi_list.vsi[i] =
1016 			cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
1017 	}
1018 
1019 	s_rule->type = cpu_to_le16(rule_type);
1020 	s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
1021 	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
1022 
1023 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
1024 
1025 exit:
1026 	devm_kfree(ice_hw_to_dev(hw), s_rule);
1027 	return status;
1028 }
1029 
1030 /**
1031  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
1032  * @hw: pointer to the HW struct
1033  * @vsi_handle_arr: array of VSI handles to form a VSI list
1034  * @num_vsi: number of VSI handles in the array
1035  * @vsi_list_id: stores the ID of the VSI list to be created
1036  * @lkup_type: switch rule filter's lookup type
1037  */
1038 static enum ice_status
1039 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1040 			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
1041 {
1042 	enum ice_status status;
1043 
1044 	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
1045 					    ice_aqc_opc_alloc_res);
1046 	if (status)
1047 		return status;
1048 
1049 	/* Update the newly created VSI list to include the specified VSIs */
1050 	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
1051 					*vsi_list_id, false,
1052 					ice_aqc_opc_add_sw_rules, lkup_type);
1053 }
1054 
1055 /**
1056  * ice_create_pkt_fwd_rule
1057  * @hw: pointer to the hardware structure
1058  * @f_entry: entry containing packet forwarding information
1059  *
1060  * Create switch rule with given filter information and add an entry
1061  * to the corresponding filter management list to track this switch rule
1062  * and VSI mapping
1063  */
1064 static enum ice_status
1065 ice_create_pkt_fwd_rule(struct ice_hw *hw,
1066 			struct ice_fltr_list_entry *f_entry)
1067 {
1068 	struct ice_fltr_mgmt_list_entry *fm_entry;
1069 	struct ice_aqc_sw_rules_elem *s_rule;
1070 	enum ice_sw_lkup_type l_type;
1071 	struct ice_sw_recipe *recp;
1072 	enum ice_status status;
1073 
1074 	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
1075 			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
1076 	if (!s_rule)
1077 		return ICE_ERR_NO_MEMORY;
1078 	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
1079 				GFP_KERNEL);
1080 	if (!fm_entry) {
1081 		status = ICE_ERR_NO_MEMORY;
1082 		goto ice_create_pkt_fwd_rule_exit;
1083 	}
1084 
1085 	fm_entry->fltr_info = f_entry->fltr_info;
1086 
1087 	/* Initialize all the fields for the management entry */
1088 	fm_entry->vsi_count = 1;
1089 	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
1090 	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
1091 	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
1092 
1093 	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
1094 			 ice_aqc_opc_add_sw_rules);
1095 
1096 	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
1097 				 ice_aqc_opc_add_sw_rules, NULL);
1098 	if (status) {
1099 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
1100 		goto ice_create_pkt_fwd_rule_exit;
1101 	}
1102 
1103 	f_entry->fltr_info.fltr_rule_id =
1104 		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
1105 	fm_entry->fltr_info.fltr_rule_id =
1106 		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
1107 
1108 	/* The book keeping entries will get removed when base driver
1109 	 * calls remove filter AQ command
1110 	 */
1111 	l_type = fm_entry->fltr_info.lkup_type;
1112 	recp = &hw->switch_info->recp_list[l_type];
1113 	list_add(&fm_entry->list_entry, &recp->filt_rules);
1114 
1115 ice_create_pkt_fwd_rule_exit:
1116 	devm_kfree(ice_hw_to_dev(hw), s_rule);
1117 	return status;
1118 }
1119 
1120 /**
1121  * ice_update_pkt_fwd_rule
1122  * @hw: pointer to the hardware structure
1123  * @f_info: filter information for switch rule
1124  *
1125  * Call AQ command to update a previously created switch rule with a
1126  * VSI list ID
1127  */
1128 static enum ice_status
1129 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
1130 {
1131 	struct ice_aqc_sw_rules_elem *s_rule;
1132 	enum ice_status status;
1133 
1134 	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
1135 			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
1136 	if (!s_rule)
1137 		return ICE_ERR_NO_MEMORY;
1138 
1139 	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
1140 
1141 	s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);
1142 
1143 	/* Update switch rule with new rule set to forward VSI list */
1144 	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
1145 				 ice_aqc_opc_update_sw_rules, NULL);
1146 
1147 	devm_kfree(ice_hw_to_dev(hw), s_rule);
1148 	return status;
1149 }
1150 
1151 /**
1152  * ice_update_sw_rule_bridge_mode
1153  * @hw: pointer to the HW struct
1154  *
1155  * Updates unicast switch filter rules based on VEB/VEPA mode
1156  */
1157 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
1158 {
1159 	struct ice_switch_info *sw = hw->switch_info;
1160 	struct ice_fltr_mgmt_list_entry *fm_entry;
1161 	enum ice_status status = 0;
1162 	struct list_head *rule_head;
1163 	struct mutex *rule_lock; /* Lock to protect filter rule list */
1164 
1165 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
1166 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
1167 
1168 	mutex_lock(rule_lock);
1169 	list_for_each_entry(fm_entry, rule_head, list_entry) {
1170 		struct ice_fltr_info *fi = &fm_entry->fltr_info;
1171 		u8 *addr = fi->l_data.mac.mac_addr;
1172 
1173 		/* Update unicast Tx rules to reflect the selected
1174 		 * VEB/VEPA mode
1175 		 */
1176 		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
1177 		    (fi->fltr_act == ICE_FWD_TO_VSI ||
1178 		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1179 		     fi->fltr_act == ICE_FWD_TO_Q ||
1180 		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
1181 			status = ice_update_pkt_fwd_rule(hw, fi);
1182 			if (status)
1183 				break;
1184 		}
1185 	}
1186 
1187 	mutex_unlock(rule_lock);
1188 
1189 	return status;
1190 }
1191 
1192 /**
1193  * ice_add_update_vsi_list
1194  * @hw: pointer to the hardware structure
1195  * @m_entry: pointer to current filter management list entry
1196  * @cur_fltr: filter information from the book keeping entry
1197  * @new_fltr: filter information with the new VSI to be added
1198  *
1199  * Call AQ command to add or update previously created VSI list with new VSI.
1200  *
1201  * Helper function to do book keeping associated with adding filter information
1202  * The algorithm to do the book keeping is described below:
1203  * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
1204  *	if only one VSI has been added till now
1205  *		Allocate a new VSI list and add two VSIs
1206  *		to this list using switch rule command
1207  *		Update the previously created switch rule with the
1208  *		newly created VSI list ID
1209  *	if a VSI list was previously created
1210  *		Add the new VSI to the previously created VSI list set
1211  *		using the update switch rule command
1212  */
1213 static enum ice_status
1214 ice_add_update_vsi_list(struct ice_hw *hw,
1215 			struct ice_fltr_mgmt_list_entry *m_entry,
1216 			struct ice_fltr_info *cur_fltr,
1217 			struct ice_fltr_info *new_fltr)
1218 {
1219 	enum ice_status status = 0;
1220 	u16 vsi_list_id = 0;
1221 
1222 	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
1223 	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
1224 		return ICE_ERR_NOT_IMPL;
1225 
1226 	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
1227 	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
1228 	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
1229 	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
1230 		return ICE_ERR_NOT_IMPL;
1231 
1232 	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
1233 		/* Only one entry existed in the mapping and it was not already
1234 		 * a part of a VSI list. So, create a VSI list with the old and
1235 		 * new VSIs.
1236 		 */
1237 		struct ice_fltr_info tmp_fltr;
1238 		u16 vsi_handle_arr[2];
1239 
1240 		/* A rule already exists with the new VSI being added */
1241 		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
1242 			return ICE_ERR_ALREADY_EXISTS;
1243 
1244 		vsi_handle_arr[0] = cur_fltr->vsi_handle;
1245 		vsi_handle_arr[1] = new_fltr->vsi_handle;
1246 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
1247 						  &vsi_list_id,
1248 						  new_fltr->lkup_type);
1249 		if (status)
1250 			return status;
1251 
1252 		tmp_fltr = *new_fltr;
1253 		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
1254 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
1255 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
1256 		/* Update the previous switch rule of "MAC forward to VSI" to
1257 		 * "MAC fwd to VSI list"
1258 		 */
1259 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
1260 		if (status)
1261 			return status;
1262 
1263 		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
1264 		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
1265 		m_entry->vsi_list_info =
1266 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
1267 						vsi_list_id);
1268 
1269 		if (!m_entry->vsi_list_info)
1270 			return ICE_ERR_NO_MEMORY;
1271 
1272 		/* If this entry was large action then the large action needs
1273 		 * to be updated to point to FWD to VSI list
1274 		 */
1275 		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
1276 			status =
1277 			    ice_add_marker_act(hw, m_entry,
1278 					       m_entry->sw_marker_id,
1279 					       m_entry->lg_act_idx);
1280 	} else {
1281 		u16 vsi_handle = new_fltr->vsi_handle;
1282 		enum ice_adminq_opc opcode;
1283 
1284 		if (!m_entry->vsi_list_info)
1285 			return ICE_ERR_CFG;
1286 
1287 		/* A rule already exists with the new VSI being added */
1288 		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
1289 			return 0;
1290 
1291 		/* Update the previously created VSI list set with
1292 		 * the new VSI ID passed in
1293 		 */
1294 		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
1295 		opcode = ice_aqc_opc_update_sw_rules;
1296 
1297 		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
1298 						  vsi_list_id, false, opcode,
1299 						  new_fltr->lkup_type);
1300 		/* update VSI list mapping info with new VSI ID */
1301 		if (!status)
1302 			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
1303 	}
1304 	if (!status)
1305 		m_entry->vsi_count++;
1306 	return status;
1307 }
1308 
1309 /**
1310  * ice_find_rule_entry - Search a rule entry
1311  * @hw: pointer to the hardware structure
1312  * @recp_id: lookup type for which the specified rule needs to be searched
1313  * @f_info: rule information
1314  *
1315  * Helper function to search for a given rule entry
1316  * Returns pointer to entry storing the rule if found
1317  */
1318 static struct ice_fltr_mgmt_list_entry *
1319 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
1320 {
1321 	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
1322 	struct ice_switch_info *sw = hw->switch_info;
1323 	struct list_head *list_head;
1324 
1325 	list_head = &sw->recp_list[recp_id].filt_rules;
1326 	list_for_each_entry(list_itr, list_head, list_entry) {
1327 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
1328 			    sizeof(f_info->l_data)) &&
1329 		    f_info->flag == list_itr->fltr_info.flag) {
1330 			ret = list_itr;
1331 			break;
1332 		}
1333 	}
1334 	return ret;
1335 }
1336 
1337 /**
1338  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
1339  * @hw: pointer to the hardware structure
1340  * @recp_id: lookup type for which VSI lists needs to be searched
1341  * @vsi_handle: VSI handle to be found in VSI list
1342  * @vsi_list_id: VSI list ID found containing vsi_handle
1343  *
1344  * Helper function to search a VSI list with single entry containing given VSI
1345  * handle element. This can be extended further to search VSI list with more
1346  * than 1 vsi_count. Returns pointer to VSI list entry if found.
1347  */
1348 static struct ice_vsi_list_map_info *
1349 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
1350 			u16 *vsi_list_id)
1351 {
1352 	struct ice_vsi_list_map_info *map_info = NULL;
1353 	struct ice_switch_info *sw = hw->switch_info;
1354 	struct ice_fltr_mgmt_list_entry *list_itr;
1355 	struct list_head *list_head;
1356 
1357 	list_head = &sw->recp_list[recp_id].filt_rules;
1358 	list_for_each_entry(list_itr, list_head, list_entry) {
1359 		if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
1360 			map_info = list_itr->vsi_list_info;
1361 			if (test_bit(vsi_handle, map_info->vsi_map)) {
1362 				*vsi_list_id = map_info->vsi_list_id;
1363 				return map_info;
1364 			}
1365 		}
1366 	}
1367 	return NULL;
1368 }
1369 
1370 /**
1371  * ice_add_rule_internal - add rule for a given lookup type
1372  * @hw: pointer to the hardware structure
1373  * @recp_id: lookup type (recipe ID) for which rule has to be added
1374  * @f_entry: structure containing MAC forwarding information
1375  *
1376  * Adds or updates the rule lists for a given recipe
1377  */
1378 static enum ice_status
1379 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
1380 		      struct ice_fltr_list_entry *f_entry)
1381 {
1382 	struct ice_switch_info *sw = hw->switch_info;
1383 	struct ice_fltr_info *new_fltr, *cur_fltr;
1384 	struct ice_fltr_mgmt_list_entry *m_entry;
1385 	struct mutex *rule_lock; /* Lock to protect filter rule list */
1386 	enum ice_status status = 0;
1387 
1388 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
1389 		return ICE_ERR_PARAM;
1390 	f_entry->fltr_info.fwd_id.hw_vsi_id =
1391 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
1392 
1393 	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
1394 
1395 	mutex_lock(rule_lock);
1396 	new_fltr = &f_entry->fltr_info;
1397 	if (new_fltr->flag & ICE_FLTR_RX)
1398 		new_fltr->src = hw->port_info->lport;
1399 	else if (new_fltr->flag & ICE_FLTR_TX)
1400 		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
1401 
1402 	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
1403 	if (!m_entry) {
1404 		mutex_unlock(rule_lock);
1405 		return ice_create_pkt_fwd_rule(hw, f_entry);
1406 	}
1407 
1408 	cur_fltr = &m_entry->fltr_info;
1409 	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
1410 	mutex_unlock(rule_lock);
1411 
1412 	return status;
1413 }
1414 
1415 /**
1416  * ice_remove_vsi_list_rule
1417  * @hw: pointer to the hardware structure
1418  * @vsi_list_id: VSI list ID generated as part of allocate resource
1419  * @lkup_type: switch rule filter lookup type
1420  *
1421  * The VSI list should be emptied before this function is called to remove the
1422  * VSI list.
1423  */
1424 static enum ice_status
1425 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
1426 			 enum ice_sw_lkup_type lkup_type)
1427 {
1428 	struct ice_aqc_sw_rules_elem *s_rule;
1429 	enum ice_status status;
1430 	u16 s_rule_size;
1431 
1432 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
1433 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
1434 	if (!s_rule)
1435 		return ICE_ERR_NO_MEMORY;
1436 
1437 	s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
1438 	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
1439 
1440 	/* Free the vsi_list resource that we allocated. It is assumed that the
1441 	 * list is empty at this point.
1442 	 */
1443 	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
1444 					    ice_aqc_opc_free_res);
1445 
1446 	devm_kfree(ice_hw_to_dev(hw), s_rule);
1447 	return status;
1448 }
1449 
1450 /**
1451  * ice_rem_update_vsi_list
1452  * @hw: pointer to the hardware structure
1453  * @vsi_handle: VSI handle of the VSI to remove
1454  * @fm_list: filter management entry for which the VSI list management needs to
1455  *           be done
1456  */
1457 static enum ice_status
1458 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
1459 			struct ice_fltr_mgmt_list_entry *fm_list)
1460 {
1461 	enum ice_sw_lkup_type lkup_type;
1462 	enum ice_status status = 0;
1463 	u16 vsi_list_id;
1464 
1465 	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
1466 	    fm_list->vsi_count == 0)
1467 		return ICE_ERR_PARAM;
1468 
1469 	/* A rule with the VSI being removed does not exist */
1470 	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
1471 		return ICE_ERR_DOES_NOT_EXIST;
1472 
1473 	lkup_type = fm_list->fltr_info.lkup_type;
1474 	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
1475 	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
1476 					  ice_aqc_opc_update_sw_rules,
1477 					  lkup_type);
1478 	if (status)
1479 		return status;
1480 
1481 	fm_list->vsi_count--;
1482 	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
1483 
1484 	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
1485 		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
1486 		struct ice_vsi_list_map_info *vsi_list_info =
1487 			fm_list->vsi_list_info;
1488 		u16 rem_vsi_handle;
1489 
1490 		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
1491 						ICE_MAX_VSI);
1492 		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
1493 			return ICE_ERR_OUT_OF_RANGE;
1494 
1495 		/* Make sure VSI list is empty before removing it below */
1496 		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
1497 						  vsi_list_id, true,
1498 						  ice_aqc_opc_update_sw_rules,
1499 						  lkup_type);
1500 		if (status)
1501 			return status;
1502 
1503 		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
1504 		tmp_fltr_info.fwd_id.hw_vsi_id =
1505 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
1506 		tmp_fltr_info.vsi_handle = rem_vsi_handle;
1507 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
1508 		if (status) {
1509 			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
1510 				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
1511 			return status;
1512 		}
1513 
1514 		fm_list->fltr_info = tmp_fltr_info;
1515 	}
1516 
1517 	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
1518 	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
1519 		struct ice_vsi_list_map_info *vsi_list_info =
1520 			fm_list->vsi_list_info;
1521 
1522 		/* Remove the VSI list since it is no longer used */
1523 		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
1524 		if (status) {
1525 			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
1526 				  vsi_list_id, status);
1527 			return status;
1528 		}
1529 
1530 		list_del(&vsi_list_info->list_entry);
1531 		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
1532 		fm_list->vsi_list_info = NULL;
1533 	}
1534 
1535 	return status;
1536 }
1537 
1538 /**
1539  * ice_remove_rule_internal - Remove a filter rule of a given type
1540  * @hw: pointer to the hardware structure
1541  * @recp_id: recipe ID for which the rule needs to be removed
1542  * @f_entry: rule entry containing filter information
1543  */
1544 static enum ice_status
1545 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
1546 			 struct ice_fltr_list_entry *f_entry)
1547 {
1548 	struct ice_switch_info *sw = hw->switch_info;
1549 	struct ice_fltr_mgmt_list_entry *list_elem;
1550 	struct mutex *rule_lock; /* Lock to protect filter rule list */
1551 	enum ice_status status = 0;
1552 	bool remove_rule = false;
1553 	u16 vsi_handle;
1554 
1555 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
1556 		return ICE_ERR_PARAM;
1557 	f_entry->fltr_info.fwd_id.hw_vsi_id =
1558 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
1559 
1560 	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
1561 	mutex_lock(rule_lock);
1562 	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
1563 	if (!list_elem) {
1564 		status = ICE_ERR_DOES_NOT_EXIST;
1565 		goto exit;
1566 	}
1567 
1568 	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
1569 		remove_rule = true;
1570 	} else if (!list_elem->vsi_list_info) {
1571 		status = ICE_ERR_DOES_NOT_EXIST;
1572 		goto exit;
1573 	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
1574 		/* a ref_cnt > 1 indicates that the vsi_list is being
1575 		 * shared by multiple rules. Decrement the ref_cnt and
1576 		 * remove this rule, but do not modify the list, as it
1577 		 * is in-use by other rules.
1578 		 */
1579 		list_elem->vsi_list_info->ref_cnt--;
1580 		remove_rule = true;
1581 	} else {
1582 		/* a ref_cnt of 1 indicates the vsi_list is only used
1583 		 * by one rule. However, the original removal request is only
1584 		 * for a single VSI. Update the vsi_list first, and only
1585 		 * remove the rule if there are no further VSIs in this list.
1586 		 */
1587 		vsi_handle = f_entry->fltr_info.vsi_handle;
1588 		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
1589 		if (status)
1590 			goto exit;
1591 		/* if VSI count goes to zero after updating the VSI list */
1592 		if (list_elem->vsi_count == 0)
1593 			remove_rule = true;
1594 	}
1595 
1596 	if (remove_rule) {
1597 		/* Remove the lookup rule */
1598 		struct ice_aqc_sw_rules_elem *s_rule;
1599 
1600 		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
1601 				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
1602 				      GFP_KERNEL);
1603 		if (!s_rule) {
1604 			status = ICE_ERR_NO_MEMORY;
1605 			goto exit;
1606 		}
1607 
1608 		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
1609 				 ice_aqc_opc_remove_sw_rules);
1610 
1611 		status = ice_aq_sw_rules(hw, s_rule,
1612 					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
1613 					 ice_aqc_opc_remove_sw_rules, NULL);
1614 
1615 		/* Free the switch rule buffer used for the remove command */
1616 		devm_kfree(ice_hw_to_dev(hw), s_rule);
1617 
1618 		if (status)
1619 			goto exit;
1620 
1621 		list_del(&list_elem->list_entry);
1622 		devm_kfree(ice_hw_to_dev(hw), list_elem);
1623 	}
1624 exit:
1625 	mutex_unlock(rule_lock);
1626 	return status;
1627 }
1628 
1629 /**
1630  * ice_add_mac - Add a MAC address based filter rule
1631  * @hw: pointer to the hardware structure
1632  * @m_list: list of MAC addresses and forwarding information
1633  *
1634  * IMPORTANT: When the ucast_shared flag is set to false and m_list has
1635  * multiple unicast addresses, the function assumes that all the
1636  * addresses are unique in a given add_mac call. It doesn't check for
1637  * duplicates in this case; removing duplicates from a given list should
1638  * be handled by the caller of this function.
1639  */
1640 enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
1641 {
1642 	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
1643 	struct ice_fltr_list_entry *m_list_itr;
1644 	struct list_head *rule_head;
1645 	u16 total_elem_left, s_rule_size;
1646 	struct ice_switch_info *sw;
1647 	struct mutex *rule_lock; /* Lock to protect filter rule list */
1648 	enum ice_status status = 0;
1649 	u16 num_unicast = 0;
1650 	u8 elem_sent;
1651 
1652 	if (!m_list || !hw)
1653 		return ICE_ERR_PARAM;
1654 
1655 	s_rule = NULL;
1656 	sw = hw->switch_info;
1657 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
1658 	list_for_each_entry(m_list_itr, m_list, list_entry) {
1659 		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
1660 		u16 vsi_handle;
1661 		u16 hw_vsi_id;
1662 
1663 		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
1664 		vsi_handle = m_list_itr->fltr_info.vsi_handle;
1665 		if (!ice_is_vsi_valid(hw, vsi_handle))
1666 			return ICE_ERR_PARAM;
1667 		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
1668 		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
1669 		/* update the src in case it is VSI num */
1670 		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
1671 			return ICE_ERR_PARAM;
1672 		m_list_itr->fltr_info.src = hw_vsi_id;
1673 		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
1674 		    is_zero_ether_addr(add))
1675 			return ICE_ERR_PARAM;
1676 		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
1677 			/* Don't overwrite the unicast address */
1678 			mutex_lock(rule_lock);
1679 			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
1680 						&m_list_itr->fltr_info)) {
1681 				mutex_unlock(rule_lock);
1682 				return ICE_ERR_ALREADY_EXISTS;
1683 			}
1684 			mutex_unlock(rule_lock);
1685 			num_unicast++;
1686 		} else if (is_multicast_ether_addr(add) ||
1687 			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
1688 			m_list_itr->status =
1689 				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
1690 						      m_list_itr);
1691 			if (m_list_itr->status)
1692 				return m_list_itr->status;
1693 		}
1694 	}
1695 
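	/* Multicast (and shared unicast) entries were added one at a time
	 * above; the unicast entries counted in num_unicast are programmed
	 * below using bulk adds of switch rules in ICE_AQ_MAX_BUF_LEN-sized
	 * chunks.
	 */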
1696 	mutex_lock(rule_lock);
1697 	/* Exit if no suitable entries were found for adding bulk switch rule */
1698 	if (!num_unicast) {
1699 		status = 0;
1700 		goto ice_add_mac_exit;
1701 	}
1702 
1703 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
1704 
1705 	/* Allocate switch rule buffer for the bulk update for unicast */
1706 	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
1707 	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
1708 			      GFP_KERNEL);
1709 	if (!s_rule) {
1710 		status = ICE_ERR_NO_MEMORY;
1711 		goto ice_add_mac_exit;
1712 	}
1713 
1714 	r_iter = s_rule;
1715 	list_for_each_entry(m_list_itr, m_list, list_entry) {
1716 		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
1717 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
1718 
1719 		if (is_unicast_ether_addr(mac_addr)) {
1720 			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
1721 					 ice_aqc_opc_add_sw_rules);
1722 			r_iter = (struct ice_aqc_sw_rules_elem *)
1723 				((u8 *)r_iter + s_rule_size);
1724 		}
1725 	}
1726 
1727 	/* Call AQ bulk switch rule update for all unicast addresses */
1728 	r_iter = s_rule;
1729 	/* Call AQ switch rule in AQ_MAX chunk */
1730 	for (total_elem_left = num_unicast; total_elem_left > 0;
1731 	     total_elem_left -= elem_sent) {
1732 		struct ice_aqc_sw_rules_elem *entry = r_iter;
1733 
1734 		elem_sent = min_t(u8, total_elem_left,
1735 				  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
1736 		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
1737 					 elem_sent, ice_aqc_opc_add_sw_rules,
1738 					 NULL);
1739 		if (status)
1740 			goto ice_add_mac_exit;
1741 		r_iter = (struct ice_aqc_sw_rules_elem *)
1742 			((u8 *)r_iter + (elem_sent * s_rule_size));
1743 	}
1744 
1745 	/* Fill up rule ID based on the value returned from FW */
1746 	r_iter = s_rule;
1747 	list_for_each_entry(m_list_itr, m_list, list_entry) {
1748 		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
1749 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
1750 		struct ice_fltr_mgmt_list_entry *fm_entry;
1751 
1752 		if (is_unicast_ether_addr(mac_addr)) {
1753 			f_info->fltr_rule_id =
1754 				le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
1755 			f_info->fltr_act = ICE_FWD_TO_VSI;
1756 			/* Create an entry to track this MAC address */
1757 			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
1758 						sizeof(*fm_entry), GFP_KERNEL);
1759 			if (!fm_entry) {
1760 				status = ICE_ERR_NO_MEMORY;
1761 				goto ice_add_mac_exit;
1762 			}
1763 			fm_entry->fltr_info = *f_info;
1764 			fm_entry->vsi_count = 1;
1765 			/* The bookkeeping entries will get removed when the
1766 			 * base driver calls the remove filter AQ command
1767 			 */
1768 
1769 			list_add(&fm_entry->list_entry, rule_head);
1770 			r_iter = (struct ice_aqc_sw_rules_elem *)
1771 				((u8 *)r_iter + s_rule_size);
1772 		}
1773 	}
1774 
1775 ice_add_mac_exit:
1776 	mutex_unlock(rule_lock);
1777 	if (s_rule)
1778 		devm_kfree(ice_hw_to_dev(hw), s_rule);
1779 	return status;
1780 }
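
/* Illustrative usage sketch for ice_add_mac(), not part of the driver; the
 * vsi_handle/mac variables and the surrounding error handling are assumed:
 *
 *	struct ice_fltr_list_entry entry = {};
 *	LIST_HEAD(m_list);
 *
 *	entry.fltr_info.lkup_type = ICE_SW_LKUP_MAC;
 *	entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
 *	entry.fltr_info.src_id = ICE_SRC_ID_VSI;
 *	entry.fltr_info.vsi_handle = vsi_handle;
 *	ether_addr_copy(entry.fltr_info.l_data.mac.mac_addr, mac);
 *	list_add(&entry.list_entry, &m_list);
 *	status = ice_add_mac(hw, &m_list);
 */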
1781 
1782 /**
1783  * ice_add_vlan_internal - Add one VLAN based filter rule
1784  * @hw: pointer to the hardware structure
1785  * @f_entry: filter entry containing one VLAN information
1786  */
1787 static enum ice_status
1788 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
1789 {
1790 	struct ice_switch_info *sw = hw->switch_info;
1791 	struct ice_fltr_mgmt_list_entry *v_list_itr;
1792 	struct ice_fltr_info *new_fltr, *cur_fltr;
1793 	enum ice_sw_lkup_type lkup_type;
1794 	u16 vsi_list_id = 0, vsi_handle;
1795 	struct mutex *rule_lock; /* Lock to protect filter rule list */
1796 	enum ice_status status = 0;
1797 
1798 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
1799 		return ICE_ERR_PARAM;
1800 
1801 	f_entry->fltr_info.fwd_id.hw_vsi_id =
1802 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
1803 	new_fltr = &f_entry->fltr_info;
1804 
1805 	/* VLAN ID should only be 12 bits */
1806 	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
1807 		return ICE_ERR_PARAM;
1808 
1809 	if (new_fltr->src_id != ICE_SRC_ID_VSI)
1810 		return ICE_ERR_PARAM;
1811 
1812 	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
1813 	lkup_type = new_fltr->lkup_type;
1814 	vsi_handle = new_fltr->vsi_handle;
1815 	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
1816 	mutex_lock(rule_lock);
1817 	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
1818 	if (!v_list_itr) {
1819 		struct ice_vsi_list_map_info *map_info = NULL;
1820 
1821 		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
1822 			/* All VLAN pruning rules use a VSI list. Check if
1823 			 * there is already a VSI list containing the VSI that
1824 			 * we want to add. If found, reuse that vsi_list_id for
1825 			 * this new VLAN rule; otherwise create a new list.
1826 			 */
1827 			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
1828 							   vsi_handle,
1829 							   &vsi_list_id);
1830 			if (!map_info) {
1831 				status = ice_create_vsi_list_rule(hw,
1832 								  &vsi_handle,
1833 								  1,
1834 								  &vsi_list_id,
1835 								  lkup_type);
1836 				if (status)
1837 					goto exit;
1838 			}
1839 			/* Convert the action to forwarding to a VSI list. */
1840 			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
1841 			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
1842 		}
1843 
1844 		status = ice_create_pkt_fwd_rule(hw, f_entry);
1845 		if (!status) {
1846 			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
1847 							 new_fltr);
1848 			if (!v_list_itr) {
1849 				status = ICE_ERR_DOES_NOT_EXIST;
1850 				goto exit;
1851 			}
1852 			/* reuse VSI list for new rule and increment ref_cnt */
1853 			if (map_info) {
1854 				v_list_itr->vsi_list_info = map_info;
1855 				map_info->ref_cnt++;
1856 			} else {
1857 				v_list_itr->vsi_list_info =
1858 					ice_create_vsi_list_map(hw, &vsi_handle,
1859 								1, vsi_list_id);
1860 			}
1861 		}
1862 	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
1863 		/* Update the existing VSI list to add the new VSI ID only if
1864 		 * it is used by exactly one VLAN rule.
1865 		 */
1866 		cur_fltr = &v_list_itr->fltr_info;
1867 		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
1868 						 new_fltr);
1869 	} else {
1870 		/* The VLAN rule exists and the VSI list used by this rule is
1871 		 * referenced by more than one VLAN rule. Create a new VSI list
1872 		 * containing the previous VSI plus the new VSI, and update the
1873 		 * existing VLAN rule to point to the new VSI list ID.
1874 		 */
1875 		struct ice_fltr_info tmp_fltr;
1876 		u16 vsi_handle_arr[2];
1877 		u16 cur_handle;
1878 
1879 		/* The current implementation only supports reusing a VSI list
1880 		 * with a single VSI. We should never hit the condition below.
1881 		 */
1882 		if (v_list_itr->vsi_count > 1 &&
1883 		    v_list_itr->vsi_list_info->ref_cnt > 1) {
1884 			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
1885 			status = ICE_ERR_CFG;
1886 			goto exit;
1887 		}
1888 
1889 		cur_handle =
1890 			find_first_bit(v_list_itr->vsi_list_info->vsi_map,
1891 				       ICE_MAX_VSI);
1892 
1893 		/* A rule already exists with the new VSI being added */
1894 		if (cur_handle == vsi_handle) {
1895 			status = ICE_ERR_ALREADY_EXISTS;
1896 			goto exit;
1897 		}
1898 
1899 		vsi_handle_arr[0] = cur_handle;
1900 		vsi_handle_arr[1] = vsi_handle;
1901 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
1902 						  &vsi_list_id, lkup_type);
1903 		if (status)
1904 			goto exit;
1905 
1906 		tmp_fltr = v_list_itr->fltr_info;
1907 		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
1908 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
1909 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
1910 		/* Update the previous switch rule to a new VSI list which
1911 		 * includes current VSI that is requested
1912 		 */
1913 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
1914 		if (status)
1915 			goto exit;
1916 
1917 		/* Before overriding the VSI list map info, decrement the
1918 		 * ref_cnt of the previous VSI list.
1919 		 */
1920 		v_list_itr->vsi_list_info->ref_cnt--;
1921 
1922 		/* now update to newly created list */
1923 		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
1924 		v_list_itr->vsi_list_info =
1925 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
1926 						vsi_list_id);
1927 		v_list_itr->vsi_count++;
1928 	}
1929 
1930 exit:
1931 	mutex_unlock(rule_lock);
1932 	return status;
1933 }
1934 
1935 /**
1936  * ice_add_vlan - Add VLAN based filter rule
1937  * @hw: pointer to the hardware structure
1938  * @v_list: list of VLAN entries and forwarding information
1939  */
1940 enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
1941 {
1942 	struct ice_fltr_list_entry *v_list_itr;
1943 
1944 	if (!v_list || !hw)
1945 		return ICE_ERR_PARAM;
1946 
1947 	list_for_each_entry(v_list_itr, v_list, list_entry) {
1948 		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
1949 			return ICE_ERR_PARAM;
1950 		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
1951 		v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
1952 		if (v_list_itr->status)
1953 			return v_list_itr->status;
1954 	}
1955 	return 0;
1956 }
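
/* Illustrative sketch of building v_list for ice_add_vlan(), not part of the
 * driver; the vid and vsi_handle values are assumed:
 *
 *	struct ice_fltr_list_entry entry = {};
 *	LIST_HEAD(v_list);
 *
 *	entry.fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
 *	entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
 *	entry.fltr_info.src_id = ICE_SRC_ID_VSI;
 *	entry.fltr_info.vsi_handle = vsi_handle;
 *	entry.fltr_info.l_data.vlan.vlan_id = vid;
 *	list_add(&entry.list_entry, &v_list);
 *	status = ice_add_vlan(hw, &v_list);
 */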
1957 
1958 /**
1959  * ice_add_eth_mac - Add ethertype and MAC based filter rule
1960  * @hw: pointer to the hardware structure
1961  * @em_list: list of ethertype and MAC filters; the MAC is optional
1962  *
1963  * This function requires the caller to populate the entries in
1964  * the filter list with the necessary fields (including flags to
1965  * indicate Tx or Rx rules).
1966  */
1967 enum ice_status
1968 ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
1969 {
1970 	struct ice_fltr_list_entry *em_list_itr;
1971 
1972 	if (!em_list || !hw)
1973 		return ICE_ERR_PARAM;
1974 
1975 	list_for_each_entry(em_list_itr, em_list, list_entry) {
1976 		enum ice_sw_lkup_type l_type =
1977 			em_list_itr->fltr_info.lkup_type;
1978 
1979 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
1980 		    l_type != ICE_SW_LKUP_ETHERTYPE)
1981 			return ICE_ERR_PARAM;
1982 
1983 		em_list_itr->status = ice_add_rule_internal(hw, l_type,
1984 							    em_list_itr);
1985 		if (em_list_itr->status)
1986 			return em_list_itr->status;
1987 	}
1988 	return 0;
1989 }
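
/* Illustrative sketch for ice_add_eth_mac(), not part of the driver; the
 * l_data.ethertype_mac.ethertype field name and the example values are
 * assumptions. Per the note above, the caller must also pick the direction
 * via fltr_info.flag:
 *
 *	entry.fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
 *	entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
 *	entry.fltr_info.flag = ICE_FLTR_TX;
 *	entry.fltr_info.vsi_handle = vsi_handle;
 *	entry.fltr_info.l_data.ethertype_mac.ethertype = ethertype;
 *	list_add(&entry.list_entry, &em_list);
 *	status = ice_add_eth_mac(hw, &em_list);
 */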
1990 
1991 /**
1992  * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
1993  * @hw: pointer to the hardware structure
1994  * @em_list: list of ethertype or ethertype MAC entries
1995  */
1996 enum ice_status
1997 ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
1998 {
1999 	struct ice_fltr_list_entry *em_list_itr, *tmp;
2000 
2001 	if (!em_list || !hw)
2002 		return ICE_ERR_PARAM;
2003 
2004 	list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
2005 		enum ice_sw_lkup_type l_type =
2006 			em_list_itr->fltr_info.lkup_type;
2007 
2008 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
2009 		    l_type != ICE_SW_LKUP_ETHERTYPE)
2010 			return ICE_ERR_PARAM;
2011 
2012 		em_list_itr->status = ice_remove_rule_internal(hw, l_type,
2013 							       em_list_itr);
2014 		if (em_list_itr->status)
2015 			return em_list_itr->status;
2016 	}
2017 	return 0;
2018 }
2019 
2020 /**
2021  * ice_rem_sw_rule_info - delete the filter management entries in a rule list
2022  * @hw: pointer to the hardware structure
2023  * @rule_head: pointer to the switch list structure that we want to delete
2024  */
2025 static void
2026 ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
2027 {
2028 	if (!list_empty(rule_head)) {
2029 		struct ice_fltr_mgmt_list_entry *entry;
2030 		struct ice_fltr_mgmt_list_entry *tmp;
2031 
2032 		list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
2033 			list_del(&entry->list_entry);
2034 			devm_kfree(ice_hw_to_dev(hw), entry);
2035 		}
2036 	}
2037 }
2038 
2039 /**
2040  * ice_cfg_dflt_vsi - change state of VSI to set/clear default
2041  * @hw: pointer to the hardware structure
2042  * @vsi_handle: VSI handle to set as default
2043  * @set: true to add the above mentioned switch rule, false to remove it
2044  * @direction: ICE_FLTR_RX or ICE_FLTR_TX
2045  *
2046  * Add a filter rule to set/unset the given VSI as the default VSI for the
2047  * switch (represented by the swid)
2048  */
2049 enum ice_status
2050 ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
2051 {
2052 	struct ice_aqc_sw_rules_elem *s_rule;
2053 	struct ice_fltr_info f_info;
2054 	enum ice_adminq_opc opcode;
2055 	enum ice_status status;
2056 	u16 s_rule_size;
2057 	u16 hw_vsi_id;
2058 
2059 	if (!ice_is_vsi_valid(hw, vsi_handle))
2060 		return ICE_ERR_PARAM;
2061 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2062 
2063 	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
2064 		ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
2065 
2066 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
2067 	if (!s_rule)
2068 		return ICE_ERR_NO_MEMORY;
2069 
2070 	memset(&f_info, 0, sizeof(f_info));
2071 
2072 	f_info.lkup_type = ICE_SW_LKUP_DFLT;
2073 	f_info.flag = direction;
2074 	f_info.fltr_act = ICE_FWD_TO_VSI;
2075 	f_info.fwd_id.hw_vsi_id = hw_vsi_id;
2076 
2077 	if (f_info.flag & ICE_FLTR_RX) {
2078 		f_info.src = hw->port_info->lport;
2079 		f_info.src_id = ICE_SRC_ID_LPORT;
2080 		if (!set)
2081 			f_info.fltr_rule_id =
2082 				hw->port_info->dflt_rx_vsi_rule_id;
2083 	} else if (f_info.flag & ICE_FLTR_TX) {
2084 		f_info.src_id = ICE_SRC_ID_VSI;
2085 		f_info.src = hw_vsi_id;
2086 		if (!set)
2087 			f_info.fltr_rule_id =
2088 				hw->port_info->dflt_tx_vsi_rule_id;
2089 	}
2090 
2091 	if (set)
2092 		opcode = ice_aqc_opc_add_sw_rules;
2093 	else
2094 		opcode = ice_aqc_opc_remove_sw_rules;
2095 
2096 	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
2097 
2098 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
2099 	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
2100 		goto out;
2101 	if (set) {
2102 		u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
2103 
2104 		if (f_info.flag & ICE_FLTR_TX) {
2105 			hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
2106 			hw->port_info->dflt_tx_vsi_rule_id = index;
2107 		} else if (f_info.flag & ICE_FLTR_RX) {
2108 			hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
2109 			hw->port_info->dflt_rx_vsi_rule_id = index;
2110 		}
2111 	} else {
2112 		if (f_info.flag & ICE_FLTR_TX) {
2113 			hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2114 			hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
2115 		} else if (f_info.flag & ICE_FLTR_RX) {
2116 			hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2117 			hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
2118 		}
2119 	}
2120 
2121 out:
2122 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2123 	return status;
2124 }
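
/* Illustrative calls, not part of the driver (vsi_handle is assumed): make
 * the VSI the default Rx VSI for the switch, and later undo it:
 *
 *	status = ice_cfg_dflt_vsi(hw, vsi_handle, true, ICE_FLTR_RX);
 *	...
 *	status = ice_cfg_dflt_vsi(hw, vsi_handle, false, ICE_FLTR_RX);
 */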
2125 
2126 /**
2127  * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
2128  * @hw: pointer to the hardware structure
2129  * @recp_id: lookup type for which the specified rule needs to be searched
2130  * @f_info: rule information
2131  *
2132  * Helper function to search for a unicast rule entry - this is to be used
2133  * to remove a unicast MAC filter that is not shared with other VSIs on the
2134  * PF switch.
2135  *
2136  * Returns pointer to entry storing the rule if found
2137  */
2138 static struct ice_fltr_mgmt_list_entry *
2139 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
2140 			  struct ice_fltr_info *f_info)
2141 {
2142 	struct ice_switch_info *sw = hw->switch_info;
2143 	struct ice_fltr_mgmt_list_entry *list_itr;
2144 	struct list_head *list_head;
2145 
2146 	list_head = &sw->recp_list[recp_id].filt_rules;
2147 	list_for_each_entry(list_itr, list_head, list_entry) {
2148 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2149 			    sizeof(f_info->l_data)) &&
2150 		    f_info->fwd_id.hw_vsi_id ==
2151 		    list_itr->fltr_info.fwd_id.hw_vsi_id &&
2152 		    f_info->flag == list_itr->fltr_info.flag)
2153 			return list_itr;
2154 	}
2155 	return NULL;
2156 }
2157 
2158 /**
2159  * ice_remove_mac - remove a MAC address based filter rule
2160  * @hw: pointer to the hardware structure
2161  * @m_list: list of MAC addresses and forwarding information
2162  *
2163  * This function removes either a MAC filter rule or a specific VSI from a
2164  * VSI list for a multicast MAC address.
2165  *
2166  * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
2167  * ice_add_mac. Caller should be aware that this call will only work if all
2168  * the entries passed into m_list were added previously. It will not attempt to
2169  * do a partial remove of entries that were found.
2170  */
2171 enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
2172 {
2173 	struct ice_fltr_list_entry *list_itr, *tmp;
2174 	struct mutex *rule_lock; /* Lock to protect filter rule list */
2175 
2176 	if (!m_list)
2177 		return ICE_ERR_PARAM;
2178 
2179 	rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2180 	list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
2181 		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
2182 		u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
2183 		u16 vsi_handle;
2184 
2185 		if (l_type != ICE_SW_LKUP_MAC)
2186 			return ICE_ERR_PARAM;
2187 
2188 		vsi_handle = list_itr->fltr_info.vsi_handle;
2189 		if (!ice_is_vsi_valid(hw, vsi_handle))
2190 			return ICE_ERR_PARAM;
2191 
2192 		list_itr->fltr_info.fwd_id.hw_vsi_id =
2193 					ice_get_hw_vsi_num(hw, vsi_handle);
2194 		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
2195 			/* Don't remove the unicast address that belongs to
2196 			 * another VSI on the switch, since it is not being
2197 			 * shared...
2198 			 */
2199 			mutex_lock(rule_lock);
2200 			if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
2201 						       &list_itr->fltr_info)) {
2202 				mutex_unlock(rule_lock);
2203 				return ICE_ERR_DOES_NOT_EXIST;
2204 			}
2205 			mutex_unlock(rule_lock);
2206 		}
2207 		list_itr->status = ice_remove_rule_internal(hw,
2208 							    ICE_SW_LKUP_MAC,
2209 							    list_itr);
2210 		if (list_itr->status)
2211 			return list_itr->status;
2212 	}
2213 	return 0;
2214 }
2215 
2216 /**
2217  * ice_remove_vlan - Remove VLAN based filter rule
2218  * @hw: pointer to the hardware structure
2219  * @v_list: list of VLAN entries and forwarding information
2220  */
2221 enum ice_status
2222 ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
2223 {
2224 	struct ice_fltr_list_entry *v_list_itr, *tmp;
2225 
2226 	if (!v_list || !hw)
2227 		return ICE_ERR_PARAM;
2228 
2229 	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
2230 		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
2231 
2232 		if (l_type != ICE_SW_LKUP_VLAN)
2233 			return ICE_ERR_PARAM;
2234 		v_list_itr->status = ice_remove_rule_internal(hw,
2235 							      ICE_SW_LKUP_VLAN,
2236 							      v_list_itr);
2237 		if (v_list_itr->status)
2238 			return v_list_itr->status;
2239 	}
2240 	return 0;
2241 }
2242 
2243 /**
2244  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
2245  * @fm_entry: filter entry to inspect
2246  * @vsi_handle: VSI handle to compare with filter info
2247  */
2248 static bool
2249 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
2250 {
2251 	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
2252 		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
2253 		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
2254 		 fm_entry->vsi_list_info &&
2255 		 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
2256 }
2257 
2258 /**
2259  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
2260  * @hw: pointer to the hardware structure
2261  * @vsi_handle: VSI handle to remove filters from
2262  * @vsi_list_head: pointer to the list to add entry to
2263  * @fi: pointer to fltr_info of filter entry to copy & add
2264  *
2265  * Helper function, used when creating a list of filters to remove from
2266  * a specific VSI. The entry added to vsi_list_head is a COPY of the
2267  * original filter entry, with the exception of fltr_info.fltr_act and
2268  * fltr_info.fwd_id fields. These are set such that later logic can
2269  * extract which VSI to remove the fltr from, and pass on that information.
2270  */
2271 static enum ice_status
2272 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
2273 			       struct list_head *vsi_list_head,
2274 			       struct ice_fltr_info *fi)
2275 {
2276 	struct ice_fltr_list_entry *tmp;
2277 
2278 	/* this memory is freed up in the caller function
2279 	 * once filters for this VSI are removed
2280 	 */
2281 	tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
2282 	if (!tmp)
2283 		return ICE_ERR_NO_MEMORY;
2284 
2285 	tmp->fltr_info = *fi;
2286 
2287 	/* Overwrite these fields to indicate which VSI to remove the filter
2288 	 * from, so the find-and-remove logic can extract the information from
2289 	 * the list entries. Note that the original entries still hold their
2290 	 * proper values.
2291 	 */
2292 	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
2293 	tmp->fltr_info.vsi_handle = vsi_handle;
2294 	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2295 
2296 	list_add(&tmp->list_entry, vsi_list_head);
2297 
2298 	return 0;
2299 }
2300 
2301 /**
2302  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
2303  * @hw: pointer to the hardware structure
2304  * @vsi_handle: VSI handle to remove filters from
2305  * @lkup_list_head: pointer to the list that has certain lookup type filters
2306  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
2307  *
2308  * Locates all filters in lkup_list_head that are used by the given VSI,
2309  * and adds COPIES of those entries to vsi_list_head (intended to be used
2310  * to remove the listed filters).
2311  * Note that this means all entries in vsi_list_head must be explicitly
2312  * deallocated by the caller when done with list.
2313  */
2314 static enum ice_status
2315 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
2316 			 struct list_head *lkup_list_head,
2317 			 struct list_head *vsi_list_head)
2318 {
2319 	struct ice_fltr_mgmt_list_entry *fm_entry;
2320 	enum ice_status status = 0;
2321 
2322 	/* check to make sure VSI ID is valid and within boundary */
2323 	if (!ice_is_vsi_valid(hw, vsi_handle))
2324 		return ICE_ERR_PARAM;
2325 
2326 	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
2327 		if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
2328 			continue;
2329 
2330 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
2331 							vsi_list_head,
2332 							&fm_entry->fltr_info);
2333 		if (status)
2334 			return status;
2335 	}
2336 	return status;
2337 }
2338 
2339 /**
2340  * ice_determine_promisc_mask
2341  * @fi: filter info to parse
2342  *
2343  * Helper function to determine which ICE_PROMISC_ mask corresponds
2344  * to the given filter info.
2345  */
2346 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
2347 {
2348 	u16 vid = fi->l_data.mac_vlan.vlan_id;
2349 	u8 *macaddr = fi->l_data.mac.mac_addr;
2350 	bool is_tx_fltr = false;
2351 	u8 promisc_mask = 0;
2352 
2353 	if (fi->flag == ICE_FLTR_TX)
2354 		is_tx_fltr = true;
2355 
2356 	if (is_broadcast_ether_addr(macaddr))
2357 		promisc_mask |= is_tx_fltr ?
2358 			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
2359 	else if (is_multicast_ether_addr(macaddr))
2360 		promisc_mask |= is_tx_fltr ?
2361 			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
2362 	else if (is_unicast_ether_addr(macaddr))
2363 		promisc_mask |= is_tx_fltr ?
2364 			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
2365 	if (vid)
2366 		promisc_mask |= is_tx_fltr ?
2367 			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
2368 
2369 	return promisc_mask;
2370 }
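
/* Example derived from the mapping above: a Tx filter whose DA is the
 * broadcast address and whose VLAN ID is non-zero yields
 * ICE_PROMISC_BCAST_TX | ICE_PROMISC_VLAN_TX.
 */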
2371 
2372 /**
2373  * ice_remove_promisc - Remove promisc based filter rules
2374  * @hw: pointer to the hardware structure
2375  * @recp_id: recipe ID for which the rule needs to be removed
2376  * @v_list: list of promisc entries
2377  */
2378 static enum ice_status
2379 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
2380 		   struct list_head *v_list)
2381 {
2382 	struct ice_fltr_list_entry *v_list_itr, *tmp;
2383 
2384 	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
2385 		v_list_itr->status =
2386 			ice_remove_rule_internal(hw, recp_id, v_list_itr);
2387 		if (v_list_itr->status)
2388 			return v_list_itr->status;
2389 	}
2390 	return 0;
2391 }
2392 
2393 /**
2394  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
2395  * @hw: pointer to the hardware structure
2396  * @vsi_handle: VSI handle to clear mode
2397  * @promisc_mask: mask of promiscuous config bits to clear
2398  * @vid: VLAN ID for which to clear VLAN promiscuous mode
2399  */
2400 enum ice_status
2401 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
2402 		      u16 vid)
2403 {
2404 	struct ice_switch_info *sw = hw->switch_info;
2405 	struct ice_fltr_list_entry *fm_entry, *tmp;
2406 	struct list_head remove_list_head;
2407 	struct ice_fltr_mgmt_list_entry *itr;
2408 	struct list_head *rule_head;
2409 	struct mutex *rule_lock;	/* Lock to protect filter rule list */
2410 	enum ice_status status = 0;
2411 	u8 recipe_id;
2412 
2413 	if (!ice_is_vsi_valid(hw, vsi_handle))
2414 		return ICE_ERR_PARAM;
2415 
2416 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
2417 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
2418 	else
2419 		recipe_id = ICE_SW_LKUP_PROMISC;
2420 
2421 	rule_head = &sw->recp_list[recipe_id].filt_rules;
2422 	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
2423 
2424 	INIT_LIST_HEAD(&remove_list_head);
2425 
2426 	mutex_lock(rule_lock);
2427 	list_for_each_entry(itr, rule_head, list_entry) {
2428 		struct ice_fltr_info *fltr_info;
2429 		u8 fltr_promisc_mask = 0;
2430 
2431 		if (!ice_vsi_uses_fltr(itr, vsi_handle))
2432 			continue;
2433 		fltr_info = &itr->fltr_info;
2434 
2435 		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
2436 		    vid != fltr_info->l_data.mac_vlan.vlan_id)
2437 			continue;
2438 
2439 		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
2440 
2441 		/* Skip if filter is not completely specified by given mask */
2442 		if (fltr_promisc_mask & ~promisc_mask)
2443 			continue;
2444 
2445 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
2446 							&remove_list_head,
2447 							fltr_info);
2448 		if (status) {
2449 			mutex_unlock(rule_lock);
2450 			goto free_fltr_list;
2451 		}
2452 	}
2453 	mutex_unlock(rule_lock);
2454 
2455 	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
2456 
2457 free_fltr_list:
2458 	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
2459 		list_del(&fm_entry->list_entry);
2460 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
2461 	}
2462 
2463 	return status;
2464 }
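
/* Note: clearing is intended to be symmetric with ice_set_vsi_promisc()
 * below - the same promisc_mask bits (and, for VLAN promiscuous mode, the
 * same VLAN ID) used to set the mode are expected when clearing it.
 */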
2465 
2466 /**
2467  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
2468  * @hw: pointer to the hardware structure
2469  * @vsi_handle: VSI handle to configure
2470  * @promisc_mask: mask of promiscuous config bits
2471  * @vid: VLAN ID for which to set VLAN promiscuous mode
2472  */
2473 enum ice_status
2474 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
2475 {
2476 	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
2477 	struct ice_fltr_list_entry f_list_entry;
2478 	struct ice_fltr_info new_fltr;
2479 	enum ice_status status = 0;
2480 	bool is_tx_fltr;
2481 	u16 hw_vsi_id;
2482 	int pkt_type;
2483 	u8 recipe_id;
2484 
2485 	if (!ice_is_vsi_valid(hw, vsi_handle))
2486 		return ICE_ERR_PARAM;
2487 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2488 
2489 	memset(&new_fltr, 0, sizeof(new_fltr));
2490 
2491 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
2492 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
2493 		new_fltr.l_data.mac_vlan.vlan_id = vid;
2494 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
2495 	} else {
2496 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
2497 		recipe_id = ICE_SW_LKUP_PROMISC;
2498 	}
2499 
2500 	/* Separate filters must be set for each direction/packet type
2501 	 * combination, so we will loop over the mask value, store the
2502 	 * individual type, and clear it out in the input mask as it
2503 	 * is found.
2504 	 */
2505 	while (promisc_mask) {
2506 		u8 *mac_addr;
2507 
2508 		pkt_type = 0;
2509 		is_tx_fltr = false;
2510 
2511 		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
2512 			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
2513 			pkt_type = UCAST_FLTR;
2514 		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
2515 			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
2516 			pkt_type = UCAST_FLTR;
2517 			is_tx_fltr = true;
2518 		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
2519 			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
2520 			pkt_type = MCAST_FLTR;
2521 		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
2522 			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
2523 			pkt_type = MCAST_FLTR;
2524 			is_tx_fltr = true;
2525 		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
2526 			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
2527 			pkt_type = BCAST_FLTR;
2528 		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
2529 			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
2530 			pkt_type = BCAST_FLTR;
2531 			is_tx_fltr = true;
2532 		}
2533 
2534 		/* Check for VLAN promiscuous flag */
2535 		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
2536 			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
2537 		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
2538 			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
2539 			is_tx_fltr = true;
2540 		}
2541 
2542 		/* Set filter DA based on packet type */
2543 		mac_addr = new_fltr.l_data.mac.mac_addr;
2544 		if (pkt_type == BCAST_FLTR) {
2545 			eth_broadcast_addr(mac_addr);
2546 		} else if (pkt_type == MCAST_FLTR ||
2547 			   pkt_type == UCAST_FLTR) {
2548 			/* Use the dummy ether header DA */
2549 			ether_addr_copy(mac_addr, dummy_eth_header);
2550 			if (pkt_type == MCAST_FLTR)
2551 				mac_addr[0] |= 0x1;	/* Set multicast bit */
2552 		}
2553 
2554 		/* Need to reset this to zero for all iterations */
2555 		new_fltr.flag = 0;
2556 		if (is_tx_fltr) {
2557 			new_fltr.flag |= ICE_FLTR_TX;
2558 			new_fltr.src = hw_vsi_id;
2559 		} else {
2560 			new_fltr.flag |= ICE_FLTR_RX;
2561 			new_fltr.src = hw->port_info->lport;
2562 		}
2563 
2564 		new_fltr.fltr_act = ICE_FWD_TO_VSI;
2565 		new_fltr.vsi_handle = vsi_handle;
2566 		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
2567 		f_list_entry.fltr_info = new_fltr;
2568 
2569 		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
2570 		if (status)
2571 			goto set_promisc_exit;
2572 	}
2573 
2574 set_promisc_exit:
2575 	return status;
2576 }
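
/* Illustrative call, not part of the driver (vsi_handle is assumed): enable
 * unicast and multicast Rx promiscuous mode on a VSI with no VLAN match:
 *
 *	status = ice_set_vsi_promisc(hw, vsi_handle,
 *				     ICE_PROMISC_UCAST_RX |
 *				     ICE_PROMISC_MCAST_RX, 0);
 */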
2577 
2578 /**
2579  * ice_set_vlan_vsi_promisc
2580  * @hw: pointer to the hardware structure
2581  * @vsi_handle: VSI handle to configure
2582  * @promisc_mask: mask of promiscuous config bits
2583  * @rm_vlan_promisc: true to clear VLAN promiscuous mode, false to set it
2584  *
2585  * Configure VSI with all associated VLANs to given promiscuous mode(s)
2586  */
2587 enum ice_status
2588 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
2589 			 bool rm_vlan_promisc)
2590 {
2591 	struct ice_switch_info *sw = hw->switch_info;
2592 	struct ice_fltr_list_entry *list_itr, *tmp;
2593 	struct list_head vsi_list_head;
2594 	struct list_head *vlan_head;
2595 	struct mutex *vlan_lock; /* Lock to protect filter rule list */
2596 	enum ice_status status;
2597 	u16 vlan_id;
2598 
2599 	INIT_LIST_HEAD(&vsi_list_head);
2600 	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
2601 	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
2602 	mutex_lock(vlan_lock);
2603 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
2604 					  &vsi_list_head);
2605 	mutex_unlock(vlan_lock);
2606 	if (status)
2607 		goto free_fltr_list;
2608 
2609 	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
2610 		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
2611 		if (rm_vlan_promisc)
2612 			status = ice_clear_vsi_promisc(hw, vsi_handle,
2613 						       promisc_mask, vlan_id);
2614 		else
2615 			status = ice_set_vsi_promisc(hw, vsi_handle,
2616 						     promisc_mask, vlan_id);
2617 		if (status)
2618 			break;
2619 	}
2620 
2621 free_fltr_list:
2622 	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
2623 		list_del(&list_itr->list_entry);
2624 		devm_kfree(ice_hw_to_dev(hw), list_itr);
2625 	}
2626 	return status;
2627 }
2628 
2629 /**
2630  * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
2631  * @hw: pointer to the hardware structure
2632  * @vsi_handle: VSI handle to remove filters from
2633  * @lkup: switch rule filter lookup type
2634  */
2635 static void
2636 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
2637 			 enum ice_sw_lkup_type lkup)
2638 {
2639 	struct ice_switch_info *sw = hw->switch_info;
2640 	struct ice_fltr_list_entry *fm_entry;
2641 	struct list_head remove_list_head;
2642 	struct list_head *rule_head;
2643 	struct ice_fltr_list_entry *tmp;
2644 	struct mutex *rule_lock;	/* Lock to protect filter rule list */
2645 	enum ice_status status;
2646 
2647 	INIT_LIST_HEAD(&remove_list_head);
2648 	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
2649 	rule_head = &sw->recp_list[lkup].filt_rules;
2650 	mutex_lock(rule_lock);
2651 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
2652 					  &remove_list_head);
2653 	mutex_unlock(rule_lock);
2654 	if (status)
2655 		goto free_fltr_list;
2656 
2657 	switch (lkup) {
2658 	case ICE_SW_LKUP_MAC:
2659 		ice_remove_mac(hw, &remove_list_head);
2660 		break;
2661 	case ICE_SW_LKUP_VLAN:
2662 		ice_remove_vlan(hw, &remove_list_head);
2663 		break;
2664 	case ICE_SW_LKUP_PROMISC:
2665 	case ICE_SW_LKUP_PROMISC_VLAN:
2666 		ice_remove_promisc(hw, lkup, &remove_list_head);
2667 		break;
2668 	case ICE_SW_LKUP_MAC_VLAN:
2669 	case ICE_SW_LKUP_ETHERTYPE:
2670 	case ICE_SW_LKUP_ETHERTYPE_MAC:
2671 	case ICE_SW_LKUP_DFLT:
2672 	case ICE_SW_LKUP_LAST:
2673 	default:
2674 		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
2675 		break;
2676 	}
2677 
2678 free_fltr_list:
2679 	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
2680 		list_del(&fm_entry->list_entry);
2681 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
2682 	}
2683 }
2684 
2685 /**
2686  * ice_remove_vsi_fltr - Remove all filters for a VSI
2687  * @hw: pointer to the hardware structure
2688  * @vsi_handle: VSI handle to remove filters from
2689  */
2690 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
2691 {
2692 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
2693 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
2694 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
2695 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
2696 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
2697 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
2698 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
2699 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
2700 }
2701 
2702 /**
2703  * ice_alloc_res_cntr - allocate a resource counter
2704  * @hw: pointer to the hardware structure
2705  * @type: type of resource
2706  * @alloc_shared: if set, the resource is shared; otherwise it is dedicated
2707  * @num_items: number of entries requested for FD resource type
2708  * @counter_id: counter index returned by AQ call
2709  */
2710 enum ice_status
2711 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
2712 		   u16 *counter_id)
2713 {
2714 	struct ice_aqc_alloc_free_res_elem *buf;
2715 	enum ice_status status;
2716 	u16 buf_len;
2717 
2718 	/* Allocate resource */
2719 	buf_len = struct_size(buf, elem, 1);
2720 	buf = kzalloc(buf_len, GFP_KERNEL);
2721 	if (!buf)
2722 		return ICE_ERR_NO_MEMORY;
2723 
2724 	buf->num_elems = cpu_to_le16(num_items);
2725 	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
2726 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
2727 
2728 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
2729 				       ice_aqc_opc_alloc_res, NULL);
2730 	if (status)
2731 		goto exit;
2732 
2733 	*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
2734 
2735 exit:
2736 	kfree(buf);
2737 	return status;
2738 }
2739 
2740 /**
2741  * ice_free_res_cntr - free resource counter
2742  * @hw: pointer to the hardware structure
2743  * @type: type of resource
2744  * @alloc_shared: if set, the resource is shared; otherwise it is dedicated
2745  * @num_items: number of entries to be freed for FD resource type
2746  * @counter_id: counter ID resource which needs to be freed
2747  */
2748 enum ice_status
2749 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
2750 		  u16 counter_id)
2751 {
2752 	struct ice_aqc_alloc_free_res_elem *buf;
2753 	enum ice_status status;
2754 	u16 buf_len;
2755 
2756 	/* Free resource */
2757 	buf_len = struct_size(buf, elem, 1);
2758 	buf = kzalloc(buf_len, GFP_KERNEL);
2759 	if (!buf)
2760 		return ICE_ERR_NO_MEMORY;
2761 
2762 	buf->num_elems = cpu_to_le16(num_items);
2763 	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
2764 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
2765 	buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
2766 
2767 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
2768 				       ice_aqc_opc_free_res, NULL);
2769 	if (status)
2770 		ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
2771 
2772 	kfree(buf);
2773 	return status;
2774 }
2775 
2776 /**
2777  * ice_replay_vsi_fltr - Replay filters for requested VSI
2778  * @hw: pointer to the hardware structure
2779  * @vsi_handle: driver VSI handle
2780  * @recp_id: Recipe ID for which rules need to be replayed
2781  * @list_head: list for which filters need to be replayed
2782  *
2783  * Replays the filters of recipe recp_id for the VSI represented by vsi_handle.
2784  * A valid VSI handle must be passed.
2785  */
2786 static enum ice_status
2787 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
2788 		    struct list_head *list_head)
2789 {
2790 	struct ice_fltr_mgmt_list_entry *itr;
2791 	enum ice_status status = 0;
2792 	u16 hw_vsi_id;
2793 
2794 	if (list_empty(list_head))
2795 		return status;
2796 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2797 
2798 	list_for_each_entry(itr, list_head, list_entry) {
2799 		struct ice_fltr_list_entry f_entry;
2800 
2801 		f_entry.fltr_info = itr->fltr_info;
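		/* Entries that forward to a single VSI (and are not VLAN
		 * rules) are replayed directly; entries tracked through a
		 * shared VSI list are re-added for this VSI further below.
		 */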
2802 		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
2803 		    itr->fltr_info.vsi_handle == vsi_handle) {
2804 			/* update the src in case it is VSI num */
2805 			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
2806 				f_entry.fltr_info.src = hw_vsi_id;
2807 			status = ice_add_rule_internal(hw, recp_id, &f_entry);
2808 			if (status)
2809 				goto end;
2810 			continue;
2811 		}
2812 		if (!itr->vsi_list_info ||
2813 		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
2814 			continue;
2815 		/* Clearing it so that the logic can add it back */
2816 		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
2817 		f_entry.fltr_info.vsi_handle = vsi_handle;
2818 		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
2819 		/* update the src in case it is VSI num */
2820 		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
2821 			f_entry.fltr_info.src = hw_vsi_id;
2822 		if (recp_id == ICE_SW_LKUP_VLAN)
2823 			status = ice_add_vlan_internal(hw, &f_entry);
2824 		else
2825 			status = ice_add_rule_internal(hw, recp_id, &f_entry);
2826 		if (status)
2827 			goto end;
2828 	}
2829 end:
2830 	return status;
2831 }
2832 
2833 /**
2834  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
2835  * @hw: pointer to the hardware structure
2836  * @vsi_handle: driver VSI handle
2837  *
2838  * Replays filters for requested VSI via vsi_handle.
2839  */
2840 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
2841 {
2842 	struct ice_switch_info *sw = hw->switch_info;
2843 	enum ice_status status = 0;
2844 	u8 i;
2845 
2846 	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
2847 		struct list_head *head;
2848 
2849 		head = &sw->recp_list[i].filt_replay_rules;
2850 		status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
2851 		if (status)
2852 			return status;
2853 	}
2854 	return status;
2855 }
2856 
2857 /**
2858  * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
2859  * @hw: pointer to the HW struct
2860  *
2861  * Deletes the filter replay rules.
2862  */
2863 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
2864 {
2865 	struct ice_switch_info *sw = hw->switch_info;
2866 	u8 i;
2867 
2868 	if (!sw)
2869 		return;
2870 
2871 	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
2872 		if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
2873 			struct list_head *l_head;
2874 
2875 			l_head = &sw->recp_list[i].filt_replay_rules;
2876 			ice_rem_sw_rule_info(hw, l_head);
2877 		}
2878 	}
2879 }
2880