1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3
4 #include "dr_types.h"
5
/* Upper bound on the number of STEs a single rule chain may span:
 * the matcher's match STEs plus any extra STEs added for actions.
 */
#define DR_RULE_MAX_STE_CHAIN (DR_RULE_MAX_STES + DR_ACTION_MAX_STES)

/* Links one mlx5dr_action into a rule's rule_actions_list so the rule
 * can hold (and later drop) a reference on every action it uses.
 */
struct mlx5dr_rule_action_member {
	struct mlx5dr_action *action;
	struct list_head list;	/* entry in rule->rule_actions_list */
};
12
/* Append @new_last_ste to the tail of @miss_list and repoint the previous
 * tail's miss address at it.  The previous tail's updated hw_ste is queued
 * on @send_list so the change is flushed to HW later, in list order.
 *
 * Return: 0 on success, -ENOMEM if the send-info descriptor allocation fails.
 */
static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste,
				       struct list_head *miss_list,
				       struct list_head *send_list)
{
	struct mlx5dr_ste_send_info *ste_info_last;
	struct mlx5dr_ste *last_ste;

	/* The new entry will be inserted after the last */
	last_ste = list_last_entry(miss_list, struct mlx5dr_ste, miss_list_node);
	WARN_ON(!last_ste);

	ste_info_last = kzalloc(sizeof(*ste_info_last), GFP_KERNEL);
	if (!ste_info_last)
		return -ENOMEM;

	/* The old tail now misses to the new tail instead of its old target */
	mlx5dr_ste_set_miss_addr(last_ste->hw_ste,
				 mlx5dr_ste_get_icm_addr(new_last_ste));
	list_add_tail(&new_last_ste->miss_list_node, miss_list);

	/* Only DR_STE_SIZE_REDUCED bytes are rewritten on the wire; the mask
	 * part of the STE is untouched by a miss-address change.
	 */
	mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_REDUCED,
						  0, last_ste->hw_ste,
						  ste_info_last, send_list, true);

	return 0;
}
38
/* Allocate a one-entry hash table used to hold a collision (or action)
 * entry, and point @hw_ste's miss address at the matcher's end anchor.
 *
 * Return: the single STE of the new table, or NULL on allocation failure.
 * The caller owns the returned STE (and, via it, the new table's refcount).
 */
static struct mlx5dr_ste *
dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
			      struct mlx5dr_matcher_rx_tx *nic_matcher,
			      u8 *hw_ste)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_htbl *new_htbl;
	struct mlx5dr_ste *ste;

	/* Create new table for miss entry */
	new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
					 DR_CHUNK_SIZE_1,
					 MLX5DR_STE_LU_TYPE_DONT_CARE,
					 0);
	if (!new_htbl) {
		mlx5dr_dbg(dmn, "Failed allocating collision table\n");
		return NULL;
	}

	/* One and only entry, never grows */
	ste = new_htbl->ste_arr;
	mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
	mlx5dr_htbl_get(new_htbl);

	return ste;
}
65
/* Create a collision entry for @orig_ste: a one-entry table whose STE shares
 * @orig_ste's chain location and miss list, plus the next-hop table needed
 * to continue the rule chain.
 *
 * Return: the new collision STE, or NULL on failure (nothing is leaked).
 */
static struct mlx5dr_ste *
dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
			       struct mlx5dr_matcher_rx_tx *nic_matcher,
			       u8 *hw_ste,
			       struct mlx5dr_ste *orig_ste)
{
	struct mlx5dr_ste *ste;

	ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
	if (!ste) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed creating collision entry\n");
		return NULL;
	}

	ste->ste_chain_location = orig_ste->ste_chain_location;

	/* In collision entry, all members share the same miss_list_head */
	ste->htbl->miss_list = mlx5dr_ste_get_miss_list(orig_ste);

	/* Next table */
	if (mlx5dr_ste_create_next_htbl(matcher, nic_matcher, ste, hw_ste,
					DR_CHUNK_SIZE_1)) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
		goto free_tbl;
	}

	return ste;

free_tbl:
	mlx5dr_ste_free(ste, matcher, nic_matcher);
	return NULL;
}
98
/* Pop @ste_info from its send list and post its STE data to HW.  On success
 * the CPU-side copy of the STE is updated to match; in all cases the
 * send-info descriptor is freed.
 *
 * Return: 0 on success or the error from mlx5dr_send_postsend_ste().
 */
static int
dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
				      struct mlx5dr_domain *dmn)
{
	int ret;

	list_del(&ste_info->send_list);
	ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
				       ste_info->size, ste_info->offset);
	if (ret)
		goto out;
	/* Copy data to ste, only reduced size, the last 16B (mask)
	 * is already written to the hw.
	 */
	memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED);

out:
	kfree(ste_info);
	return ret;
}
119
/* Flush every queued STE update on @send_ste_list to HW.  @is_reverse
 * selects tail-to-head iteration, used when the updates must land in the
 * opposite order from which they were queued.  The two branches are
 * duplicated because the kernel list iterators are distinct macros.
 *
 * Return: 0 on success, or the first posting error (remaining entries are
 * left on the list).
 */
static int dr_rule_send_update_list(struct list_head *send_ste_list,
				    struct mlx5dr_domain *dmn,
				    bool is_reverse)
{
	struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
	int ret;

	if (is_reverse) {
		list_for_each_entry_safe_reverse(ste_info, tmp_ste_info,
						 send_ste_list, send_list) {
			ret = dr_rule_handle_one_ste_in_update_list(ste_info,
								    dmn);
			if (ret)
				return ret;
		}
	} else {
		list_for_each_entry_safe(ste_info, tmp_ste_info,
					 send_ste_list, send_list) {
			ret = dr_rule_handle_one_ste_in_update_list(ste_info,
								    dmn);
			if (ret)
				return ret;
		}
	}

	return 0;
}
147
148 static struct mlx5dr_ste *
dr_rule_find_ste_in_miss_list(struct list_head * miss_list,u8 * hw_ste)149 dr_rule_find_ste_in_miss_list(struct list_head *miss_list, u8 *hw_ste)
150 {
151 struct mlx5dr_ste *ste;
152
153 if (list_empty(miss_list))
154 return NULL;
155
156 /* Check if hw_ste is present in the list */
157 list_for_each_entry(ste, miss_list, miss_list_node) {
158 if (mlx5dr_ste_equal_tag(ste->hw_ste, hw_ste))
159 return ste;
160 }
161
162 return NULL;
163 }
164
/* During rehash, handle a destination slot that is already occupied:
 * create a collision entry sharing @col_ste's miss list and chain it in,
 * queueing the required HW updates on @update_list.
 *
 * Return: the new collision STE, or NULL on failure.
 */
static struct mlx5dr_ste *
dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct list_head *update_list,
				struct mlx5dr_ste *col_ste,
				u8 *hw_ste)
{
	struct mlx5dr_ste *new_ste;
	int ret;

	new_ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
	if (!new_ste)
		return NULL;

	/* In collision entry, all members share the same miss_list_head */
	new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste);

	/* Update the previous from the list */
	ret = dr_rule_append_to_miss_list(new_ste,
					  mlx5dr_ste_get_miss_list(col_ste),
					  update_list);
	if (ret) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed update dup entry\n");
		goto err_exit;
	}

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
	return NULL;
}
197
/* Transfer control state from @cur_ste to its rehash replacement @new_ste:
 * next-table link, chain location, refcount, and the attached rule members.
 * @matcher is unused here but kept for signature symmetry with its callers.
 */
static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
					 struct mlx5dr_matcher_rx_tx *nic_matcher,
					 struct mlx5dr_ste *cur_ste,
					 struct mlx5dr_ste *new_ste)
{
	new_ste->next_htbl = cur_ste->next_htbl;
	new_ste->ste_chain_location = cur_ste->ste_chain_location;

	/* The last STE in a rule has no next table to repoint */
	if (!mlx5dr_ste_is_last_in_rule(nic_matcher, new_ste->ste_chain_location))
		new_ste->next_htbl->pointing_ste = new_ste;

	/* We need to copy the refcount since this ste
	 * may have been traversed several times
	 */
	refcount_set(&new_ste->refcount, refcount_read(&cur_ste->refcount));

	/* Link old STEs rule_mem list to the new ste */
	mlx5dr_rule_update_rule_member(cur_ste, new_ste);
	INIT_LIST_HEAD(&new_ste->rule_list);
	list_splice_tail_init(&cur_ste->rule_list, &new_ste->rule_list);
}
219
/* Copy a single used STE from the old table into @new_htbl during rehash.
 * Rebuilds the STE (mask + control/tag) on the stack, hashes it into the
 * new table, and either takes a free slot or falls back to a collision
 * entry.  Collision entries need their HW write queued on @update_list;
 * plain entries are written as part of the whole-table post later.
 *
 * Return: the new STE, or NULL on failure.
 */
static struct mlx5dr_ste *
dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
			struct mlx5dr_matcher_rx_tx *nic_matcher,
			struct mlx5dr_ste *cur_ste,
			struct mlx5dr_ste_htbl *new_htbl,
			struct list_head *update_list)
{
	struct mlx5dr_ste_send_info *ste_info;
	bool use_update_list = false;
	u8 hw_ste[DR_STE_SIZE] = {};
	struct mlx5dr_ste *new_ste;
	int new_idx;
	u8 sb_idx;

	/* Copy STE mask from the matcher */
	sb_idx = cur_ste->ste_chain_location - 1;
	mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);

	/* Copy STE control and tag */
	memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED);
	mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);

	/* Slot in the new (larger) table may differ from the old one */
	new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
	new_ste = &new_htbl->ste_arr[new_idx];

	if (mlx5dr_ste_not_used_ste(new_ste)) {
		/* Free slot: take a table reference and start its miss list */
		mlx5dr_htbl_get(new_htbl);
		list_add_tail(&new_ste->miss_list_node,
			      mlx5dr_ste_get_miss_list(new_ste));
	} else {
		/* Slot taken: chain a collision entry instead */
		new_ste = dr_rule_rehash_handle_collision(matcher,
							  nic_matcher,
							  update_list,
							  new_ste,
							  hw_ste);
		if (!new_ste) {
			mlx5dr_dbg(matcher->tbl->dmn, "Failed adding collision entry, index: %d\n",
				   new_idx);
			return NULL;
		}
		new_htbl->ctrl.num_of_collisions++;
		use_update_list = true;
	}

	memcpy(new_ste->hw_ste, hw_ste, DR_STE_SIZE_REDUCED);

	new_htbl->ctrl.num_of_valid_entries++;

	if (use_update_list) {
		/* Collision entries live outside new_htbl and must be
		 * posted individually.
		 */
		ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
		if (!ste_info)
			goto err_exit;

		mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0,
							  hw_ste, ste_info,
							  update_list, true);
	}

	dr_rule_rehash_copy_ste_ctrl(matcher, nic_matcher, cur_ste, new_ste);

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
	return NULL;
}
286
/* Move every STE on @cur_miss_list into @new_htbl, unlinking each from the
 * old table (and dropping the old table's reference) as it is copied.
 *
 * Return: 0 on success; -EINVAL (after a WARN) if a copy fails mid-way,
 * since a partially-moved miss list cannot be rolled back.
 */
static int dr_rule_rehash_copy_miss_list(struct mlx5dr_matcher *matcher,
					 struct mlx5dr_matcher_rx_tx *nic_matcher,
					 struct list_head *cur_miss_list,
					 struct mlx5dr_ste_htbl *new_htbl,
					 struct list_head *update_list)
{
	struct mlx5dr_ste *tmp_ste, *cur_ste, *new_ste;

	if (list_empty(cur_miss_list))
		return 0;

	list_for_each_entry_safe(cur_ste, tmp_ste, cur_miss_list, miss_list_node) {
		new_ste = dr_rule_rehash_copy_ste(matcher,
						  nic_matcher,
						  cur_ste,
						  new_htbl,
						  update_list);
		if (!new_ste)
			goto err_insert;

		/* Detach the old STE; its state now lives in new_ste */
		list_del(&cur_ste->miss_list_node);
		mlx5dr_htbl_put(cur_ste->htbl);
	}
	return 0;

err_insert:
	mlx5dr_err(matcher->tbl->dmn, "Fatal error during resize\n");
	WARN_ON(true);
	return -EINVAL;
}
317
dr_rule_rehash_copy_htbl(struct mlx5dr_matcher * matcher,struct mlx5dr_matcher_rx_tx * nic_matcher,struct mlx5dr_ste_htbl * cur_htbl,struct mlx5dr_ste_htbl * new_htbl,struct list_head * update_list)318 static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
319 struct mlx5dr_matcher_rx_tx *nic_matcher,
320 struct mlx5dr_ste_htbl *cur_htbl,
321 struct mlx5dr_ste_htbl *new_htbl,
322 struct list_head *update_list)
323 {
324 struct mlx5dr_ste *cur_ste;
325 int cur_entries;
326 int err = 0;
327 int i;
328
329 cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk_size);
330
331 if (cur_entries < 1) {
332 mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");
333 return -EINVAL;
334 }
335
336 for (i = 0; i < cur_entries; i++) {
337 cur_ste = &cur_htbl->ste_arr[i];
338 if (mlx5dr_ste_not_used_ste(cur_ste)) /* Empty, nothing to copy */
339 continue;
340
341 err = dr_rule_rehash_copy_miss_list(matcher,
342 nic_matcher,
343 mlx5dr_ste_get_miss_list(cur_ste),
344 new_htbl,
345 update_list);
346 if (err)
347 goto clean_copy;
348 }
349
350 clean_copy:
351 return err;
352 }
353
/* Grow @cur_htbl into a new table of @new_size.  Sequence: allocate the new
 * table, copy all entries (and collision chains) into it, post the whole
 * table to HW, flush the per-entry collision updates, then queue the final
 * update that repoints the previous STE (or the matcher anchor) at the new
 * table.  That last update goes on the caller's @update_list so it is only
 * written once the caller commits.
 *
 * Return: the new table, or NULL on any failure (allocations are unwound).
 */
static struct mlx5dr_ste_htbl *
dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
		    struct mlx5dr_rule_rx_tx *nic_rule,
		    struct mlx5dr_ste_htbl *cur_htbl,
		    u8 ste_location,
		    struct list_head *update_list,
		    enum mlx5dr_icm_chunk_size new_size)
{
	struct mlx5dr_ste_send_info *del_ste_info, *tmp_ste_info;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_ste_send_info *ste_info;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	u8 formatted_ste[DR_STE_SIZE] = {};
	LIST_HEAD(rehash_table_send_list);
	struct mlx5dr_ste *ste_to_update;
	struct mlx5dr_ste_htbl *new_htbl;
	int err;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

	/* Allocated up-front so the final connect step cannot fail on OOM */
	ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
	if (!ste_info)
		return NULL;

	new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
					 new_size,
					 cur_htbl->lu_type,
					 cur_htbl->byte_mask);
	if (!new_htbl) {
		mlx5dr_err(dmn, "Failed to allocate new hash table\n");
		goto free_ste_info;
	}

	/* Write new table to HW */
	info.type = CONNECT_MISS;
	info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
	mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
				     nic_dmn,
				     new_htbl,
				     formatted_ste,
				     &info);

	new_htbl->pointing_ste = cur_htbl->pointing_ste;
	new_htbl->pointing_ste->next_htbl = new_htbl;
	err = dr_rule_rehash_copy_htbl(matcher,
				       nic_matcher,
				       cur_htbl,
				       new_htbl,
				       &rehash_table_send_list);
	if (err)
		goto free_new_htbl;

	if (mlx5dr_send_postsend_htbl(dmn, new_htbl, formatted_ste,
				      nic_matcher->ste_builder[ste_location - 1].bit_mask)) {
		mlx5dr_err(dmn, "Failed writing table to HW\n");
		goto free_new_htbl;
	}

	/* Writing to the hw is done in regular order of rehash_table_send_list,
	 * in order to have the origin data written before the miss address of
	 * collision entries, if exists.
	 */
	if (dr_rule_send_update_list(&rehash_table_send_list, dmn, false)) {
		mlx5dr_err(dmn, "Failed updating table to HW\n");
		goto free_ste_list;
	}

	/* Connect previous hash table to current */
	if (ste_location == 1) {
		/* The previous table is an anchor, anchors size is always one STE */
		struct mlx5dr_ste_htbl *prev_htbl = cur_htbl->pointing_ste->htbl;

		/* On matcher s_anchor we keep an extra refcount */
		mlx5dr_htbl_get(new_htbl);
		mlx5dr_htbl_put(cur_htbl);

		nic_matcher->s_htbl = new_htbl;

		/* It is safe to operate dr_ste_set_hit_addr on the hw_ste here
		 * (48B len) which works only on first 32B
		 */
		mlx5dr_ste_set_hit_addr(prev_htbl->ste_arr[0].hw_ste,
					new_htbl->chunk->icm_addr,
					new_htbl->chunk->num_of_entries);

		ste_to_update = &prev_htbl->ste_arr[0];
	} else {
		mlx5dr_ste_set_hit_addr_by_next_htbl(cur_htbl->pointing_ste->hw_ste,
						     new_htbl);
		ste_to_update = cur_htbl->pointing_ste;
	}

	/* Queue (not post) the connect; the caller flushes update_list */
	mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_REDUCED,
						  0, ste_to_update->hw_ste, ste_info,
						  update_list, false);

	return new_htbl;

free_ste_list:
	/* Clean all ste_info's from the new table */
	list_for_each_entry_safe(del_ste_info, tmp_ste_info,
				 &rehash_table_send_list, send_list) {
		list_del(&del_ste_info->send_list);
		kfree(del_ste_info);
	}

free_new_htbl:
	mlx5dr_ste_htbl_free(new_htbl);
free_ste_info:
	kfree(ste_info);
	mlx5dr_info(dmn, "Failed creating rehash table\n");
	return NULL;
}
471
/* Grow @cur_htbl to the next chunk size, capped by the domain maximum, and
 * migrate its contents.  Returns the new table, or NULL when the table is
 * already at the cap (or the rehash itself failed).
 */
static struct mlx5dr_ste_htbl *dr_rule_rehash(struct mlx5dr_rule *rule,
					      struct mlx5dr_rule_rx_tx *nic_rule,
					      struct mlx5dr_ste_htbl *cur_htbl,
					      u8 ste_location,
					      struct list_head *update_list)
{
	struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
	enum mlx5dr_icm_chunk_size grown_size;

	grown_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk_size);
	grown_size = min_t(u32, grown_size, dmn->info.max_log_sw_icm_sz);

	/* Capped at the current size - nothing can be gained by rehashing */
	if (grown_size == cur_htbl->chunk_size)
		return NULL;

	return dr_rule_rehash_htbl(rule, nic_rule, cur_htbl, ste_location,
				   update_list, grown_size);
}
490
/* Handle a hash collision while inserting a rule STE: create a collision
 * entry chained onto @miss_list and queue both the previous tail's miss
 * update and the new entry's full write on @send_list.
 *
 * Return: the new STE, or NULL on failure (allocations are unwound).
 */
static struct mlx5dr_ste *
dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
			 struct mlx5dr_matcher_rx_tx *nic_matcher,
			 struct mlx5dr_ste *ste,
			 u8 *hw_ste,
			 struct list_head *miss_list,
			 struct list_head *send_list)
{
	struct mlx5dr_ste_send_info *ste_info;
	struct mlx5dr_ste *new_ste;

	ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
	if (!ste_info)
		return NULL;

	new_ste = dr_rule_create_collision_entry(matcher, nic_matcher, hw_ste, ste);
	if (!new_ste)
		goto free_send_info;

	if (dr_rule_append_to_miss_list(new_ste, miss_list, send_list)) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed to update prev miss_list\n");
		goto err_exit;
	}

	mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0, hw_ste,
						  ste_info, send_list, false);

	/* Book-keeping used by dr_rule_need_enlarge_hash() */
	ste->htbl->ctrl.num_of_collisions++;
	ste->htbl->ctrl.num_of_valid_entries++;

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
free_send_info:
	kfree(ste_info);
	return NULL;
}
529
dr_rule_remove_action_members(struct mlx5dr_rule * rule)530 static void dr_rule_remove_action_members(struct mlx5dr_rule *rule)
531 {
532 struct mlx5dr_rule_action_member *action_mem;
533 struct mlx5dr_rule_action_member *tmp;
534
535 list_for_each_entry_safe(action_mem, tmp, &rule->rule_actions_list, list) {
536 list_del(&action_mem->list);
537 refcount_dec(&action_mem->action->refcount);
538 kvfree(action_mem);
539 }
540 }
541
/* Record each action of @actions on the rule's action list, taking a
 * reference per action.  On allocation failure every member added so far
 * is rolled back.
 *
 * Return: 0 on success, -ENOMEM on failure.
 */
static int dr_rule_add_action_members(struct mlx5dr_rule *rule,
				      size_t num_actions,
				      struct mlx5dr_action *actions[])
{
	struct mlx5dr_rule_action_member *member;
	size_t idx;

	for (idx = 0; idx < num_actions; idx++) {
		member = kvzalloc(sizeof(*member), GFP_KERNEL);
		if (!member)
			goto rollback;

		refcount_inc(&actions[idx]->refcount);
		member->action = actions[idx];
		INIT_LIST_HEAD(&member->list);
		list_add_tail(&member->list, &rule->rule_actions_list);
	}

	return 0;

rollback:
	dr_rule_remove_action_members(rule);
	return -ENOMEM;
}
566
567 /* While the pointer of ste is no longer valid, like while moving ste to be
568 * the first in the miss_list, and to be in the origin table,
569 * all rule-members that are attached to this ste should update their ste member
570 * to the new pointer
571 */
mlx5dr_rule_update_rule_member(struct mlx5dr_ste * ste,struct mlx5dr_ste * new_ste)572 void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *ste,
573 struct mlx5dr_ste *new_ste)
574 {
575 struct mlx5dr_rule_member *rule_mem;
576
577 if (!list_empty(&ste->rule_list))
578 list_for_each_entry(rule_mem, &ste->rule_list, use_ste_list)
579 rule_mem->ste = new_ste;
580 }
581
/* Tear down every rule member of @nic_rule: unlink it from both the rule's
 * member list and its STE's rule_list, drop the STE reference, and free it.
 */
static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
				       struct mlx5dr_rule_rx_tx *nic_rule)
{
	struct mlx5dr_rule_member *rule_mem;
	struct mlx5dr_rule_member *tmp_mem;

	if (list_empty(&nic_rule->rule_members_list))
		return;
	list_for_each_entry_safe(rule_mem, tmp_mem, &nic_rule->rule_members_list, list) {
		list_del(&rule_mem->list);
		list_del(&rule_mem->use_ste_list);
		/* May free the STE (and its table) once the refcount drops */
		mlx5dr_ste_put(rule_mem->ste, rule->matcher, nic_rule->nic_matcher);
		kvfree(rule_mem);
	}
}
597
/* Population count of @byte_mask: number of bits set. */
static u16 dr_get_bits_per_mask(u16 byte_mask)
{
	u16 count = 0;

	while (byte_mask) {
		count += byte_mask & 1;
		byte_mask >>= 1;
	}

	return count;
}
609
/* Decide whether @htbl should be rehashed into a larger table.  Growth is
 * refused when the table is already at the domain maximum, when the table
 * is flagged non-growable, or when the mask is too narrow for a larger
 * table to reduce collisions.  Otherwise grow once both the collision
 * count and the non-collision valid-entry count pass the threshold.
 */
static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn)
{
	struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;

	if (dmn->info.max_log_sw_icm_sz <= htbl->chunk_size)
		return false;

	if (!ctrl->may_grow)
		return false;

	/* NOTE(review): compares mask bit count * 8 against chunk_size, which
	 * elsewhere is treated as a log2 size - assumed intentional (mask bits
	 * bound the useful table size); confirm against the STE hash scheme.
	 */
	if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size)
		return false;

	if (ctrl->num_of_collisions >= ctrl->increase_threshold &&
	    (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= ctrl->increase_threshold)
		return true;

	return false;
}
631
/* Attach @ste to @nic_rule as a rule member: the member joins both the
 * rule's member list and the STE's rule_list (the latter is what lets
 * mlx5dr_rule_update_rule_member() re-target members when STEs move).
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule,
			      struct mlx5dr_ste *ste)
{
	struct mlx5dr_rule_member *rule_mem;

	rule_mem = kvzalloc(sizeof(*rule_mem), GFP_KERNEL);
	if (!rule_mem)
		return -ENOMEM;

	rule_mem->ste = ste;
	list_add_tail(&rule_mem->list, &nic_rule->rule_members_list);

	list_add_tail(&rule_mem->use_ste_list, &ste->rule_list);

	return 0;
}
648
/* Create the extra STEs needed when a rule's actions do not fit in the
 * match STEs.  For each extra entry in @hw_ste_arr beyond the matcher's
 * builders, allocate a one-entry table, link the previous STE's hit
 * address to it, register it as a rule member, and queue its HW write.
 *
 * Return: 0 on success (or when no extra STEs are needed), -ENOMEM on
 * failure.  Previously created action STEs are cleaned up by the caller
 * via the rule-member list.
 */
static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
				      struct mlx5dr_rule_rx_tx *nic_rule,
				      struct list_head *send_ste_list,
				      struct mlx5dr_ste *last_ste,
				      u8 *hw_ste_arr,
				      u32 new_hw_ste_arr_sz)
{
	struct mlx5dr_matcher_rx_tx *nic_matcher = nic_rule->nic_matcher;
	struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
	u8 num_of_builders = nic_matcher->num_of_builders;
	struct mlx5dr_matcher *matcher = rule->matcher;
	u8 *curr_hw_ste, *prev_hw_ste;
	struct mlx5dr_ste *action_ste;
	int i, k, ret;

	/* Two cases:
	 * 1. num_of_builders is equal to new_hw_ste_arr_sz, the action in the ste
	 * 2. num_of_builders is less then new_hw_ste_arr_sz, new ste was added
	 *    to support the action.
	 */
	if (num_of_builders == new_hw_ste_arr_sz)
		return 0;

	/* i indexes hw_ste_arr entries; k indexes the action STEs only */
	for (i = num_of_builders, k = 0; i < new_hw_ste_arr_sz; i++, k++) {
		curr_hw_ste = hw_ste_arr + i * DR_STE_SIZE;
		prev_hw_ste = (i == 0) ? curr_hw_ste : hw_ste_arr + ((i - 1) * DR_STE_SIZE);
		action_ste = dr_rule_create_collision_htbl(matcher,
							   nic_matcher,
							   curr_hw_ste);
		if (!action_ste)
			return -ENOMEM;

		mlx5dr_ste_get(action_ste);

		/* While free ste we go over the miss list, so add this ste to the list */
		list_add_tail(&action_ste->miss_list_node,
			      mlx5dr_ste_get_miss_list(action_ste));

		ste_info_arr[k] = kzalloc(sizeof(*ste_info_arr[k]),
					  GFP_KERNEL);
		if (!ste_info_arr[k])
			goto err_exit;

		/* Point current ste to the new action */
		mlx5dr_ste_set_hit_addr_by_next_htbl(prev_hw_ste, action_ste->htbl);
		ret = dr_rule_add_member(nic_rule, action_ste);
		if (ret) {
			mlx5dr_dbg(matcher->tbl->dmn, "Failed adding rule member\n");
			goto free_ste_info;
		}
		mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
							  curr_hw_ste,
							  ste_info_arr[k],
							  send_ste_list, false);
	}

	return 0;

free_ste_info:
	kfree(ste_info_arr[k]);
err_exit:
	/* Only the current action_ste is released here; earlier ones are
	 * reachable through the rule-member list for the caller's cleanup.
	 */
	mlx5dr_ste_put(action_ste, matcher, nic_matcher);
	return -ENOMEM;
}
713
/* Claim an unused slot @ste of @cur_htbl for a new rule branch: start its
 * miss list, point its miss address at the matcher's end anchor, create
 * its next-hop table, and queue the full STE write on @send_list.
 *
 * Return: 0 on success, -ENOMEM on failure (all setup is undone).
 */
static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
				      struct mlx5dr_matcher_rx_tx *nic_matcher,
				      struct mlx5dr_ste_htbl *cur_htbl,
				      struct mlx5dr_ste *ste,
				      u8 ste_location,
				      u8 *hw_ste,
				      struct list_head *miss_list,
				      struct list_head *send_list)
{
	struct mlx5dr_ste_send_info *ste_info;

	/* Take ref on table, only on first time this ste is used */
	mlx5dr_htbl_get(cur_htbl);

	/* new entry -> new branch */
	list_add_tail(&ste->miss_list_node, miss_list);

	mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);

	ste->ste_chain_location = ste_location;

	ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
	if (!ste_info)
		goto clean_ste_setting;

	if (mlx5dr_ste_create_next_htbl(matcher,
					nic_matcher,
					ste,
					hw_ste,
					DR_CHUNK_SIZE_1)) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
		goto clean_ste_info;
	}

	cur_htbl->ctrl.num_of_valid_entries++;

	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE, 0, hw_ste,
						  ste_info, send_list, false);

	return 0;

clean_ste_info:
	kfree(ste_info);
clean_ste_setting:
	list_del_init(&ste->miss_list_node);
	mlx5dr_htbl_put(cur_htbl);

	return -ENOMEM;
}
763
/* Place one match STE of a rule into @cur_htbl.  Hashes @hw_ste into the
 * table and then: takes the slot if free, reuses an identical existing STE
 * (duplicate detection), rehashes the table once if it should grow, or
 * falls back to a collision entry.  On rehash the lookup is retried via
 * the "again" label with the (possibly new) table; @skip_rehash guarantees
 * at most one rehash attempt per call.
 *
 * Return: the STE now representing this chain step, or NULL on failure.
 * On rehash, *put_htbl is set to the table whose extra reference the
 * caller must release (see comment at the mlx5dr_htbl_get() below).
 */
static struct mlx5dr_ste *
dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
			  struct mlx5dr_rule_rx_tx *nic_rule,
			  struct list_head *send_ste_list,
			  struct mlx5dr_ste_htbl *cur_htbl,
			  u8 *hw_ste,
			  u8 ste_location,
			  struct mlx5dr_ste_htbl **put_htbl)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	struct mlx5dr_ste_htbl *new_htbl;
	struct mlx5dr_ste *matched_ste;
	struct list_head *miss_list;
	bool skip_rehash = false;
	struct mlx5dr_ste *ste;
	int index;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

again:
	index = mlx5dr_ste_calc_hash_index(hw_ste, cur_htbl);
	miss_list = &cur_htbl->chunk->miss_list[index];
	ste = &cur_htbl->ste_arr[index];

	if (mlx5dr_ste_not_used_ste(ste)) {
		if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
					       ste, ste_location,
					       hw_ste, miss_list,
					       send_ste_list))
			return NULL;
	} else {
		/* Hash table index in use, check if this ste is in the miss list */
		matched_ste = dr_rule_find_ste_in_miss_list(miss_list, hw_ste);
		if (matched_ste) {
			/* If it is last STE in the chain, and has the same tag
			 * it means that all the previous stes are the same,
			 * if so, this rule is duplicated.
			 */
			if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste_location))
				return matched_ste;

			mlx5dr_dbg(dmn, "Duplicate rule inserted\n");
		}

		if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
			/* Hash table index in use, try to resize of the hash */
			skip_rehash = true;

			/* Hold the table till we update.
			 * Release in dr_rule_create_rule()
			 */
			*put_htbl = cur_htbl;
			mlx5dr_htbl_get(cur_htbl);

			new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl,
						  ste_location, send_ste_list);
			if (!new_htbl) {
				/* Rehash failed or not possible - retry on
				 * the original table without growing.
				 */
				mlx5dr_htbl_put(cur_htbl);
				mlx5dr_info(dmn, "failed creating rehash table, htbl-log_size: %d\n",
					    cur_htbl->chunk_size);
			} else {
				cur_htbl = new_htbl;
			}
			goto again;
		} else {
			/* Hash table index in use, add another collision (miss) */
			ste = dr_rule_handle_collision(matcher,
						       nic_matcher,
						       ste,
						       hw_ste,
						       miss_list,
						       send_ste_list);
			if (!ste) {
				mlx5dr_dbg(dmn, "failed adding collision entry, index: %d\n",
					   index);
				return NULL;
			}
		}
	}
	return ste;
}
849
dr_rule_cmp_value_to_mask(u8 * mask,u8 * value,u32 s_idx,u32 e_idx)850 static bool dr_rule_cmp_value_to_mask(u8 *mask, u8 *value,
851 u32 s_idx, u32 e_idx)
852 {
853 u32 i;
854
855 for (i = s_idx; i < e_idx; i++) {
856 if (value[i] & ~mask[i]) {
857 pr_info("Rule parameters contains a value not specified by mask\n");
858 return false;
859 }
860 }
861 return true;
862 }
863
/* Validate the user-supplied match @value against the matcher's mask and
 * unpack it into @param.  The value must be non-empty, 4-byte aligned and
 * no larger than mlx5dr_match_param; then, for every criteria section the
 * matcher uses, no value bit may fall outside the matcher's mask.
 *
 * Return: true if the rule parameters are valid, false otherwise.
 */
static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
			   struct mlx5dr_match_parameters *value,
			   struct mlx5dr_match_param *param)
{
	u8 match_criteria = matcher->match_criteria;
	size_t value_size = value->match_sz;
	u8 *mask_p = (u8 *)&matcher->mask;
	u8 *param_p = (u8 *)param;
	u32 s_idx, e_idx;

	if (!value_size ||
	    (value_size > sizeof(struct mlx5dr_match_param) ||
	     (value_size % sizeof(u32)))) {
		mlx5dr_dbg(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
		return false;
	}

	mlx5dr_ste_copy_param(matcher->match_criteria, param, value);

	/* Each section below checks its own byte window of the match param;
	 * e_idx is clamped so a short value is only checked as far as given.
	 */
	if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
		s_idx = offsetof(struct mlx5dr_match_param, outer);
		e_idx = min(s_idx + sizeof(param->outer), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_dbg(matcher->tbl->dmn, "Rule outer parameters contains a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		s_idx = offsetof(struct mlx5dr_match_param, misc);
		e_idx = min(s_idx + sizeof(param->misc), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_dbg(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
		s_idx = offsetof(struct mlx5dr_match_param, inner);
		e_idx = min(s_idx + sizeof(param->inner), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_dbg(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
		s_idx = offsetof(struct mlx5dr_match_param, misc2);
		e_idx = min(s_idx + sizeof(param->misc2), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_dbg(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
		s_idx = offsetof(struct mlx5dr_match_param, misc3);
		e_idx = min(s_idx + sizeof(param->misc3), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_dbg(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n");
			return false;
		}
	}
	return true;
}
934
/* Destroy the RX- or TX-side state of @rule by releasing its rule members
 * (and, through them, the STE references they hold).  Always returns 0.
 */
static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
				    struct mlx5dr_rule_rx_tx *nic_rule)
{
	dr_rule_clean_rule_members(rule, nic_rule);
	return 0;
}
941
/* FDB rules have both RX and TX sides; destroy each.  Always returns 0. */
static int dr_rule_destroy_rule_fdb(struct mlx5dr_rule *rule)
{
	dr_rule_destroy_rule_nic(rule, &rule->rx);
	dr_rule_destroy_rule_nic(rule, &rule->tx);
	return 0;
}
948
dr_rule_destroy_rule(struct mlx5dr_rule * rule)949 static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
950 {
951 struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
952
953 switch (dmn->type) {
954 case MLX5DR_DOMAIN_TYPE_NIC_RX:
955 dr_rule_destroy_rule_nic(rule, &rule->rx);
956 break;
957 case MLX5DR_DOMAIN_TYPE_NIC_TX:
958 dr_rule_destroy_rule_nic(rule, &rule->tx);
959 break;
960 case MLX5DR_DOMAIN_TYPE_FDB:
961 dr_rule_destroy_rule_fdb(rule);
962 break;
963 default:
964 return -EINVAL;
965 }
966
967 dr_rule_remove_action_members(rule);
968 kfree(rule);
969 return 0;
970 }
971
dr_rule_is_ipv6(struct mlx5dr_match_param * param)972 static bool dr_rule_is_ipv6(struct mlx5dr_match_param *param)
973 {
974 return (param->outer.ip_version == 6 ||
975 param->inner.ip_version == 6 ||
976 param->outer.ethertype == ETH_P_IPV6 ||
977 param->inner.ethertype == ETH_P_IPV6);
978 }
979
dr_rule_skip(enum mlx5dr_domain_type domain,enum mlx5dr_ste_entry_type ste_type,struct mlx5dr_match_param * mask,struct mlx5dr_match_param * value)980 static bool dr_rule_skip(enum mlx5dr_domain_type domain,
981 enum mlx5dr_ste_entry_type ste_type,
982 struct mlx5dr_match_param *mask,
983 struct mlx5dr_match_param *value)
984 {
985 if (domain != MLX5DR_DOMAIN_TYPE_FDB)
986 return false;
987
988 if (mask->misc.source_port) {
989 if (ste_type == MLX5DR_STE_TYPE_RX)
990 if (value->misc.source_port != WIRE_PORT)
991 return true;
992
993 if (ste_type == MLX5DR_STE_TYPE_TX)
994 if (value->misc.source_port == WIRE_PORT)
995 return true;
996 }
997
998 /* Metadata C can be used to describe the source vport */
999 if (mask->misc2.metadata_reg_c_0) {
1000 if (ste_type == MLX5DR_STE_TYPE_RX)
1001 if ((value->misc2.metadata_reg_c_0 & WIRE_PORT) != WIRE_PORT)
1002 return true;
1003
1004 if (ste_type == MLX5DR_STE_TYPE_TX)
1005 if ((value->misc2.metadata_reg_c_0 & WIRE_PORT) == WIRE_PORT)
1006 return true;
1007 }
1008 return false;
1009 }
1010
/* Build and install one direction (RX or TX) of a rule: translate the
 * match value and actions into an array of hardware STEs, link each STE
 * into the matcher's hash-table chain, then flush all pending STE
 * writes to the device.
 *
 * Returns 0 on success — including the case where the rule is skipped
 * for this direction — or a negative errno. On failure, every STE and
 * pending send-info allocated so far is released.
 */
static int
dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
			struct mlx5dr_rule_rx_tx *nic_rule,
			struct mlx5dr_match_param *param,
			size_t num_actions,
			struct mlx5dr_action *actions[])
{
	struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	struct mlx5dr_ste_htbl *htbl = NULL;
	struct mlx5dr_ste_htbl *cur_htbl;
	struct mlx5dr_ste *ste = NULL;
	LIST_HEAD(send_ste_list);
	u8 *hw_ste_arr = NULL;
	u32 new_hw_ste_arr_sz;
	int ret, i;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

	INIT_LIST_HEAD(&nic_rule->rule_members_list);

	/* Rule not relevant for this direction (e.g. an FDB rule whose
	 * source port can never appear here) — nothing to install.
	 */
	if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param))
		return 0;

	ret = mlx5dr_matcher_select_builders(matcher,
					     nic_matcher,
					     dr_rule_is_ipv6(param));
	if (ret)
		goto out_err;

	/* Scratch buffer large enough for the match STEs plus action STEs */
	hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL);
	if (!hw_ste_arr) {
		ret = -ENOMEM;
		goto out_err;
	}

	/* Set the tag values inside the ste array */
	ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
	if (ret)
		goto free_hw_ste;

	/* Set the actions values/addresses inside the ste array */
	ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions,
					   num_actions, hw_ste_arr,
					   &new_hw_ste_arr_sz);
	if (ret)
		goto free_hw_ste;

	cur_htbl = nic_matcher->s_htbl;

	/* Go over the array of STEs, and build dr_ste accordingly.
	 * The loop is over only the builders which are equal or less to the
	 * number of stes, in case we have actions that lives in other stes.
	 */
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		/* Calculate CRC and keep new ste entry */
		u8 *cur_hw_ste_ent = hw_ste_arr + (i * DR_STE_SIZE);

		ste = dr_rule_handle_ste_branch(rule,
						nic_rule,
						&send_ste_list,
						cur_htbl,
						cur_hw_ste_ent,
						i + 1,
						&htbl);
		if (!ste) {
			mlx5dr_err(dmn, "Failed creating next branch\n");
			ret = -ENOENT;
			goto free_rule;
		}

		cur_htbl = ste->next_htbl;

		/* Keep all STEs in the rule struct */
		ret = dr_rule_add_member(nic_rule, ste);
		if (ret) {
			mlx5dr_dbg(dmn, "Failed adding rule member index %d\n", i);
			goto free_ste;
		}

		mlx5dr_ste_get(ste);
	}

	/* Connect actions */
	ret = dr_rule_handle_action_stes(rule, nic_rule, &send_ste_list,
					 ste, hw_ste_arr, new_hw_ste_arr_sz);
	if (ret) {
		mlx5dr_dbg(dmn, "Failed apply actions\n");
		goto free_rule;
	}
	/* Flush all queued STE writes to the device */
	ret = dr_rule_send_update_list(&send_ste_list, dmn, true);
	if (ret) {
		mlx5dr_err(dmn, "Failed sending ste!\n");
		goto free_rule;
	}

	if (htbl)
		mlx5dr_htbl_put(htbl);

	kfree(hw_ste_arr);

	return 0;

free_ste:
	/* Drop the last STE that was created but not yet added as a member */
	mlx5dr_ste_put(ste, matcher, nic_matcher);
free_rule:
	dr_rule_clean_rule_members(rule, nic_rule);
	/* Clean all ste_info's */
	list_for_each_entry_safe(ste_info, tmp_ste_info, &send_ste_list, send_list) {
		list_del(&ste_info->send_list);
		kfree(ste_info);
	}
free_hw_ste:
	kfree(hw_ste_arr);
out_err:
	return ret;
}
1132
1133 static int
dr_rule_create_rule_fdb(struct mlx5dr_rule * rule,struct mlx5dr_match_param * param,size_t num_actions,struct mlx5dr_action * actions[])1134 dr_rule_create_rule_fdb(struct mlx5dr_rule *rule,
1135 struct mlx5dr_match_param *param,
1136 size_t num_actions,
1137 struct mlx5dr_action *actions[])
1138 {
1139 struct mlx5dr_match_param copy_param = {};
1140 int ret;
1141
1142 /* Copy match_param since they will be consumed during the first
1143 * nic_rule insertion.
1144 */
1145 memcpy(©_param, param, sizeof(struct mlx5dr_match_param));
1146
1147 ret = dr_rule_create_rule_nic(rule, &rule->rx, param,
1148 num_actions, actions);
1149 if (ret)
1150 return ret;
1151
1152 ret = dr_rule_create_rule_nic(rule, &rule->tx, ©_param,
1153 num_actions, actions);
1154 if (ret)
1155 goto destroy_rule_nic_rx;
1156
1157 return 0;
1158
1159 destroy_rule_nic_rx:
1160 dr_rule_destroy_rule_nic(rule, &rule->rx);
1161 return ret;
1162 }
1163
1164 static struct mlx5dr_rule *
dr_rule_create_rule(struct mlx5dr_matcher * matcher,struct mlx5dr_match_parameters * value,size_t num_actions,struct mlx5dr_action * actions[])1165 dr_rule_create_rule(struct mlx5dr_matcher *matcher,
1166 struct mlx5dr_match_parameters *value,
1167 size_t num_actions,
1168 struct mlx5dr_action *actions[])
1169 {
1170 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
1171 struct mlx5dr_match_param param = {};
1172 struct mlx5dr_rule *rule;
1173 int ret;
1174
1175 if (!dr_rule_verify(matcher, value, ¶m))
1176 return NULL;
1177
1178 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
1179 if (!rule)
1180 return NULL;
1181
1182 rule->matcher = matcher;
1183 INIT_LIST_HEAD(&rule->rule_actions_list);
1184
1185 ret = dr_rule_add_action_members(rule, num_actions, actions);
1186 if (ret)
1187 goto free_rule;
1188
1189 switch (dmn->type) {
1190 case MLX5DR_DOMAIN_TYPE_NIC_RX:
1191 rule->rx.nic_matcher = &matcher->rx;
1192 ret = dr_rule_create_rule_nic(rule, &rule->rx, ¶m,
1193 num_actions, actions);
1194 break;
1195 case MLX5DR_DOMAIN_TYPE_NIC_TX:
1196 rule->tx.nic_matcher = &matcher->tx;
1197 ret = dr_rule_create_rule_nic(rule, &rule->tx, ¶m,
1198 num_actions, actions);
1199 break;
1200 case MLX5DR_DOMAIN_TYPE_FDB:
1201 rule->rx.nic_matcher = &matcher->rx;
1202 rule->tx.nic_matcher = &matcher->tx;
1203 ret = dr_rule_create_rule_fdb(rule, ¶m,
1204 num_actions, actions);
1205 break;
1206 default:
1207 ret = -EINVAL;
1208 break;
1209 }
1210
1211 if (ret)
1212 goto remove_action_members;
1213
1214 return rule;
1215
1216 remove_action_members:
1217 dr_rule_remove_action_members(rule);
1218 free_rule:
1219 kfree(rule);
1220 mlx5dr_info(dmn, "Failed creating rule\n");
1221 return NULL;
1222 }
1223
mlx5dr_rule_create(struct mlx5dr_matcher * matcher,struct mlx5dr_match_parameters * value,size_t num_actions,struct mlx5dr_action * actions[])1224 struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
1225 struct mlx5dr_match_parameters *value,
1226 size_t num_actions,
1227 struct mlx5dr_action *actions[])
1228 {
1229 struct mlx5dr_rule *rule;
1230
1231 mutex_lock(&matcher->tbl->dmn->mutex);
1232 refcount_inc(&matcher->refcount);
1233
1234 rule = dr_rule_create_rule(matcher, value, num_actions, actions);
1235 if (!rule)
1236 refcount_dec(&matcher->refcount);
1237
1238 mutex_unlock(&matcher->tbl->dmn->mutex);
1239
1240 return rule;
1241 }
1242
mlx5dr_rule_destroy(struct mlx5dr_rule * rule)1243 int mlx5dr_rule_destroy(struct mlx5dr_rule *rule)
1244 {
1245 struct mlx5dr_matcher *matcher = rule->matcher;
1246 struct mlx5dr_table *tbl = rule->matcher->tbl;
1247 int ret;
1248
1249 mutex_lock(&tbl->dmn->mutex);
1250
1251 ret = dr_rule_destroy_rule(rule);
1252
1253 mutex_unlock(&tbl->dmn->mutex);
1254
1255 if (!ret)
1256 refcount_dec(&matcher->refcount);
1257 return ret;
1258 }
1259