// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_common.h"
#include "ice_flex_pipe.h"
#include "ice_flow.h"

/* To support tunneling entries by PF, the package will append the PF number to
 * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
 */
static const struct ice_tunnel_type_scan tnls[] = {
	{ TNL_VXLAN, "TNL_VXLAN_PF" },
	{ TNL_GENEVE, "TNL_GENEVE_PF" },
	{ TNL_LAST, "" }
};

static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
	/* SWITCH */
	{
		ICE_SID_XLT0_SW,
		ICE_SID_XLT_KEY_BUILDER_SW,
		ICE_SID_XLT1_SW,
		ICE_SID_XLT2_SW,
		ICE_SID_PROFID_TCAM_SW,
		ICE_SID_PROFID_REDIR_SW,
		ICE_SID_FLD_VEC_SW,
		ICE_SID_CDID_KEY_BUILDER_SW,
		ICE_SID_CDID_REDIR_SW
	},

	/* ACL */
	{
		ICE_SID_XLT0_ACL,
		ICE_SID_XLT_KEY_BUILDER_ACL,
		ICE_SID_XLT1_ACL,
		ICE_SID_XLT2_ACL,
		ICE_SID_PROFID_TCAM_ACL,
		ICE_SID_PROFID_REDIR_ACL,
		ICE_SID_FLD_VEC_ACL,
		ICE_SID_CDID_KEY_BUILDER_ACL,
		ICE_SID_CDID_REDIR_ACL
	},

	/* FD */
	{
		ICE_SID_XLT0_FD,
		ICE_SID_XLT_KEY_BUILDER_FD,
		ICE_SID_XLT1_FD,
		ICE_SID_XLT2_FD,
		ICE_SID_PROFID_TCAM_FD,
		ICE_SID_PROFID_REDIR_FD,
		ICE_SID_FLD_VEC_FD,
		ICE_SID_CDID_KEY_BUILDER_FD,
		ICE_SID_CDID_REDIR_FD
	},

	/* RSS */
	{
		ICE_SID_XLT0_RSS,
		ICE_SID_XLT_KEY_BUILDER_RSS,
		ICE_SID_XLT1_RSS,
		ICE_SID_XLT2_RSS,
		ICE_SID_PROFID_TCAM_RSS,
		ICE_SID_PROFID_REDIR_RSS,
		ICE_SID_FLD_VEC_RSS,
		ICE_SID_CDID_KEY_BUILDER_RSS,
		ICE_SID_CDID_REDIR_RSS
	},

	/* PE */
	{
		ICE_SID_XLT0_PE,
		ICE_SID_XLT_KEY_BUILDER_PE,
		ICE_SID_XLT1_PE,
		ICE_SID_XLT2_PE,
		ICE_SID_PROFID_TCAM_PE,
		ICE_SID_PROFID_REDIR_PE,
		ICE_SID_FLD_VEC_PE,
		ICE_SID_CDID_KEY_BUILDER_PE,
		ICE_SID_CDID_REDIR_PE
	}
};

/**
 * ice_sect_id - returns section ID
 * @blk: block type
 * @sect: section type
 *
 * This helper function returns the proper section ID given a block type and a
 * section type.
 */
static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
{
	return ice_sect_lkup[blk][sect];
}

/**
 * ice_pkg_val_buf
 * @buf: pointer to the ice buffer
 *
 * This helper function validates a buffer's header.
 */
static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
{
	struct ice_buf_hdr *hdr;
	u16 section_count;
	u16 data_end;

	hdr = (struct ice_buf_hdr *)buf->buf;
	/* verify data */
	section_count = le16_to_cpu(hdr->section_count);
	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
		return NULL;

	data_end = le16_to_cpu(hdr->data_end);
	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
		return NULL;

	return hdr;
}

/**
 * ice_find_buf_table
 * @ice_seg: pointer to the ice segment
 *
 * Returns the address of the buffer table within the ice segment.
 */
static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
{
	struct ice_nvm_table *nvms;

	nvms = (struct ice_nvm_table *)
		(ice_seg->device_table +
		 le32_to_cpu(ice_seg->device_table_count));

	return (__force struct ice_buf_table *)
		(nvms->vers + le32_to_cpu(nvms->table_count));
}

/**
 * ice_pkg_enum_buf
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This function will enumerate all the buffers in the ice segment. The first
 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
 * ice_seg is set to NULL which continues the enumeration. When the function
 * returns a NULL pointer, then the end of the buffers has been reached, or an
 * unexpected value has been detected (for example an invalid section count or
 * an invalid buffer end value).
 */
static struct ice_buf_hdr *
ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
{
	if (ice_seg) {
		state->buf_table = ice_find_buf_table(ice_seg);
		if (!state->buf_table)
			return NULL;

		state->buf_idx = 0;
		return ice_pkg_val_buf(state->buf_table->buf_array);
	}

	if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
		return ice_pkg_val_buf(state->buf_table->buf_array +
				       state->buf_idx);
	else
		return NULL;
}

/**
 * ice_pkg_advance_sect
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This helper function will advance the section within the ice segment,
 * also advancing the buffer if needed.
 */
static bool
ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
{
	if (!ice_seg && !state->buf)
		return false;

	if (!ice_seg && state->buf)
		if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
			return true;

	state->buf = ice_pkg_enum_buf(ice_seg, state);
	if (!state->buf)
		return false;

	/* start of new buffer, reset section index */
	state->sect_idx = 0;
	return true;
}

/**
 * ice_pkg_enum_section
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 *
 * This function will enumerate all the sections of a particular type in the
 * ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
 * When the function returns a NULL pointer, then the end of the matching
 * sections has been reached.
 */
static void *
ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
		     u32 sect_type)
{
	u16 offset, size;

	if (ice_seg)
		state->type = sect_type;

	if (!ice_pkg_advance_sect(ice_seg, state))
		return NULL;

	/* scan for next matching section */
	while (state->buf->section_entry[state->sect_idx].type !=
	       cpu_to_le32(state->type))
		if (!ice_pkg_advance_sect(NULL, state))
			return NULL;

	/* validate section */
	offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
		return NULL;

	size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
		return NULL;

	/* make sure the section fits in the buffer */
	if (offset + size > ICE_PKG_BUF_SIZE)
		return NULL;

	state->sect_type =
		le32_to_cpu(state->buf->section_entry[state->sect_idx].type);

	/* calc pointer to this section */
	state->sect = ((u8 *)state->buf) +
		le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);

	return state->sect;
}

/**
 * ice_pkg_enum_entry
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 * @offset: pointer to variable that receives the offset in the table (optional)
 * @handler: function that handles access to the entries into the section type
 *
 * This function will enumerate all the entries in a particular section type in
 * the ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
 * When the function returns a NULL pointer, then the end of the entries has
 * been reached.
 *
 * Since each section may have a different header and entry size, the handler
 * function is needed to determine the number and location of entries in each
 * section.
 *
 * The offset parameter is optional, but should be used for sections that
 * contain an offset for each section table. For such cases, the section handler
 * function must return the appropriate offset + index to give the absolute
 * offset for each entry. For example, if the base for a section's header
 * indicates a base offset of 10, and the index for the entry is 2, then the
 * section handler function should set the offset to 10 + 2 = 12.
 */
static void *
ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
		   u32 sect_type, u32 *offset,
		   void *(*handler)(u32 sect_type, void *section,
				    u32 index, u32 *offset))
{
	void *entry;

	if (ice_seg) {
		if (!handler)
			return NULL;

		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
			return NULL;

		state->entry_idx = 0;
		state->handler = handler;
	} else {
		state->entry_idx++;
	}

	if (!state->handler)
		return NULL;

	/* get entry */
	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
			       offset);
	if (!entry) {
		/* end of a section, look for another section of this type */
		if (!ice_pkg_enum_section(NULL, state, 0))
			return NULL;

		state->entry_idx = 0;
		entry = state->handler(state->sect_type, state->sect,
				       state->entry_idx, offset);
	}

	return entry;
}

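/* Illustrative usage of the enumeration helpers above (a sketch mirroring
 * ice_find_boost_entry() below): pass the segment on the first call, then
 * NULL to continue, until the helper returns NULL:
 *
 *	entry = ice_pkg_enum_entry(ice_seg, &state, sect_type, NULL, handler);
 *	while (entry) {
 *		(use entry)
 *		entry = ice_pkg_enum_entry(NULL, &state, sect_type, NULL,
 *					   handler);
 *	}
 */
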
/**
 * ice_boost_tcam_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the boost TCAM entry to be returned
 * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual boost TCAM entries.
 */
static void *
ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
{
	struct ice_boost_tcam_section *boost;

	if (!section)
		return NULL;

	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
		return NULL;

	if (index > ICE_MAX_BST_TCAMS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	boost = section;
	if (index >= le16_to_cpu(boost->count))
		return NULL;

	return boost->tcam + index;
}

/**
 * ice_find_boost_entry
 * @ice_seg: pointer to the ice segment (non-NULL)
 * @addr: Boost TCAM address of entry to search for
 * @entry: returns pointer to the entry
 *
 * Finds a particular Boost TCAM entry and returns a pointer to that entry
 * if it is found. The ice_seg parameter must not be NULL since the first call
 * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
 */
static enum ice_status
ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
		     struct ice_boost_tcam_entry **entry)
{
	struct ice_boost_tcam_entry *tcam;
	struct ice_pkg_enum state;

	memset(&state, 0, sizeof(state));

	if (!ice_seg)
		return ICE_ERR_PARAM;

	do {
		tcam = ice_pkg_enum_entry(ice_seg, &state,
					  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
					  ice_boost_tcam_handler);
		if (tcam && le16_to_cpu(tcam->addr) == addr) {
			*entry = tcam;
			return 0;
		}

		ice_seg = NULL;
	} while (tcam);

	*entry = NULL;
	return ICE_ERR_CFG;
}

/**
 * ice_label_enum_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the label entry to be returned
 * @offset: pointer to receive absolute offset, always zero for label sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual label entries.
 */
static void *
ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
		       u32 *offset)
{
	struct ice_label_section *labels;

	if (!section)
		return NULL;

	if (index > ICE_MAX_LABELS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	labels = section;
	if (index >= le16_to_cpu(labels->count))
		return NULL;

	return labels->label + index;
}

/**
 * ice_enum_labels
 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
 * @type: the section type that will contain the label (0 on subsequent calls)
 * @state: ice_pkg_enum structure that will hold the state of the enumeration
 * @value: pointer to a value that will return the label's value if found
 *
 * Enumerates a list of labels in the package. The caller will call
 * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
 * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL
 * pointer, the end of the list has been reached.
 */
static char *
ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
		u16 *value)
{
	struct ice_label *label;

	/* Check for valid label section on first call */
	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
		return NULL;

	label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
				   ice_label_enum_handler);
	if (!label)
		return NULL;

	*value = le16_to_cpu(label->value);
	return label->name;
}

/**
 * ice_init_pkg_hints
 * @hw: pointer to the HW structure
 * @ice_seg: pointer to the segment of the package scan (non-NULL)
 *
 * This function will scan the package and save off relevant information
 * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
 * since the first call to ice_enum_labels requires a pointer to an actual
 * ice_seg structure.
 */
static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_pkg_enum state;
	char *label_name;
	u16 val;
	int i;

	memset(&hw->tnl, 0, sizeof(hw->tnl));
	memset(&state, 0, sizeof(state));

	if (!ice_seg)
		return;

	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
				     &val);

	while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
		for (i = 0; tnls[i].type != TNL_LAST; i++) {
			size_t len = strlen(tnls[i].label_prefix);

			/* Look for matching label start, before continuing */
			if (strncmp(label_name, tnls[i].label_prefix, len))
				continue;

			/* Make sure this label matches our PF. Note that the
			 * PF character ('0' - '7') will be located where our
			 * prefix string's null terminator is located.
			 */
			if ((label_name[len] - '0') == hw->pf_id) {
				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
				hw->tnl.tbl[hw->tnl.count].valid = false;
				hw->tnl.tbl[hw->tnl.count].boost_addr = val;
				hw->tnl.tbl[hw->tnl.count].port = 0;
				hw->tnl.count++;
				break;
			}
		}

		label_name = ice_enum_labels(NULL, 0, &state, &val);
	}

	/* Cache the appropriate boost TCAM entry pointers */
	for (i = 0; i < hw->tnl.count; i++) {
		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
				     &hw->tnl.tbl[i].boost_entry);
		if (hw->tnl.tbl[i].boost_entry) {
			hw->tnl.tbl[i].valid = true;
			if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT)
				hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
		}
	}
}

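/* Example of the hint scan above: on PF 0, a label named "TNL_VXLAN_PF0"
 * with a (hypothetical) value of 0x1234 records a TNL_VXLAN tunnel entry
 * whose boost_addr is 0x1234; the boost TCAM entry at that address is then
 * cached so it can be patched when a tunnel port is created or destroyed.
 */
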
/* Key creation */

#define ICE_DC_KEY	0x1	/* don't care */
#define ICE_DC_KEYINV	0x1
#define ICE_NM_KEY	0x0	/* never match */
#define ICE_NM_KEYINV	0x0
#define ICE_0_KEY	0x1	/* match 0 */
#define ICE_0_KEYINV	0x0
#define ICE_1_KEY	0x0	/* match 1 */
#define ICE_1_KEYINV	0x1

/**
 * ice_gen_key_word - generate 16-bits of a key/mask word
 * @val: the value
 * @valid: valid bits mask (change only the valid bits)
 * @dont_care: don't care mask
 * @nvr_mtch: never match mask
 * @key: pointer to where the resulting key portion will be stored
 * @key_inv: pointer to where the resulting key invert portion will be stored
 *
 * This function generates 16-bits from an 8-bit value, an 8-bit don't care
 * mask and an 8-bit never match mask. The 16-bits of output are divided into
 * 8 bits of key and 8 bits of key invert.
 *
 *     '0' = b01, always match a 0 bit
 *     '1' = b10, always match a 1 bit
 *     '?' = b11, don't care bit (always matches)
 *     '~' = b00, never match bit
 *
 * Input:
 *          val:         b0  1  0  1  0  1
 *          dont_care:   b0  0  1  1  0  0
 *          never_mtch:  b0  0  0  0  1  1
 *          ------------------------------
 * Result:  key:        b01 10 11 11 00 00
 */
static enum ice_status
ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
		 u8 *key_inv)
{
	u8 in_key = *key, in_key_inv = *key_inv;
	u8 i;

	/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
	if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
		return ICE_ERR_CFG;

	*key = 0;
	*key_inv = 0;

	/* encode the 8 bits into 8-bit key and 8-bit key invert */
	for (i = 0; i < 8; i++) {
		*key >>= 1;
		*key_inv >>= 1;

		if (!(valid & 0x1)) { /* change only valid bits */
			*key |= (in_key & 0x1) << 7;
			*key_inv |= (in_key_inv & 0x1) << 7;
		} else if (dont_care & 0x1) { /* don't care bit */
			*key |= ICE_DC_KEY << 7;
			*key_inv |= ICE_DC_KEYINV << 7;
		} else if (nvr_mtch & 0x1) { /* never match bit */
			*key |= ICE_NM_KEY << 7;
			*key_inv |= ICE_NM_KEYINV << 7;
		} else if (val & 0x01) { /* exact 1 match */
			*key |= ICE_1_KEY << 7;
			*key_inv |= ICE_1_KEYINV << 7;
		} else { /* exact 0 match */
			*key |= ICE_0_KEY << 7;
			*key_inv |= ICE_0_KEYINV << 7;
		}

		dont_care >>= 1;
		nvr_mtch >>= 1;
		valid >>= 1;
		val >>= 1;
		in_key >>= 1;
		in_key_inv >>= 1;
	}

	return 0;
}

/**
 * ice_bits_max_set - determine if the number of bits set is within a maximum
 * @mask: pointer to the byte array which is the mask
 * @size: the number of bytes in the mask
 * @max: the max number of set bits
 *
 * This function determines if there are at most 'max' number of bits set in an
 * array. Returns true if the number of bits set is <= max; otherwise returns
 * false.
 */
static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
{
	u16 count = 0;
	u16 i;

	/* check each byte */
	for (i = 0; i < size; i++) {
		/* if 0, go to next byte */
		if (!mask[i])
			continue;

		/* We know there is at least one set bit in this byte because of
		 * the above check; if we already have found 'max' number of
		 * bits set, then we can return failure now.
		 */
		if (count == max)
			return false;

		/* count the bits in this byte, checking threshold */
		count += hweight8(mask[i]);
		if (count > max)
			return false;
	}

	return true;
}

/**
 * ice_set_key - generate a variable sized key with multiples of 16-bits
 * @key: pointer to where the key will be stored
 * @size: the size of the complete key in bytes (must be even)
 * @val: array of 8-bit values that makes up the value portion of the key
 * @upd: array of 8-bit masks that determine what key portion to update
 * @dc: array of 8-bit masks that make up the don't care mask
 * @nm: array of 8-bit masks that make up the never match mask
 * @off: the offset of the first byte in the key to update
 * @len: the number of bytes in the key update
 *
 * This function generates a key from a value, a don't care mask and a never
 * match mask.
 * upd, dc, and nm are optional parameters, and can be NULL:
 *	upd == NULL --> upd mask is all 1's (update all bits)
 *	dc == NULL --> dc mask is all 0's (no don't care bits)
 *	nm == NULL --> nm mask is all 0's (no never match bits)
 */
static enum ice_status
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
	    u16 len)
{
	u16 half_size;
	u16 i;

	/* size must be a multiple of 2 bytes. */
	if (size % 2)
		return ICE_ERR_CFG;

	half_size = size / 2;
	if (off + len > half_size)
		return ICE_ERR_CFG;

	/* Make sure at most one bit is set in the never match mask. Having more
	 * than one never match mask bit set will cause HW to consume excessive
	 * power otherwise; this is a power management efficiency check.
	 */
#define ICE_NVR_MTCH_BITS_MAX	1
	if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
		return ICE_ERR_CFG;

	for (i = 0; i < len; i++)
		if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
				     dc ? dc[i] : 0, nm ? nm[i] : 0,
				     key + off + i, key + half_size + off + i))
			return ICE_ERR_CFG;

	return 0;
}

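/* Layout note for ice_set_key(): the first half of 'key' (bytes [0, size / 2))
 * holds the key bits and the second half holds the key invert bits, so an
 * update of 'len' bytes at offset 'off' writes key[off + i] and
 * key[size / 2 + off + i] for each byte i.
 */
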
/**
 * ice_acquire_global_cfg_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the global config lock for reading
 * or writing of the package. When attempting to obtain write access, the
 * caller must check for the following two return values:
 *
 * ICE_SUCCESS        - Means the caller has acquired the global config lock
 *                      and can perform writing of the package.
 * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
 *                      package or has found that no update was necessary; in
 *                      this case, the caller can just skip performing any
 *                      update of the package.
 */
static enum ice_status
ice_acquire_global_cfg_lock(struct ice_hw *hw,
			    enum ice_aq_res_access_type access)
{
	enum ice_status status;

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);

	if (!status)
		mutex_lock(&ice_global_cfg_lock_sw);
	else if (status == ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_PKG,
			  "Global config lock: No work to do\n");

	return status;
}

/**
 * ice_release_global_cfg_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the global config lock.
 */
static void ice_release_global_cfg_lock(struct ice_hw *hw)
{
	mutex_unlock(&ice_global_cfg_lock_sw);
	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
}

/**
 * ice_acquire_change_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the change lock.
 */
static enum ice_status
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
			       ICE_CHANGE_LOCK_TIMEOUT);
}

/**
 * ice_release_change_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the change lock using the proper Admin Command.
 */
static void ice_release_change_lock(struct ice_hw *hw)
{
	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
}

/**
 * ice_aq_download_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package buffer to transfer
 * @buf_size: the size of the package buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Download Package (0x0C40)
 */
static enum ice_status
ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
		    u16 buf_size, bool last_buf, u32 *error_offset,
		    u32 *error_info, struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}

/**
 * ice_aq_update_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package cmd buffer
 * @buf_size: the size of the package cmd buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Update Package (0x0C42)
 */
static enum ice_status
ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
		  bool last_buf, u32 *error_offset, u32 *error_info,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}

/**
 * ice_find_seg_in_pkg
 * @hw: pointer to the hardware structure
 * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
 * @pkg_hdr: pointer to the package header to be searched
 *
 * This function searches a package file for a particular segment type. On
 * success it returns a pointer to the segment header, otherwise it will
 * return NULL.
 */
static struct ice_generic_seg_hdr *
ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
		    struct ice_pkg_hdr *pkg_hdr)
{
	u32 i;

	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
		  pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
		  pkg_hdr->pkg_format_ver.update,
		  pkg_hdr->pkg_format_ver.draft);

	/* Search all package segments for the requested segment type */
	for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
		struct ice_generic_seg_hdr *seg;

		seg = (struct ice_generic_seg_hdr *)
			((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]));

		if (le32_to_cpu(seg->seg_type) == seg_type)
			return seg;
	}

	return NULL;
}

/**
 * ice_update_pkg
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains change lock and updates package.
 */
static enum ice_status
ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_status status;
	u32 offset, info, i;

	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		return status;

	for (i = 0; i < count; i++) {
		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
		bool last = ((i + 1) == count);

		status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
					   last, &offset, &info, NULL);

		if (status) {
			ice_debug(hw, ICE_DBG_PKG,
				  "Update pkg failed: err %d off %d inf %d\n",
				  status, offset, info);
			break;
		}
	}

	ice_release_change_lock(hw);

	return status;
}

/**
 * ice_dwnld_cfg_bufs
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains global config lock and downloads the package configuration buffers
 * to the firmware. Metadata buffers are skipped, and the first metadata buffer
 * found indicates that the rest of the buffers are all metadata buffers.
 */
static enum ice_status
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_status status;
	struct ice_buf_hdr *bh;
	u32 offset, info, i;

	if (!bufs || !count)
		return ICE_ERR_PARAM;

	/* If the first buffer's first section has its metadata bit set
	 * then there are no buffers to be downloaded, and the operation is
	 * considered a success.
	 */
	bh = (struct ice_buf_hdr *)bufs;
	if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
		return 0;

	/* reset pkg_dwnld_status in case this function is called in the
	 * reset/rebuild flow
	 */
	hw->pkg_dwnld_status = ICE_AQ_RC_OK;

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	if (status) {
		if (status == ICE_ERR_AQ_NO_WORK)
			hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
		else
			hw->pkg_dwnld_status = hw->adminq.sq_last_status;
		return status;
	}

	for (i = 0; i < count; i++) {
		bool last = ((i + 1) == count);

		if (!last) {
			/* check next buffer for metadata flag */
			bh = (struct ice_buf_hdr *)(bufs + i + 1);

			/* A set metadata flag in the next buffer will signal
			 * that the current buffer will be the last buffer
			 * downloaded
			 */
			if (le16_to_cpu(bh->section_count))
				if (le32_to_cpu(bh->section_entry[0].type) &
				    ICE_METADATA_BUF)
					last = true;
		}

		bh = (struct ice_buf_hdr *)(bufs + i);

		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
					     &offset, &info, NULL);

		/* Save AQ status from download package */
		hw->pkg_dwnld_status = hw->adminq.sq_last_status;
		if (status) {
			ice_debug(hw, ICE_DBG_PKG,
				  "Pkg download failed: err %d off %d inf %d\n",
				  status, offset, info);

			break;
		}

		if (last)
			break;
	}

	ice_release_global_cfg_lock(hw);

	return status;
}

/**
 * ice_aq_get_pkg_info_list
 * @hw: pointer to the hardware structure
 * @pkg_info: the buffer which will receive the information list
 * @buf_size: the size of the pkg_info information buffer
 * @cd: pointer to command details structure or NULL
 *
 * Get Package Info List (0x0C43)
 */
static enum ice_status
ice_aq_get_pkg_info_list(struct ice_hw *hw,
			 struct ice_aqc_get_pkg_info_resp *pkg_info,
			 u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);

	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
}

/**
 * ice_download_pkg
 * @hw: pointer to the hardware structure
 * @ice_seg: pointer to the segment of the package to be downloaded
 *
 * Handles the download of a complete package.
 */
static enum ice_status
ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_buf_table *ice_buf_tbl;

	ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
		  ice_seg->hdr.seg_format_ver.major,
		  ice_seg->hdr.seg_format_ver.minor,
		  ice_seg->hdr.seg_format_ver.update,
		  ice_seg->hdr.seg_format_ver.draft);

	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
		  le32_to_cpu(ice_seg->hdr.seg_type),
		  le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);

	ice_buf_tbl = ice_find_buf_table(ice_seg);

	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
		  le32_to_cpu(ice_buf_tbl->buf_count));

	return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
				  le32_to_cpu(ice_buf_tbl->buf_count));
}

/**
 * ice_init_pkg_info
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to the driver's package hdr
 *
 * Saves off the package details into the HW structure.
 */
static enum ice_status
ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
	struct ice_global_metadata_seg *meta_seg;
	struct ice_generic_seg_hdr *seg_hdr;

	if (!pkg_hdr)
		return ICE_ERR_PARAM;

	meta_seg = (struct ice_global_metadata_seg *)
		   ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr);
	if (meta_seg) {
		hw->pkg_ver = meta_seg->pkg_ver;
		memcpy(hw->pkg_name, meta_seg->pkg_name, sizeof(hw->pkg_name));

		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
			  meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor,
			  meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft,
			  meta_seg->pkg_name);
	} else {
		ice_debug(hw, ICE_DBG_INIT,
			  "Did not find metadata segment in driver package\n");
		return ICE_ERR_CFG;
	}

	seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
	if (seg_hdr) {
		hw->ice_pkg_ver = seg_hdr->seg_format_ver;
		memcpy(hw->ice_pkg_name, seg_hdr->seg_id,
		       sizeof(hw->ice_pkg_name));

		ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
			  seg_hdr->seg_format_ver.major,
			  seg_hdr->seg_format_ver.minor,
			  seg_hdr->seg_format_ver.update,
			  seg_hdr->seg_format_ver.draft,
			  seg_hdr->seg_id);
	} else {
		ice_debug(hw, ICE_DBG_INIT,
			  "Did not find ice segment in driver package\n");
		return ICE_ERR_CFG;
	}

	return 0;
}

/**
 * ice_get_pkg_info
 * @hw: pointer to the hardware structure
 *
 * Store details of the package currently loaded in HW into the HW structure.
 */
static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
{
	struct ice_aqc_get_pkg_info_resp *pkg_info;
	enum ice_status status;
	u16 size;
	u32 i;

	size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
	pkg_info = kzalloc(size, GFP_KERNEL);
	if (!pkg_info)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
	if (status)
		goto init_pkg_free_alloc;

	for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
#define ICE_PKG_FLAG_COUNT	4
		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
		u8 place = 0;

		if (pkg_info->pkg_info[i].is_active) {
			flags[place++] = 'A';
			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
			hw->active_track_id =
				le32_to_cpu(pkg_info->pkg_info[i].track_id);
			memcpy(hw->active_pkg_name,
			       pkg_info->pkg_info[i].name,
			       sizeof(pkg_info->pkg_info[i].name));
			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
		}
		if (pkg_info->pkg_info[i].is_active_at_boot)
			flags[place++] = 'B';
		if (pkg_info->pkg_info[i].is_modified)
			flags[place++] = 'M';
		if (pkg_info->pkg_info[i].is_in_nvm)
			flags[place++] = 'N';

		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
			  i, pkg_info->pkg_info[i].ver.major,
			  pkg_info->pkg_info[i].ver.minor,
			  pkg_info->pkg_info[i].ver.update,
			  pkg_info->pkg_info[i].ver.draft,
			  pkg_info->pkg_info[i].name, flags);
	}

init_pkg_free_alloc:
	kfree(pkg_info);

	return status;
}

/**
 * ice_verify_pkg - verify package
 * @pkg: pointer to the package buffer
 * @len: size of the package buffer
 *
 * Verifies various attributes of the package file, including length, format
 * version, and the requirement of at least one segment.
 */
static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
{
	u32 seg_count;
	u32 i;

	if (len < struct_size(pkg, seg_offset, 1))
		return ICE_ERR_BUF_TOO_SHORT;

	if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
	    pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
	    pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
	    pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
		return ICE_ERR_CFG;

	/* pkg must have at least one segment */
	seg_count = le32_to_cpu(pkg->seg_count);
	if (seg_count < 1)
		return ICE_ERR_CFG;

	/* make sure segment array fits in package length */
	if (len < struct_size(pkg, seg_offset, seg_count))
		return ICE_ERR_BUF_TOO_SHORT;

	/* all segments must fit within length */
	for (i = 0; i < seg_count; i++) {
		u32 off = le32_to_cpu(pkg->seg_offset[i]);
		struct ice_generic_seg_hdr *seg;

		/* segment header must fit */
		if (len < off + sizeof(*seg))
			return ICE_ERR_BUF_TOO_SHORT;

		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);

		/* segment body must fit */
		if (len < off + le32_to_cpu(seg->seg_size))
			return ICE_ERR_BUF_TOO_SHORT;
	}

	return 0;
}

/**
 * ice_free_seg - free package segment pointer
 * @hw: pointer to the hardware structure
 *
 * Frees the package segment pointer in the proper manner, depending on whether
 * the segment was allocated or just the passed in pointer was stored.
 */
void ice_free_seg(struct ice_hw *hw)
{
	if (hw->pkg_copy) {
		devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
		hw->pkg_copy = NULL;
		hw->pkg_size = 0;
	}
	hw->seg = NULL;
}

/**
 * ice_init_pkg_regs - initialize additional package registers
 * @hw: pointer to the hardware structure
 */
static void ice_init_pkg_regs(struct ice_hw *hw)
{
#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
#define ICE_SW_BLK_IDX	0

	/* setup Switch block input mask, which is 48-bits in two parts */
	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
}

/**
 * ice_chk_pkg_version - check package version for compatibility with driver
 * @pkg_ver: pointer to a version structure to check
 *
 * Check to make sure that the package about to be downloaded is compatible with
 * the driver. To be compatible, the major and minor components of the package
 * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
 * definitions.
 */
static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
{
	if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
	    pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
		return ICE_ERR_NOT_SUPPORTED;

	return 0;
}

/**
 * ice_chk_pkg_compat
 * @hw: pointer to the hardware structure
 * @ospkg: pointer to the package hdr
 * @seg: pointer to the package segment hdr
 *
 * This function checks the package version compatibility with driver and NVM
 */
static enum ice_status
ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
		   struct ice_seg **seg)
{
	struct ice_aqc_get_pkg_info_resp *pkg;
	enum ice_status status;
	u16 size;
	u32 i;

	/* Check package version compatibility */
	status = ice_chk_pkg_version(&hw->pkg_ver);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
		return status;
	}

	/* find ICE segment in given package */
	*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
						     ospkg);
	if (!*seg) {
		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
		return ICE_ERR_CFG;
	}

	/* Check if FW is compatible with the OS package */
	size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
	pkg = kzalloc(size, GFP_KERNEL);
	if (!pkg)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
	if (status)
		goto fw_ddp_compat_free_alloc;

	for (i = 0; i < le32_to_cpu(pkg->count); i++) {
		/* loop till we find the NVM package */
		if (!pkg->pkg_info[i].is_in_nvm)
			continue;
		if ((*seg)->hdr.seg_format_ver.major !=
		    pkg->pkg_info[i].ver.major ||
		    (*seg)->hdr.seg_format_ver.minor >
		    pkg->pkg_info[i].ver.minor) {
			status = ICE_ERR_FW_DDP_MISMATCH;
			ice_debug(hw, ICE_DBG_INIT,
				  "OS package is not compatible with NVM.\n");
		}
		/* done processing NVM package so break */
		break;
	}
fw_ddp_compat_free_alloc:
	kfree(pkg);
	return status;
}

/**
 * ice_init_pkg - initialize/download package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function initializes a package. The package contains HW tables
 * required to do packet processing. First, the function extracts package
 * information such as version. Then it finds the ice configuration segment
 * within the package; this function then saves a copy of the segment pointer
 * within the supplied package buffer. Next, the function will cache any hints
 * from the package, followed by downloading the package itself. Note that if
 * a previous PF driver has already downloaded the package successfully, then
 * the current driver will not have to download the package again.
 *
 * The local package contents will be used to query default behavior and to
 * update specific sections of the HW's version of the package (e.g. to update
 * the parse graph to understand new protocols).
 *
 * This function stores a pointer to the package buffer memory, and it is
 * expected that the supplied buffer will not be freed immediately. If the
 * package buffer needs to be freed, such as when read from a file, use
 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
 * case.
 */
enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
{
	struct ice_pkg_hdr *pkg;
	enum ice_status status;
	struct ice_seg *seg;

	if (!buf || !len)
		return ICE_ERR_PARAM;

	pkg = (struct ice_pkg_hdr *)buf;
	status = ice_verify_pkg(pkg, len);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
			  status);
		return status;
	}

	/* initialize package info */
	status = ice_init_pkg_info(hw, pkg);
	if (status)
		return status;

	/* before downloading the package, check package version for
	 * compatibility with driver
	 */
	status = ice_chk_pkg_compat(hw, pkg, &seg);
	if (status)
		return status;

	/* initialize package hints and then download package */
	ice_init_pkg_hints(hw, seg);
	status = ice_download_pkg(hw, seg);
	if (status == ICE_ERR_AQ_NO_WORK) {
		ice_debug(hw, ICE_DBG_INIT,
			  "package previously loaded - no work.\n");
		status = 0;
	}

	/* Get information on the package currently loaded in HW, then make sure
	 * the driver is compatible with this version.
	 */
	if (!status) {
		status = ice_get_pkg_info(hw);
		if (!status)
			status = ice_chk_pkg_version(&hw->active_pkg_ver);
	}

	if (!status) {
		hw->seg = seg;
		/* on successful package download update other required
		 * registers to support the package and fill HW tables
		 * with package content.
		 */
		ice_init_pkg_regs(hw);
		ice_fill_blk_tbls(hw);
	} else {
		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
			  status);
	}

	return status;
}

/**
 * ice_copy_and_init_pkg - initialize/download a copy of the package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function copies the package buffer, and then calls ice_init_pkg() to
 * initialize the copied package contents.
 *
 * The copying is necessary if the package buffer supplied is constant, or if
 * the memory may disappear shortly after calling this function.
 *
 * If the package buffer resides in the data segment and can be modified, the
 * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
 *
 * However, if the package buffer needs to be copied first, such as when being
 * read from a file, the caller should use ice_copy_and_init_pkg().
 *
 * This function will first copy the package buffer, before calling
 * ice_init_pkg(). The caller is free to immediately destroy the original
 * package buffer, as the new copy will be managed by this function and
 * related routines.
 */
enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
{
	enum ice_status status;
	u8 *buf_copy;

	if (!buf || !len)
		return ICE_ERR_PARAM;

	buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);

	status = ice_init_pkg(hw, buf_copy, len);
	if (status) {
		/* Free the copy, since we failed to initialize the package */
		devm_kfree(ice_hw_to_dev(hw), buf_copy);
	} else {
		/* Track the copied pkg so we can free it later */
		hw->pkg_copy = buf_copy;
		hw->pkg_size = len;
	}

	return status;
}

/**
 * ice_pkg_buf_alloc
 * @hw: pointer to the HW structure
 *
 * Allocates a package buffer and returns a pointer to the buffer header.
 * Note: all package contents must be in Little Endian form.
 */
static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
{
	struct ice_buf_build *bld;
	struct ice_buf_hdr *buf;

	bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
	if (!bld)
		return NULL;

	buf = (struct ice_buf_hdr *)bld;
	buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr,
					     section_entry));
	return bld;
}

/**
 * ice_pkg_buf_free
 * @hw: pointer to the HW structure
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Frees a package buffer
 */
static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
{
	devm_kfree(ice_hw_to_dev(hw), bld);
}

/**
 * ice_pkg_buf_reserve_section
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @count: the number of sections to reserve
 *
 * Reserves one or more section table entries in a package buffer. This routine
 * can be called multiple times as long as all calls are made before calling
 * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
 * called, the number of sections that can be allocated can no longer be
 * increased; not using all reserved sections is fine, but it will result in
 * some wasted space in the buffer.
 * Note: all package contents must be in Little Endian form.
 */
static enum ice_status
ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
{
	struct ice_buf_hdr *buf;
	u16 section_count;
	u16 data_end;

	if (!bld)
		return ICE_ERR_PARAM;

	buf = (struct ice_buf_hdr *)&bld->buf;

	/* already an active section, can't increase table size */
	section_count = le16_to_cpu(buf->section_count);
	if (section_count > 0)
		return ICE_ERR_CFG;

	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
		return ICE_ERR_CFG;
	bld->reserved_section_table_entries += count;

	data_end = le16_to_cpu(buf->data_end) +
		   (count * sizeof(buf->section_entry[0]));
	buf->data_end = cpu_to_le16(data_end);

	return 0;
}

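/* Typical build sequence for an update-package buffer (see
 * ice_create_tunnel() below): ice_pkg_buf_alloc(), then
 * ice_pkg_buf_reserve_section() for the number of sections needed, then one
 * ice_pkg_buf_alloc_section() per section to fill in, and finally
 * ice_update_pkg() with ice_pkg_buf() as the buffer array.
 */
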
/**
 * ice_pkg_buf_alloc_section
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @type: the section type value
 * @size: the size of the section to reserve (in bytes)
 *
 * Reserves memory in the buffer for a section's content and updates the
 * buffers' status accordingly. This routine returns a pointer to the first
 * byte of the section start within the buffer, which is used to fill in the
 * section contents.
 * Note: all package contents must be in Little Endian form.
 */
static void *
ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
{
	struct ice_buf_hdr *buf;
	u16 sect_count;
	u16 data_end;

	if (!bld || !type || !size)
		return NULL;

	buf = (struct ice_buf_hdr *)&bld->buf;

	/* check for enough space left in buffer */
	data_end = le16_to_cpu(buf->data_end);

	/* section start must align on 4 byte boundary */
	data_end = ALIGN(data_end, 4);

	if ((data_end + size) > ICE_MAX_S_DATA_END)
		return NULL;

	/* check for more available section table entries */
	sect_count = le16_to_cpu(buf->section_count);
	if (sect_count < bld->reserved_section_table_entries) {
		void *section_ptr = ((u8 *)buf) + data_end;

		buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
		buf->section_entry[sect_count].size = cpu_to_le16(size);
		buf->section_entry[sect_count].type = cpu_to_le32(type);

		data_end += size;
		buf->data_end = cpu_to_le16(data_end);

		buf->section_count = cpu_to_le16(sect_count + 1);
		return section_ptr;
	}

	/* no free section table entries */
	return NULL;
}

/**
 * ice_pkg_buf_get_active_sections
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Returns the number of active sections. Before using the package buffer
 * in an update package command, the caller should make sure that there is at
 * least one active section - otherwise, the buffer is not legal and should
 * not be used.
 * Note: all package contents must be in Little Endian form.
 */
static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
{
	struct ice_buf_hdr *buf;

	if (!bld)
		return 0;

	buf = (struct ice_buf_hdr *)&bld->buf;
	return le16_to_cpu(buf->section_count);
}

/**
 * ice_pkg_buf
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Return a pointer to the buffer's header
 */
static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
{
	if (!bld)
		return NULL;

	return &bld->buf;
}

/**
 * ice_get_open_tunnel_port - retrieve an open tunnel port
 * @hw: pointer to the HW structure
 * @port: returns open port
 */
bool
ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port)
{
	bool res = false;
	u16 i;

	mutex_lock(&hw->tnl_lock);

	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port) {
			*port = hw->tnl.tbl[i].port;
			res = true;
			break;
		}

	mutex_unlock(&hw->tnl_lock);

	return res;
}

/**
 * ice_tunnel_idx_to_entry - convert linear index to the sparse one
 * @hw: pointer to the HW structure
 * @type: type of tunnel
 * @idx: linear index
 *
 * Stack assumes we have 2 linear tables with indexes [0, count_valid),
 * but really the port table may be sparse, and types are mixed, so convert
 * the stack index into the device index.
 */
static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
				   u16 idx)
{
	u16 i;

	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].valid &&
		    hw->tnl.tbl[i].type == type &&
		    idx-- == 0)
			return i;

	WARN_ON_ONCE(1);
	return 0;
}

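/* Worked example for ice_tunnel_idx_to_entry(): with a device table of
 * { VXLAN (entry 0), GENEVE (entry 1), VXLAN (entry 2) }, all valid, a stack
 * index of 1 for type TNL_VXLAN skips the first VXLAN match and returns
 * device entry 2.
 */
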
1679 /**
1680 * ice_create_tunnel
1681 * @hw: pointer to the HW structure
1682 * @index: device table entry
1683 * @type: type of tunnel
1684 * @port: port of tunnel to create
1685 *
1686 * Create a tunnel by updating the parse graph in the parser. We do that by
1687 * creating a package buffer with the tunnel info and issuing an update package
1688 * command.
1689 */
1690 static enum ice_status
ice_create_tunnel(struct ice_hw * hw,u16 index,enum ice_tunnel_type type,u16 port)1691 ice_create_tunnel(struct ice_hw *hw, u16 index,
1692 enum ice_tunnel_type type, u16 port)
1693 {
1694 struct ice_boost_tcam_section *sect_rx, *sect_tx;
1695 enum ice_status status = ICE_ERR_MAX_LIMIT;
1696 struct ice_buf_build *bld;
1697
1698 mutex_lock(&hw->tnl_lock);
1699
1700 bld = ice_pkg_buf_alloc(hw);
1701 if (!bld) {
1702 status = ICE_ERR_NO_MEMORY;
1703 goto ice_create_tunnel_end;
1704 }
1705
1706 /* allocate 2 sections, one for Rx parser, one for Tx parser */
1707 if (ice_pkg_buf_reserve_section(bld, 2))
1708 goto ice_create_tunnel_err;
1709
1710 sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
1711 struct_size(sect_rx, tcam, 1));
1712 if (!sect_rx)
1713 goto ice_create_tunnel_err;
1714 sect_rx->count = cpu_to_le16(1);
1715
1716 sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
1717 struct_size(sect_tx, tcam, 1));
1718 if (!sect_tx)
1719 goto ice_create_tunnel_err;
1720 sect_tx->count = cpu_to_le16(1);
1721
1722 /* copy original boost entry to update package buffer */
1723 memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
1724 sizeof(*sect_rx->tcam));
1725
1726 /* over-write the never-match dest port key bits with the encoded port
1727 * bits
1728 */
1729 ice_set_key((u8 *)§_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
1730 (u8 *)&port, NULL, NULL, NULL,
1731 (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
1732 sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
1733
1734 /* exact copy of entry to Tx section entry */
1735 memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
1736
1737 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
1738 if (!status)
1739 hw->tnl.tbl[index].port = port;
1740
1741 ice_create_tunnel_err:
1742 ice_pkg_buf_free(hw, bld);
1743
1744 ice_create_tunnel_end:
1745 mutex_unlock(&hw->tnl_lock);
1746
1747 return status;
1748 }
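
/* Usage sketch (values assumed, mirroring ice_udp_tunnel_set_port() below):
 * creating a VXLAN tunnel on the first free VXLAN entry would look like
 *
 *	index = ice_tunnel_idx_to_entry(hw, TNL_VXLAN, 0);
 *	status = ice_create_tunnel(hw, index, TNL_VXLAN, 4789);
 *
 * where 4789 is the IANA-assigned VXLAN port, in host byte order.
 */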
1749
1750 /**
1751 * ice_destroy_tunnel
1752 * @hw: pointer to the HW structure
1753 * @index: device table entry
1754 * @type: type of tunnel
1755 * @port: port of tunnel to destroy
1756 *
1757 * Destroys a tunnel by creating an update package buffer targeting the
1758 * requested entry and then performing an update package command.
1760 */
1761 static enum ice_status
1762 ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type,
1763 u16 port)
1764 {
1765 struct ice_boost_tcam_section *sect_rx, *sect_tx;
1766 enum ice_status status = ICE_ERR_MAX_LIMIT;
1767 struct ice_buf_build *bld;
1768
1769 mutex_lock(&hw->tnl_lock);
1770
1771 if (WARN_ON(!hw->tnl.tbl[index].valid ||
1772 hw->tnl.tbl[index].type != type ||
1773 hw->tnl.tbl[index].port != port)) {
1774 status = ICE_ERR_OUT_OF_RANGE;
1775 goto ice_destroy_tunnel_end;
1776 }
1777
1778 bld = ice_pkg_buf_alloc(hw);
1779 if (!bld) {
1780 status = ICE_ERR_NO_MEMORY;
1781 goto ice_destroy_tunnel_end;
1782 }
1783
1784 /* allocate 2 sections, one for Rx parser, one for Tx parser */
1785 if (ice_pkg_buf_reserve_section(bld, 2))
1786 goto ice_destroy_tunnel_err;
1787
1788 sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
1789 struct_size(sect_rx, tcam, 1));
1790 if (!sect_rx)
1791 goto ice_destroy_tunnel_err;
1792 sect_rx->count = cpu_to_le16(1);
1793
1794 sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
1795 struct_size(sect_tx, tcam, 1));
1796 if (!sect_tx)
1797 goto ice_destroy_tunnel_err;
1798 sect_tx->count = cpu_to_le16(1);
1799
1800 /* copy original boost entry to update package buffer, one copy to Rx
1801 * section, another copy to the Tx section
1802 */
1803 memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
1804 sizeof(*sect_rx->tcam));
1805 memcpy(sect_tx->tcam, hw->tnl.tbl[index].boost_entry,
1806 sizeof(*sect_tx->tcam));
1807
1808 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
1809 if (!status)
1810 hw->tnl.tbl[index].port = 0;
1811
1812 ice_destroy_tunnel_err:
1813 ice_pkg_buf_free(hw, bld);
1814
1815 ice_destroy_tunnel_end:
1816 mutex_unlock(&hw->tnl_lock);
1817
1818 return status;
1819 }
1820
1821 int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
1822 unsigned int idx, struct udp_tunnel_info *ti)
1823 {
1824 struct ice_netdev_priv *np = netdev_priv(netdev);
1825 struct ice_vsi *vsi = np->vsi;
1826 struct ice_pf *pf = vsi->back;
1827 enum ice_tunnel_type tnl_type;
1828 enum ice_status status;
1829 u16 index;
1830
1831 tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
1832 index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);
1833
1834 status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
1835 if (status) {
1836 netdev_err(netdev, "Error adding UDP tunnel - %s\n",
1837 ice_stat_str(status));
1838 return -EIO;
1839 }
1840
1841 udp_tunnel_nic_set_port_priv(netdev, table, idx, index);
1842 return 0;
1843 }
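
/* Note: the device table entry is stashed via udp_tunnel_nic_set_port_priv()
 * above so that ice_udp_tunnel_unset_port() below gets it back as
 * ti->hw_priv, without repeating the linear-to-sparse conversion.
 */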
1844
1845 int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
1846 unsigned int idx, struct udp_tunnel_info *ti)
1847 {
1848 struct ice_netdev_priv *np = netdev_priv(netdev);
1849 struct ice_vsi *vsi = np->vsi;
1850 struct ice_pf *pf = vsi->back;
1851 enum ice_tunnel_type tnl_type;
1852 enum ice_status status;
1853
1854 tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
1855
1856 status = ice_destroy_tunnel(&pf->hw, ti->hw_priv, tnl_type,
1857 ntohs(ti->port));
1858 if (status) {
1859 netdev_err(netdev, "Error removing UDP tunnel - %s\n",
1860 ice_stat_str(status));
1861 return -EIO;
1862 }
1863
1864 return 0;
1865 }
1866
1867 /* PTG Management */
1868
1869 /**
1870 * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
1871 * @hw: pointer to the hardware structure
1872 * @blk: HW block
1873 * @ptype: the ptype to search for
1874 * @ptg: pointer to variable that receives the PTG
1875 *
1876 * This function will search the PTGs for a particular ptype, returning the
1877 * PTG ID that contains it through the PTG parameter, with the value of
1878 * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
1879 */
1880 static enum ice_status
1881 ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
1882 {
1883 if (ptype >= ICE_XLT1_CNT || !ptg)
1884 return ICE_ERR_PARAM;
1885
1886 *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
1887 return 0;
1888 }
1889
1890 /**
1891 * ice_ptg_alloc_val - Allocates a new packet type group ID by value
1892 * @hw: pointer to the hardware structure
1893 * @blk: HW block
1894 * @ptg: the PTG to allocate
1895 *
1896 * This function allocates a given packet type group ID specified by the PTG
1897 * parameter.
1898 */
1899 static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
1900 {
1901 hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
1902 }
1903
1904 /**
1905 * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
1906 * @hw: pointer to the hardware structure
1907 * @blk: HW block
1908 * @ptype: the ptype to remove
1909 * @ptg: the PTG to remove the ptype from
1910 *
1911 * This function will remove the ptype from the specific PTG, and move it to
1912 * the default PTG (ICE_DEFAULT_PTG).
1913 */
1914 static enum ice_status
1915 ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
1916 {
1917 struct ice_ptg_ptype **ch;
1918 struct ice_ptg_ptype *p;
1919
1920 if (ptype > ICE_XLT1_CNT - 1)
1921 return ICE_ERR_PARAM;
1922
1923 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
1924 return ICE_ERR_DOES_NOT_EXIST;
1925
1926 /* Should not happen if .in_use is set, bad config */
1927 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
1928 return ICE_ERR_CFG;
1929
1930 /* find the ptype within this PTG, and bypass the link over it */
1931 p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
1932 ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
1933 while (p) {
1934 if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
1935 *ch = p->next_ptype;
1936 break;
1937 }
1938
1939 ch = &p->next_ptype;
1940 p = p->next_ptype;
1941 }
1942
1943 hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
1944 hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
1945
1946 return 0;
1947 }
1948
1949 /**
1950 * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
1951 * @hw: pointer to the hardware structure
1952 * @blk: HW block
1953 * @ptype: the ptype to add or move
1954 * @ptg: the PTG to add or move the ptype to
1955 *
1956 * This function will either add or move a ptype to a particular PTG depending
1957 * on whether the ptype is already part of another group. Note that using a
1958 * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
1959 * default PTG.
1960 */
1961 static enum ice_status
1962 ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
1963 {
1964 enum ice_status status;
1965 u8 original_ptg;
1966
1967 if (ptype > ICE_XLT1_CNT - 1)
1968 return ICE_ERR_PARAM;
1969
1970 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
1971 return ICE_ERR_DOES_NOT_EXIST;
1972
1973 status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
1974 if (status)
1975 return status;
1976
1977 /* Is ptype already in the correct PTG? */
1978 if (original_ptg == ptg)
1979 return 0;
1980
1981 /* Remove from original PTG and move back to the default PTG */
1982 if (original_ptg != ICE_DEFAULT_PTG)
1983 ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
1984
1985 /* Moving to default PTG? Then we're done with this request */
1986 if (ptg == ICE_DEFAULT_PTG)
1987 return 0;
1988
1989 /* Add ptype to PTG at beginning of list */
1990 hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
1991 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
1992 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
1993 &hw->blk[blk].xlt1.ptypes[ptype];
1994
1995 hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
1996 hw->blk[blk].xlt1.t[ptype] = ptg;
1997
1998 return 0;
1999 }
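
/* Illustration (list state assumed): if PTG 5 currently holds A -> B and
 * ptype 17 is added, the list becomes 17 -> A -> B, and both the software
 * shadow (xlt1.ptypes[17].ptg) and the HW shadow table (xlt1.t[17]) are
 * set to 5 by the assignments above.
 */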
2000
2001 /* Block / table size info */
2002 struct ice_blk_size_details {
2003 u16 xlt1; /* # XLT1 entries */
2004 u16 xlt2; /* # XLT2 entries */
2005 u16 prof_tcam; /* # profile ID TCAM entries */
2006 u16 prof_id; /* # profile IDs */
2007 u8 prof_cdid_bits; /* # CDID one-hot bits used in key */
2008 u16 prof_redir; /* # profile redirection entries */
2009 u16 es; /* # extraction sequence entries */
2010 u16 fvw; /* # field vector words */
2011 u8 overwrite; /* overwrite existing entries allowed */
2012 u8 reverse; /* reverse FV order */
2013 };
2014
2015 static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
2016 /**
2017 * Table Definitions
2018 * XLT1 - Number of entries in XLT1 table
2019 * XLT2 - Number of entries in XLT2 table
2020 * TCAM - Number of entries in the Profile ID TCAM table
2021 * CDID - Control Domain ID of the hardware block
2022 * PRED - Number of entries in the Profile Redirection Table
2023 * FV - Number of entries in the Field Vector
2024 * FVW - Width (in WORDs) of the Field Vector
2025 * OVR - Overwrite existing table entries
2026 * REV - Reverse FV
2027 */
2028 /* XLT1 , XLT2 ,TCAM, PID,CDID,PRED, FV, FVW */
2029 /* Overwrite , Reverse FV */
2030 /* SW */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48,
2031 false, false },
2032 /* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32,
2033 false, false },
2034 /* FD */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
2035 false, true },
2036 /* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
2037 true, true },
2038 /* PE */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24,
2039 false, false },
2040 };
2041
2042 enum ice_sid_all {
2043 ICE_SID_XLT1_OFF = 0,
2044 ICE_SID_XLT2_OFF,
2045 ICE_SID_PR_OFF,
2046 ICE_SID_PR_REDIR_OFF,
2047 ICE_SID_ES_OFF,
2048 ICE_SID_OFF_COUNT,
2049 };
2050
2051 /* Characteristic handling */
2052
2053 /**
2054 * ice_match_prop_lst - determine if properties of two lists match
2055 * @list1: first properties list
2056 * @list2: second properties list
2057 *
2058 * Count, cookies and the order must all match for the lists to be equivalent.
2059 */
2060 static bool
2061 ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
2062 {
2063 struct ice_vsig_prof *tmp1;
2064 struct ice_vsig_prof *tmp2;
2065 u16 chk_count = 0;
2066 u16 count = 0;
2067
2068 /* compare counts */
2069 list_for_each_entry(tmp1, list1, list)
2070 count++;
2071 list_for_each_entry(tmp2, list2, list)
2072 chk_count++;
2073 if (!count || count != chk_count)
2074 return false;
2075
2076 tmp1 = list_first_entry(list1, struct ice_vsig_prof, list);
2077 tmp2 = list_first_entry(list2, struct ice_vsig_prof, list);
2078
2079 /* profile cookies must compare, and in the exact same order to take
2080 * into account priority
2081 */
2082 while (count--) {
2083 if (tmp2->profile_cookie != tmp1->profile_cookie)
2084 return false;
2085
2086 tmp1 = list_next_entry(tmp1, list);
2087 tmp2 = list_next_entry(tmp2, list);
2088 }
2089
2090 return true;
2091 }
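
/* Example: the lists {0xAA, 0xBB} and {0xBB, 0xAA} hold the same cookies
 * but do not match, because the ordering encodes profile priority.
 */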
2092
2093 /* VSIG Management */
2094
2095 /**
2096 * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
2097 * @hw: pointer to the hardware structure
2098 * @blk: HW block
2099 * @vsi: VSI of interest
2100 * @vsig: pointer to receive the VSI group
2101 *
2102 * This function will look up the VSI entry in the XLT2 list and return
2103 * the VSI group it is associated with.
2104 */
2105 static enum ice_status
2106 ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
2107 {
2108 if (!vsig || vsi >= ICE_MAX_VSI)
2109 return ICE_ERR_PARAM;
2110
2111 /* As long as there's a default or valid VSIG associated with the input
2112 * VSI, the function returns success. Any handling of VSIG will be
2113 * done by the following add, update or remove functions.
2114 */
2115 *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
2116
2117 return 0;
2118 }
2119
2120 /**
2121 * ice_vsig_alloc_val - allocate a new VSIG by value
2122 * @hw: pointer to the hardware structure
2123 * @blk: HW block
2124 * @vsig: the VSIG to allocate
2125 *
2126 * This function will allocate a given VSIG specified by the VSIG parameter.
2127 */
2128 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2129 {
2130 u16 idx = vsig & ICE_VSIG_IDX_M;
2131
2132 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
2133 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2134 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
2135 }
2136
2137 return ICE_VSIG_VALUE(idx, hw->pf_id);
2138 }
2139
2140 /**
2141 * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
2142 * @hw: pointer to the hardware structure
2143 * @blk: HW block
2144 *
2145 * This function will iterate through the VSIG list and mark the first
2146 * unused entry as used for the new VSIG entry, returning that value.
2147 */
2148 static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
2149 {
2150 u16 i;
2151
2152 for (i = 1; i < ICE_MAX_VSIGS; i++)
2153 if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2154 return ice_vsig_alloc_val(hw, blk, i);
2155
2156 return ICE_DEFAULT_VSIG;
2157 }
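
/* Entry 0 is deliberately never handed out by the loop above: index 0 is
 * ICE_DEFAULT_VSIG, the group that unassigned VSIs implicitly belong to,
 * and it doubles as the "no free entry" return value here.
 */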
2158
2159 /**
2160 * ice_find_dup_props_vsig - find VSI group with a specified set of properties
2161 * @hw: pointer to the hardware structure
2162 * @blk: HW block
2163 * @chs: characteristic list
2164 * @vsig: returns the VSIG with the matching profiles, if found
2165 *
2166 * Each VSIG is associated with a characteristic set; i.e. all VSIs under
2167 * a group have the same characteristic set. To check whether there exists a
2168 * VSIG with the same characteristics as the input set, this function will
2169 * iterate through the XLT2 list and return the VSIG that has a
2170 * matching configuration. In order to make sure that priorities are accounted
2171 * for, the list must match exactly, including the order in which the
2172 * characteristics are listed.
2173 */
2174 static enum ice_status
2175 ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
2176 struct list_head *chs, u16 *vsig)
2177 {
2178 struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
2179 u16 i;
2180
2181 for (i = 0; i < xlt2->count; i++)
2182 if (xlt2->vsig_tbl[i].in_use &&
2183 ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
2184 *vsig = ICE_VSIG_VALUE(i, hw->pf_id);
2185 return 0;
2186 }
2187
2188 return ICE_ERR_DOES_NOT_EXIST;
2189 }
2190
2191 /**
2192 * ice_vsig_free - free VSI group
2193 * @hw: pointer to the hardware structure
2194 * @blk: HW block
2195 * @vsig: VSIG to remove
2196 *
2197 * The function will remove all VSIs associated with the input VSIG and move
2198 * them to the DEFAULT_VSIG and mark the VSIG available.
2199 */
2200 static enum ice_status
2201 ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2202 {
2203 struct ice_vsig_prof *dtmp, *del;
2204 struct ice_vsig_vsi *vsi_cur;
2205 u16 idx;
2206
2207 idx = vsig & ICE_VSIG_IDX_M;
2208 if (idx >= ICE_MAX_VSIGS)
2209 return ICE_ERR_PARAM;
2210
2211 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2212 return ICE_ERR_DOES_NOT_EXIST;
2213
2214 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
2215
2216 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2217 /* If the VSIG has at least 1 VSI then iterate through the
2218 * list and remove the VSIs before deleting the group.
2219 */
2220 if (vsi_cur) {
2221 /* remove all VSIs associated with this VSIG XLT2 entry */
2222 do {
2223 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
2224
2225 vsi_cur->vsig = ICE_DEFAULT_VSIG;
2226 vsi_cur->changed = 1;
2227 vsi_cur->next_vsi = NULL;
2228 vsi_cur = tmp;
2229 } while (vsi_cur);
2230
2231 /* NULL terminate head of VSI list */
2232 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
2233 }
2234
2235 /* free characteristic list */
2236 list_for_each_entry_safe(del, dtmp,
2237 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2238 list) {
2239 list_del(&del->list);
2240 devm_kfree(ice_hw_to_dev(hw), del);
2241 }
2242
2243 /* if VSIG characteristic list was cleared for reset
2244 * re-initialize the list head
2245 */
2246 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2247
2248 return 0;
2249 }
2250
2251 /**
2252 * ice_vsig_remove_vsi - remove VSI from VSIG
2253 * @hw: pointer to the hardware structure
2254 * @blk: HW block
2255 * @vsi: VSI to remove
2256 * @vsig: VSI group to remove from
2257 *
2258 * The function will remove the input VSI from its VSI group and move it
2259 * to the DEFAULT_VSIG.
2260 */
2261 static enum ice_status
2262 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2263 {
2264 struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
2265 u16 idx;
2266
2267 idx = vsig & ICE_VSIG_IDX_M;
2268
2269 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2270 return ICE_ERR_PARAM;
2271
2272 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2273 return ICE_ERR_DOES_NOT_EXIST;
2274
2275 /* entry already in default VSIG, don't have to remove */
2276 if (idx == ICE_DEFAULT_VSIG)
2277 return 0;
2278
2279 vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2280 if (!(*vsi_head))
2281 return ICE_ERR_CFG;
2282
2283 vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
2284 vsi_cur = (*vsi_head);
2285
2286 /* iterate the VSI list, skip over the entry to be removed */
2287 while (vsi_cur) {
2288 if (vsi_tgt == vsi_cur) {
2289 (*vsi_head) = vsi_cur->next_vsi;
2290 break;
2291 }
2292 vsi_head = &vsi_cur->next_vsi;
2293 vsi_cur = vsi_cur->next_vsi;
2294 }
2295
2296 /* verify if VSI was removed from group list */
2297 if (!vsi_cur)
2298 return ICE_ERR_DOES_NOT_EXIST;
2299
2300 vsi_cur->vsig = ICE_DEFAULT_VSIG;
2301 vsi_cur->changed = 1;
2302 vsi_cur->next_vsi = NULL;
2303
2304 return 0;
2305 }
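
/* Unlink illustration: vsi_head walks the singly linked list as a
 * pointer-to-pointer, so when the target VSI is found, *vsi_head is simply
 * redirected past it and no special case is needed for removing the head.
 */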
2306
2307 /**
2308 * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
2309 * @hw: pointer to the hardware structure
2310 * @blk: HW block
2311 * @vsi: VSI to move
2312 * @vsig: destination VSI group
2313 *
2314 * This function will move or add the input VSI to the target VSIG.
2315 * The function will find the original VSIG the VSI belongs to and
2316 * move the entry to the DEFAULT_VSIG, update the original VSIG and
2317 * then move entry to the new VSIG.
2318 */
2319 static enum ice_status
2320 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2321 {
2322 struct ice_vsig_vsi *tmp;
2323 enum ice_status status;
2324 u16 orig_vsig, idx;
2325
2326 idx = vsig & ICE_VSIG_IDX_M;
2327
2328 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2329 return ICE_ERR_PARAM;
2330
2331 /* if the VSIG is not in use and is not the default VSIG, this VSIG
2332 * doesn't exist.
2333 */
2334 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
2335 vsig != ICE_DEFAULT_VSIG)
2336 return ICE_ERR_DOES_NOT_EXIST;
2337
2338 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
2339 if (status)
2340 return status;
2341
2342 /* no update required if vsigs match */
2343 if (orig_vsig == vsig)
2344 return 0;
2345
2346 if (orig_vsig != ICE_DEFAULT_VSIG) {
2347 /* remove entry from orig_vsig and add to default VSIG */
2348 status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
2349 if (status)
2350 return status;
2351 }
2352
2353 if (idx == ICE_DEFAULT_VSIG)
2354 return 0;
2355
2356 /* Create VSI entry and add VSIG and prop_mask values */
2357 hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
2358 hw->blk[blk].xlt2.vsis[vsi].changed = 1;
2359
2360 /* Add new entry to the head of the VSIG list */
2361 tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2362 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
2363 &hw->blk[blk].xlt2.vsis[vsi];
2364 hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
2365 hw->blk[blk].xlt2.t[vsi] = vsig;
2366
2367 return 0;
2368 }
2369
2370 /**
2371 * ice_find_prof_id - find profile ID for a given field vector
2372 * @hw: pointer to the hardware structure
2373 * @blk: HW block
2374 * @fv: field vector to search for
2375 * @prof_id: receives the profile ID
2376 */
2377 static enum ice_status
2378 ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
2379 struct ice_fv_word *fv, u8 *prof_id)
2380 {
2381 struct ice_es *es = &hw->blk[blk].es;
2382 u16 off;
2383 u8 i;
2384
2385 /* For FD, we don't want to re-use an existing profile with the same
2386 * field vector and mask, as that would cause rule interference.
2387 */
2388 if (blk == ICE_BLK_FD)
2389 return ICE_ERR_DOES_NOT_EXIST;
2390
2391 for (i = 0; i < (u8)es->count; i++) {
2392 off = i * es->fvw;
2393
2394 if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
2395 continue;
2396
2397 *prof_id = i;
2398 return 0;
2399 }
2400
2401 return ICE_ERR_DOES_NOT_EXIST;
2402 }
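
/* The extraction sequence table es->t is flat: profile i occupies es->fvw
 * consecutive ice_fv_word entries starting at i * es->fvw, which is why
 * the memcmp() above compares es->fvw * sizeof(*fv) bytes at offset off.
 */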
2403
2404 /**
2405 * ice_prof_id_rsrc_type - get profile ID resource type for a block type
2406 * @blk: the block type
2407 * @rsrc_type: pointer to variable to receive the resource type
2408 */
2409 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
2410 {
2411 switch (blk) {
2412 case ICE_BLK_FD:
2413 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
2414 break;
2415 case ICE_BLK_RSS:
2416 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
2417 break;
2418 default:
2419 return false;
2420 }
2421 return true;
2422 }
2423
2424 /**
2425 * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
2426 * @blk: the block type
2427 * @rsrc_type: pointer to variable to receive the resource type
2428 */
2429 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
2430 {
2431 switch (blk) {
2432 case ICE_BLK_FD:
2433 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
2434 break;
2435 case ICE_BLK_RSS:
2436 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
2437 break;
2438 default:
2439 return false;
2440 }
2441 return true;
2442 }
2443
2444 /**
2445 * ice_alloc_tcam_ent - allocate hardware TCAM entry
2446 * @hw: pointer to the HW struct
2447 * @blk: the block to allocate the TCAM for
2448 * @tcam_idx: pointer to variable to receive the TCAM entry
2449 *
2450 * This function allocates a new entry in a Profile ID TCAM for a specific
2451 * block.
2452 */
2453 static enum ice_status
2454 ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx)
2455 {
2456 u16 res_type;
2457
2458 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
2459 return ICE_ERR_PARAM;
2460
2461 return ice_alloc_hw_res(hw, res_type, 1, true, tcam_idx);
2462 }
2463
2464 /**
2465 * ice_free_tcam_ent - free hardware TCAM entry
2466 * @hw: pointer to the HW struct
2467 * @blk: the block from which to free the TCAM entry
2468 * @tcam_idx: the TCAM entry to free
2469 *
2470 * This function frees an entry in a Profile ID TCAM for a specific block.
2471 */
2472 static enum ice_status
2473 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
2474 {
2475 u16 res_type;
2476
2477 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
2478 return ICE_ERR_PARAM;
2479
2480 return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
2481 }
2482
2483 /**
2484 * ice_alloc_prof_id - allocate profile ID
2485 * @hw: pointer to the HW struct
2486 * @blk: the block to allocate the profile ID for
2487 * @prof_id: pointer to variable to receive the profile ID
2488 *
2489 * This function allocates a new profile ID, which also corresponds to a Field
2490 * Vector (Extraction Sequence) entry.
2491 */
2492 static enum ice_status
2493 ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
2494 {
2495 enum ice_status status;
2496 u16 res_type;
2497 u16 get_prof;
2498
2499 if (!ice_prof_id_rsrc_type(blk, &res_type))
2500 return ICE_ERR_PARAM;
2501
2502 status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
2503 if (!status)
2504 *prof_id = (u8)get_prof;
2505
2506 return status;
2507 }
2508
2509 /**
2510 * ice_free_prof_id - free profile ID
2511 * @hw: pointer to the HW struct
2512 * @blk: the block from which to free the profile ID
2513 * @prof_id: the profile ID to free
2514 *
2515 * This function frees a profile ID, which also corresponds to a Field Vector.
2516 */
2517 static enum ice_status
2518 ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2519 {
2520 u16 tmp_prof_id = (u16)prof_id;
2521 u16 res_type;
2522
2523 if (!ice_prof_id_rsrc_type(blk, &res_type))
2524 return ICE_ERR_PARAM;
2525
2526 return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
2527 }
2528
2529 /**
2530 * ice_prof_inc_ref - increment reference count for profile
2531 * @hw: pointer to the HW struct
2532 * @blk: the block from which to free the profile ID
2533 * @prof_id: the profile ID for which to increment the reference count
2534 */
2535 static enum ice_status
2536 ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2537 {
2538 if (prof_id > hw->blk[blk].es.count)
2539 return ICE_ERR_PARAM;
2540
2541 hw->blk[blk].es.ref_count[prof_id]++;
2542
2543 return 0;
2544 }
2545
2546 /**
2547 * ice_write_es - write an extraction sequence to hardware
2548 * @hw: pointer to the HW struct
2549 * @blk: the block in which to write the extraction sequence
2550 * @prof_id: the profile ID to write
2551 * @fv: pointer to the extraction sequence to write - NULL to clear extraction
2552 */
2553 static void
2554 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
2555 struct ice_fv_word *fv)
2556 {
2557 u16 off;
2558
2559 off = prof_id * hw->blk[blk].es.fvw;
2560 if (!fv) {
2561 memset(&hw->blk[blk].es.t[off], 0,
2562 hw->blk[blk].es.fvw * sizeof(*fv));
2563 hw->blk[blk].es.written[prof_id] = false;
2564 } else {
2565 memcpy(&hw->blk[blk].es.t[off], fv,
2566 hw->blk[blk].es.fvw * sizeof(*fv));
2567 }
2568 }
2569
2570 /**
2571 * ice_prof_dec_ref - decrement reference count for profile
2572 * @hw: pointer to the HW struct
2573 * @blk: the block from which to free the profile ID
2574 * @prof_id: the profile ID for which to decrement the reference count
2575 */
2576 static enum ice_status
2577 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2578 {
2579 if (prof_id > hw->blk[blk].es.count)
2580 return ICE_ERR_PARAM;
2581
2582 if (hw->blk[blk].es.ref_count[prof_id] > 0) {
2583 if (!--hw->blk[blk].es.ref_count[prof_id]) {
2584 ice_write_es(hw, blk, prof_id, NULL);
2585 return ice_free_prof_id(hw, blk, prof_id);
2586 }
2587 }
2588
2589 return 0;
2590 }
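
/* Lifecycle sketch: the final ice_prof_dec_ref() both clears the profile's
 * extraction sequence (ice_write_es() with a NULL fv) and returns the
 * profile ID to the allocator, undoing ice_alloc_prof_id(),
 * ice_write_es() and ice_prof_inc_ref() from the create path.
 */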
2591
2592 /* Block / table section IDs */
2593 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
2594 /* SWITCH */
2595 { ICE_SID_XLT1_SW,
2596 ICE_SID_XLT2_SW,
2597 ICE_SID_PROFID_TCAM_SW,
2598 ICE_SID_PROFID_REDIR_SW,
2599 ICE_SID_FLD_VEC_SW
2600 },
2601
2602 /* ACL */
2603 { ICE_SID_XLT1_ACL,
2604 ICE_SID_XLT2_ACL,
2605 ICE_SID_PROFID_TCAM_ACL,
2606 ICE_SID_PROFID_REDIR_ACL,
2607 ICE_SID_FLD_VEC_ACL
2608 },
2609
2610 /* FD */
2611 { ICE_SID_XLT1_FD,
2612 ICE_SID_XLT2_FD,
2613 ICE_SID_PROFID_TCAM_FD,
2614 ICE_SID_PROFID_REDIR_FD,
2615 ICE_SID_FLD_VEC_FD
2616 },
2617
2618 /* RSS */
2619 { ICE_SID_XLT1_RSS,
2620 ICE_SID_XLT2_RSS,
2621 ICE_SID_PROFID_TCAM_RSS,
2622 ICE_SID_PROFID_REDIR_RSS,
2623 ICE_SID_FLD_VEC_RSS
2624 },
2625
2626 /* PE */
2627 { ICE_SID_XLT1_PE,
2628 ICE_SID_XLT2_PE,
2629 ICE_SID_PROFID_TCAM_PE,
2630 ICE_SID_PROFID_REDIR_PE,
2631 ICE_SID_FLD_VEC_PE
2632 }
2633 };
2634
2635 /**
2636 * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
2637 * @hw: pointer to the hardware structure
2638 * @blk: the HW block to initialize
2639 */
2640 static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
2641 {
2642 u16 pt;
2643
2644 for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
2645 u8 ptg;
2646
2647 ptg = hw->blk[blk].xlt1.t[pt];
2648 if (ptg != ICE_DEFAULT_PTG) {
2649 ice_ptg_alloc_val(hw, blk, ptg);
2650 ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
2651 }
2652 }
2653 }
2654
2655 /**
2656 * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
2657 * @hw: pointer to the hardware structure
2658 * @blk: the HW block to initialize
2659 */
2660 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
2661 {
2662 u16 vsi;
2663
2664 for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
2665 u16 vsig;
2666
2667 vsig = hw->blk[blk].xlt2.t[vsi];
2668 if (vsig) {
2669 ice_vsig_alloc_val(hw, blk, vsig);
2670 ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
2671 /* no changes at this time, since this has been
2672 * initialized from the original package
2673 */
2674 hw->blk[blk].xlt2.vsis[vsi].changed = 0;
2675 }
2676 }
2677 }
2678
2679 /**
2680 * ice_init_sw_db - init software database from HW tables
2681 * @hw: pointer to the hardware structure
2682 */
2683 static void ice_init_sw_db(struct ice_hw *hw)
2684 {
2685 u16 i;
2686
2687 for (i = 0; i < ICE_BLK_COUNT; i++) {
2688 ice_init_sw_xlt1_db(hw, (enum ice_block)i);
2689 ice_init_sw_xlt2_db(hw, (enum ice_block)i);
2690 }
2691 }
2692
2693 /**
2694 * ice_fill_tbl - Reads content of a single table type into database
2695 * @hw: pointer to the hardware structure
2696 * @block_id: Block ID of the table to copy
2697 * @sid: Section ID of the table to copy
2698 *
2699 * Will attempt to read the entire content of a given table of a single block
2700 * into the driver database. We assume that the buffer will always
2701 * be as large or larger than the data contained in the package. If
2702 * this condition is not met, there is most likely an error in the package
2703 * contents.
2704 */
2705 static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
2706 {
2707 u32 dst_len, sect_len, offset = 0;
2708 struct ice_prof_redir_section *pr;
2709 struct ice_prof_id_section *pid;
2710 struct ice_xlt1_section *xlt1;
2711 struct ice_xlt2_section *xlt2;
2712 struct ice_sw_fv_section *es;
2713 struct ice_pkg_enum state;
2714 u8 *src, *dst;
2715 void *sect;
2716
2717 /* if the HW segment pointer is null then the first iteration of
2718 * ice_pkg_enum_section() will fail. In this case the HW tables will
2719 * not be filled and we simply return.
2720 */
2721 if (!hw->seg) {
2722 ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
2723 return;
2724 }
2725
2726 memset(&state, 0, sizeof(state));
2727
2728 sect = ice_pkg_enum_section(hw->seg, &state, sid);
2729
2730 while (sect) {
2731 switch (sid) {
2732 case ICE_SID_XLT1_SW:
2733 case ICE_SID_XLT1_FD:
2734 case ICE_SID_XLT1_RSS:
2735 case ICE_SID_XLT1_ACL:
2736 case ICE_SID_XLT1_PE:
2737 xlt1 = (struct ice_xlt1_section *)sect;
2738 src = xlt1->value;
2739 sect_len = le16_to_cpu(xlt1->count) *
2740 sizeof(*hw->blk[block_id].xlt1.t);
2741 dst = hw->blk[block_id].xlt1.t;
2742 dst_len = hw->blk[block_id].xlt1.count *
2743 sizeof(*hw->blk[block_id].xlt1.t);
2744 break;
2745 case ICE_SID_XLT2_SW:
2746 case ICE_SID_XLT2_FD:
2747 case ICE_SID_XLT2_RSS:
2748 case ICE_SID_XLT2_ACL:
2749 case ICE_SID_XLT2_PE:
2750 xlt2 = (struct ice_xlt2_section *)sect;
2751 src = (__force u8 *)xlt2->value;
2752 sect_len = le16_to_cpu(xlt2->count) *
2753 sizeof(*hw->blk[block_id].xlt2.t);
2754 dst = (u8 *)hw->blk[block_id].xlt2.t;
2755 dst_len = hw->blk[block_id].xlt2.count *
2756 sizeof(*hw->blk[block_id].xlt2.t);
2757 break;
2758 case ICE_SID_PROFID_TCAM_SW:
2759 case ICE_SID_PROFID_TCAM_FD:
2760 case ICE_SID_PROFID_TCAM_RSS:
2761 case ICE_SID_PROFID_TCAM_ACL:
2762 case ICE_SID_PROFID_TCAM_PE:
2763 pid = (struct ice_prof_id_section *)sect;
2764 src = (u8 *)pid->entry;
2765 sect_len = le16_to_cpu(pid->count) *
2766 sizeof(*hw->blk[block_id].prof.t);
2767 dst = (u8 *)hw->blk[block_id].prof.t;
2768 dst_len = hw->blk[block_id].prof.count *
2769 sizeof(*hw->blk[block_id].prof.t);
2770 break;
2771 case ICE_SID_PROFID_REDIR_SW:
2772 case ICE_SID_PROFID_REDIR_FD:
2773 case ICE_SID_PROFID_REDIR_RSS:
2774 case ICE_SID_PROFID_REDIR_ACL:
2775 case ICE_SID_PROFID_REDIR_PE:
2776 pr = (struct ice_prof_redir_section *)sect;
2777 src = pr->redir_value;
2778 sect_len = le16_to_cpu(pr->count) *
2779 sizeof(*hw->blk[block_id].prof_redir.t);
2780 dst = hw->blk[block_id].prof_redir.t;
2781 dst_len = hw->blk[block_id].prof_redir.count *
2782 sizeof(*hw->blk[block_id].prof_redir.t);
2783 break;
2784 case ICE_SID_FLD_VEC_SW:
2785 case ICE_SID_FLD_VEC_FD:
2786 case ICE_SID_FLD_VEC_RSS:
2787 case ICE_SID_FLD_VEC_ACL:
2788 case ICE_SID_FLD_VEC_PE:
2789 es = (struct ice_sw_fv_section *)sect;
2790 src = (u8 *)es->fv;
2791 sect_len = (u32)(le16_to_cpu(es->count) *
2792 hw->blk[block_id].es.fvw) *
2793 sizeof(*hw->blk[block_id].es.t);
2794 dst = (u8 *)hw->blk[block_id].es.t;
2795 dst_len = (u32)(hw->blk[block_id].es.count *
2796 hw->blk[block_id].es.fvw) *
2797 sizeof(*hw->blk[block_id].es.t);
2798 break;
2799 default:
2800 return;
2801 }
2802
2803 /* if the section offset exceeds destination length, terminate
2804 * table fill.
2805 */
2806 if (offset > dst_len)
2807 return;
2808
2809 /* if the sum of section size and offset exceeds the destination
2810 * size, then we are out of bounds of the HW table size for that
2811 * PF. Clamp the section length to fill only the remaining table
2812 * space of that PF.
2813 */
2814 if ((offset + sect_len) > dst_len)
2815 sect_len = dst_len - offset;
2816
2817 memcpy(dst + offset, src, sect_len);
2818 offset += sect_len;
2819 sect = ice_pkg_enum_section(NULL, &state, sid);
2820 }
2821 }
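
/* Example of the clamping above (sizes assumed): with dst_len = 1024 and
 * offset = 896, a 256-byte section is truncated to sect_len = 128 so the
 * memcpy() never writes past this PF's slice of the HW table.
 */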
2822
2823 /**
2824 * ice_fill_blk_tbls - Read package context for tables
2825 * @hw: pointer to the hardware structure
2826 *
2827 * Reads the current package contents and populates the driver
2828 * database with the data iteratively for all advanced feature
2829 * blocks. Assume that the HW tables have been allocated.
2830 */
2831 void ice_fill_blk_tbls(struct ice_hw *hw)
2832 {
2833 u8 i;
2834
2835 for (i = 0; i < ICE_BLK_COUNT; i++) {
2836 enum ice_block blk_id = (enum ice_block)i;
2837
2838 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
2839 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
2840 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
2841 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
2842 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
2843 }
2844
2845 ice_init_sw_db(hw);
2846 }
2847
2848 /**
2849 * ice_free_prof_map - free profile map
2850 * @hw: pointer to the hardware structure
2851 * @blk_idx: HW block index
2852 */
2853 static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
2854 {
2855 struct ice_es *es = &hw->blk[blk_idx].es;
2856 struct ice_prof_map *del, *tmp;
2857
2858 mutex_lock(&es->prof_map_lock);
2859 list_for_each_entry_safe(del, tmp, &es->prof_map, list) {
2860 list_del(&del->list);
2861 devm_kfree(ice_hw_to_dev(hw), del);
2862 }
2863 INIT_LIST_HEAD(&es->prof_map);
2864 mutex_unlock(&es->prof_map_lock);
2865 }
2866
2867 /**
2868 * ice_free_flow_profs - free flow profile entries
2869 * @hw: pointer to the hardware structure
2870 * @blk_idx: HW block index
2871 */
2872 static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
2873 {
2874 struct ice_flow_prof *p, *tmp;
2875
2876 mutex_lock(&hw->fl_profs_locks[blk_idx]);
2877 list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) {
2878 struct ice_flow_entry *e, *t;
2879
2880 list_for_each_entry_safe(e, t, &p->entries, l_entry)
2881 ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
2882 ICE_FLOW_ENTRY_HNDL(e));
2883
2884 list_del(&p->l_entry);
2885
2886 mutex_destroy(&p->entries_lock);
2887 devm_kfree(ice_hw_to_dev(hw), p);
2888 }
2889 mutex_unlock(&hw->fl_profs_locks[blk_idx]);
2890
2891 /* if driver is in reset and tables are being cleared
2892 * re-initialize the flow profile list heads
2893 */
2894 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
2895 }
2896
2897 /**
2898 * ice_free_vsig_tbl - free complete VSIG table entries
2899 * @hw: pointer to the hardware structure
2900 * @blk: the HW block on which to free the VSIG table entries
2901 */
2902 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
2903 {
2904 u16 i;
2905
2906 if (!hw->blk[blk].xlt2.vsig_tbl)
2907 return;
2908
2909 for (i = 1; i < ICE_MAX_VSIGS; i++)
2910 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2911 ice_vsig_free(hw, blk, i);
2912 }
2913
2914 /**
2915 * ice_free_hw_tbls - free hardware table memory
2916 * @hw: pointer to the hardware structure
2917 */
2918 void ice_free_hw_tbls(struct ice_hw *hw)
2919 {
2920 struct ice_rss_cfg *r, *rt;
2921 u8 i;
2922
2923 for (i = 0; i < ICE_BLK_COUNT; i++) {
2924 if (hw->blk[i].is_list_init) {
2925 struct ice_es *es = &hw->blk[i].es;
2926
2927 ice_free_prof_map(hw, i);
2928 mutex_destroy(&es->prof_map_lock);
2929
2930 ice_free_flow_profs(hw, i);
2931 mutex_destroy(&hw->fl_profs_locks[i]);
2932
2933 hw->blk[i].is_list_init = false;
2934 }
2935 ice_free_vsig_tbl(hw, (enum ice_block)i);
2936 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes);
2937 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl);
2938 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t);
2939 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t);
2940 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl);
2941 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis);
2942 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t);
2943 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
2944 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
2945 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
2946 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
2947 }
2948
2949 list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
2950 list_del(&r->l_entry);
2951 devm_kfree(ice_hw_to_dev(hw), r);
2952 }
2953 mutex_destroy(&hw->rss_locks);
2954 memset(hw->blk, 0, sizeof(hw->blk));
2955 }
2956
2957 /**
2958 * ice_init_flow_profs - init flow profile locks and list heads
2959 * @hw: pointer to the hardware structure
2960 * @blk_idx: HW block index
2961 */
2962 static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
2963 {
2964 mutex_init(&hw->fl_profs_locks[blk_idx]);
2965 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
2966 }
2967
2968 /**
2969 * ice_clear_hw_tbls - clear HW tables and flow profiles
2970 * @hw: pointer to the hardware structure
2971 */
2972 void ice_clear_hw_tbls(struct ice_hw *hw)
2973 {
2974 u8 i;
2975
2976 for (i = 0; i < ICE_BLK_COUNT; i++) {
2977 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
2978 struct ice_prof_tcam *prof = &hw->blk[i].prof;
2979 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
2980 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
2981 struct ice_es *es = &hw->blk[i].es;
2982
2983 if (hw->blk[i].is_list_init) {
2984 ice_free_prof_map(hw, i);
2985 ice_free_flow_profs(hw, i);
2986 }
2987
2988 ice_free_vsig_tbl(hw, (enum ice_block)i);
2989
2990 memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes));
2991 memset(xlt1->ptg_tbl, 0,
2992 ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl));
2993 memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t));
2994
2995 memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis));
2996 memset(xlt2->vsig_tbl, 0,
2997 xlt2->count * sizeof(*xlt2->vsig_tbl));
2998 memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t));
2999
3000 memset(prof->t, 0, prof->count * sizeof(*prof->t));
3001 memset(prof_redir->t, 0,
3002 prof_redir->count * sizeof(*prof_redir->t));
3003
3004 memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
3005 memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
3006 memset(es->written, 0, es->count * sizeof(*es->written));
3007 }
3008 }
3009
3010 /**
3011 * ice_init_hw_tbls - init hardware table memory
3012 * @hw: pointer to the hardware structure
3013 */
3014 enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
3015 {
3016 u8 i;
3017
3018 mutex_init(&hw->rss_locks);
3019 INIT_LIST_HEAD(&hw->rss_list_head);
3020 for (i = 0; i < ICE_BLK_COUNT; i++) {
3021 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
3022 struct ice_prof_tcam *prof = &hw->blk[i].prof;
3023 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
3024 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
3025 struct ice_es *es = &hw->blk[i].es;
3026 u16 j;
3027
3028 if (hw->blk[i].is_list_init)
3029 continue;
3030
3031 ice_init_flow_profs(hw, i);
3032 mutex_init(&es->prof_map_lock);
3033 INIT_LIST_HEAD(&es->prof_map);
3034 hw->blk[i].is_list_init = true;
3035
3036 hw->blk[i].overwrite = blk_sizes[i].overwrite;
3037 es->reverse = blk_sizes[i].reverse;
3038
3039 xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
3040 xlt1->count = blk_sizes[i].xlt1;
3041
3042 xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
3043 sizeof(*xlt1->ptypes), GFP_KERNEL);
3044
3045 if (!xlt1->ptypes)
3046 goto err;
3047
3048 xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS,
3049 sizeof(*xlt1->ptg_tbl),
3050 GFP_KERNEL);
3051
3052 if (!xlt1->ptg_tbl)
3053 goto err;
3054
3055 xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
3056 sizeof(*xlt1->t), GFP_KERNEL);
3057 if (!xlt1->t)
3058 goto err;
3059
3060 xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
3061 xlt2->count = blk_sizes[i].xlt2;
3062
3063 xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3064 sizeof(*xlt2->vsis), GFP_KERNEL);
3065
3066 if (!xlt2->vsis)
3067 goto err;
3068
3069 xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3070 sizeof(*xlt2->vsig_tbl),
3071 GFP_KERNEL);
3072 if (!xlt2->vsig_tbl)
3073 goto err;
3074
3075 for (j = 0; j < xlt2->count; j++)
3076 INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
3077
3078 xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3079 sizeof(*xlt2->t), GFP_KERNEL);
3080 if (!xlt2->t)
3081 goto err;
3082
3083 prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
3084 prof->count = blk_sizes[i].prof_tcam;
3085 prof->max_prof_id = blk_sizes[i].prof_id;
3086 prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
3087 prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count,
3088 sizeof(*prof->t), GFP_KERNEL);
3089
3090 if (!prof->t)
3091 goto err;
3092
3093 prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
3094 prof_redir->count = blk_sizes[i].prof_redir;
3095 prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw),
3096 prof_redir->count,
3097 sizeof(*prof_redir->t),
3098 GFP_KERNEL);
3099
3100 if (!prof_redir->t)
3101 goto err;
3102
3103 es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
3104 es->count = blk_sizes[i].es;
3105 es->fvw = blk_sizes[i].fvw;
3106 es->t = devm_kcalloc(ice_hw_to_dev(hw),
3107 (u32)(es->count * es->fvw),
3108 sizeof(*es->t), GFP_KERNEL);
3109 if (!es->t)
3110 goto err;
3111
3112 es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
3113 sizeof(*es->ref_count),
3114 GFP_KERNEL);
3115 if (!es->ref_count)
3116 goto err;
3117
3118 es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
3119 sizeof(*es->written), GFP_KERNEL);
3120 if (!es->written)
3121 goto err;
3122 }
3123 return 0;
3124
3125 err:
3126 ice_free_hw_tbls(hw);
3127 return ICE_ERR_NO_MEMORY;
3128 }
3129
3130 /**
3131 * ice_prof_gen_key - generate profile ID key
3132 * @hw: pointer to the HW struct
3133 * @blk: the block in which to write the profile ID
3134 * @ptg: packet type group (PTG) portion of key
3135 * @vsig: VSIG portion of key
3136 * @cdid: CDID portion of key
3137 * @flags: flag portion of key
3138 * @vl_msk: valid mask
3139 * @dc_msk: don't care mask
3140 * @nm_msk: never match mask
3141 * @key: output of profile ID key
3142 */
3143 static enum ice_status
3144 ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
3145 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
3146 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
3147 u8 key[ICE_TCAM_KEY_SZ])
3148 {
3149 struct ice_prof_id_key inkey;
3150
3151 inkey.xlt1 = ptg;
3152 inkey.xlt2_cdid = cpu_to_le16(vsig);
3153 inkey.flags = cpu_to_le16(flags);
3154
3155 switch (hw->blk[blk].prof.cdid_bits) {
3156 case 0:
3157 break;
3158 case 2:
3159 #define ICE_CD_2_M 0xC000U
3160 #define ICE_CD_2_S 14
3161 inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_2_M);
3162 inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_2_S);
3163 break;
3164 case 4:
3165 #define ICE_CD_4_M 0xF000U
3166 #define ICE_CD_4_S 12
3167 inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_4_M);
3168 inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_4_S);
3169 break;
3170 case 8:
3171 #define ICE_CD_8_M 0xFF00U
3172 #define ICE_CD_8_S 16
3173 inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_8_M);
3174 inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_8_S);
3175 break;
3176 default:
3177 ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
3178 break;
3179 }
3180
3181 return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
3182 nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
3183 }
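
/* Example for the 2-bit CDID case above (cdid value assumed): cdid = 1
 * gives BIT(1) << ICE_CD_2_S = 0x8000, i.e. the one-hot CDID lands in the
 * top two bits of xlt2_cdid while the low 14 bits still carry the VSIG.
 */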
3184
3185 /**
3186 * ice_tcam_write_entry - write TCAM entry
3187 * @hw: pointer to the HW struct
3188 * @blk: the block in which to write the profile ID
3189 * @idx: the entry index to write to
3190 * @prof_id: profile ID
3191 * @ptg: packet type group (PTG) portion of key
3192 * @vsig: VSIG portion of key
3193 * @cdid: CDID portion of key
3194 * @flags: flag portion of key
3195 * @vl_msk: valid mask
3196 * @dc_msk: don't care mask
3197 * @nm_msk: never match mask
3198 */
3199 static enum ice_status
3200 ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
3201 u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
3202 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
3203 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
3204 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
3205 {
3206 struct ice_prof_tcam_entry;
3207 enum ice_status status;
3208
3209 status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
3210 dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
3211 if (!status) {
3212 hw->blk[blk].prof.t[idx].addr = cpu_to_le16(idx);
3213 hw->blk[blk].prof.t[idx].prof_id = prof_id;
3214 }
3215
3216 return status;
3217 }
3218
3219 /**
3220 * ice_vsig_get_ref - returns number of VSIs belonging to a VSIG
3221 * @hw: pointer to the hardware structure
3222 * @blk: HW block
3223 * @vsig: VSIG to query
3224 * @refs: pointer to variable to receive the reference count
3225 */
3226 static enum ice_status
3227 ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
3228 {
3229 u16 idx = vsig & ICE_VSIG_IDX_M;
3230 struct ice_vsig_vsi *ptr;
3231
3232 *refs = 0;
3233
3234 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
3235 return ICE_ERR_DOES_NOT_EXIST;
3236
3237 ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3238 while (ptr) {
3239 (*refs)++;
3240 ptr = ptr->next_vsi;
3241 }
3242
3243 return 0;
3244 }
3245
3246 /**
3247 * ice_has_prof_vsig - check to see if VSIG has a specific profile
3248 * @hw: pointer to the hardware structure
3249 * @blk: HW block
3250 * @vsig: VSIG to check against
3251 * @hdl: profile handle
3252 */
3253 static bool
3254 ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
3255 {
3256 u16 idx = vsig & ICE_VSIG_IDX_M;
3257 struct ice_vsig_prof *ent;
3258
3259 list_for_each_entry(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3260 list)
3261 if (ent->profile_cookie == hdl)
3262 return true;
3263
3264 ice_debug(hw, ICE_DBG_INIT,
3265 "Characteristic list for VSI group %d not found.\n",
3266 vsig);
3267 return false;
3268 }
3269
3270 /**
3271 * ice_prof_bld_es - build profile ID extraction sequence changes
3272 * @hw: pointer to the HW struct
3273 * @blk: hardware block
3274 * @bld: the update package buffer build to add to
3275 * @chgs: the list of changes to make in hardware
3276 */
3277 static enum ice_status
3278 ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
3279 struct ice_buf_build *bld, struct list_head *chgs)
3280 {
3281 u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
3282 struct ice_chs_chg *tmp;
3283
3284 list_for_each_entry(tmp, chgs, list_entry)
3285 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
3286 u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
3287 struct ice_pkg_es *p;
3288 u32 id;
3289
3290 id = ice_sect_id(blk, ICE_VEC_TBL);
3291 p = ice_pkg_buf_alloc_section(bld, id,
3292 struct_size(p, es, 1) +
3293 vec_size -
3294 sizeof(p->es[0]));
3295
3296 if (!p)
3297 return ICE_ERR_MAX_LIMIT;
3298
3299 p->count = cpu_to_le16(1);
3300 p->offset = cpu_to_le16(tmp->prof_id);
3301
3302 memcpy(p->es, &hw->blk[blk].es.t[off], vec_size);
3303 }
3304
3305 return 0;
3306 }
3307
3308 /**
3309 * ice_prof_bld_tcam - build profile ID TCAM changes
3310 * @hw: pointer to the HW struct
3311 * @blk: hardware block
3312 * @bld: the update package buffer build to add to
3313 * @chgs: the list of changes to make in hardware
3314 */
3315 static enum ice_status
3316 ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
3317 struct ice_buf_build *bld, struct list_head *chgs)
3318 {
3319 struct ice_chs_chg *tmp;
3320
3321 list_for_each_entry(tmp, chgs, list_entry)
3322 if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
3323 struct ice_prof_id_section *p;
3324 u32 id;
3325
3326 id = ice_sect_id(blk, ICE_PROF_TCAM);
3327 p = ice_pkg_buf_alloc_section(bld, id,
3328 struct_size(p, entry, 1));
3329
3330 if (!p)
3331 return ICE_ERR_MAX_LIMIT;
3332
3333 p->count = cpu_to_le16(1);
3334 p->entry[0].addr = cpu_to_le16(tmp->tcam_idx);
3335 p->entry[0].prof_id = tmp->prof_id;
3336
3337 memcpy(p->entry[0].key,
3338 &hw->blk[blk].prof.t[tmp->tcam_idx].key,
3339 sizeof(hw->blk[blk].prof.t->key));
3340 }
3341
3342 return 0;
3343 }
3344
3345 /**
3346 * ice_prof_bld_xlt1 - build XLT1 changes
3347 * @blk: hardware block
3348 * @bld: the update package buffer build to add to
3349 * @chgs: the list of changes to make in hardware
3350 */
3351 static enum ice_status
3352 ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
3353 struct list_head *chgs)
3354 {
3355 struct ice_chs_chg *tmp;
3356
3357 list_for_each_entry(tmp, chgs, list_entry)
3358 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
3359 struct ice_xlt1_section *p;
3360 u32 id;
3361
3362 id = ice_sect_id(blk, ICE_XLT1);
3363 p = ice_pkg_buf_alloc_section(bld, id,
3364 struct_size(p, value, 1));
3365
3366 if (!p)
3367 return ICE_ERR_MAX_LIMIT;
3368
3369 p->count = cpu_to_le16(1);
3370 p->offset = cpu_to_le16(tmp->ptype);
3371 p->value[0] = tmp->ptg;
3372 }
3373
3374 return 0;
3375 }
3376
3377 /**
3378 * ice_prof_bld_xlt2 - build XLT2 changes
3379 * @blk: hardware block
3380 * @bld: the update package buffer build to add to
3381 * @chgs: the list of changes to make in hardware
3382 */
3383 static enum ice_status
3384 ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
3385 struct list_head *chgs)
3386 {
3387 struct ice_chs_chg *tmp;
3388
3389 list_for_each_entry(tmp, chgs, list_entry) {
3390 struct ice_xlt2_section *p;
3391 u32 id;
3392
3393 switch (tmp->type) {
3394 case ICE_VSIG_ADD:
3395 case ICE_VSI_MOVE:
3396 case ICE_VSIG_REM:
3397 id = ice_sect_id(blk, ICE_XLT2);
3398 p = ice_pkg_buf_alloc_section(bld, id,
3399 struct_size(p, value, 1));
3400
3401 if (!p)
3402 return ICE_ERR_MAX_LIMIT;
3403
3404 p->count = cpu_to_le16(1);
3405 p->offset = cpu_to_le16(tmp->vsi);
3406 p->value[0] = cpu_to_le16(tmp->vsig);
3407 break;
3408 default:
3409 break;
3410 }
3411 }
3412
3413 return 0;
3414 }
3415
3416 /**
3417 * ice_upd_prof_hw - update hardware using the change list
3418 * @hw: pointer to the HW struct
3419 * @blk: hardware block
3420 * @chgs: the list of changes to make in hardware
3421 */
3422 static enum ice_status
3423 ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
3424 struct list_head *chgs)
3425 {
3426 struct ice_buf_build *b;
3427 struct ice_chs_chg *tmp;
3428 enum ice_status status;
3429 u16 pkg_sects;
3430 u16 xlt1 = 0;
3431 u16 xlt2 = 0;
3432 u16 tcam = 0;
3433 u16 es = 0;
3434 u16 sects;
3435
3436 /* count number of sections we need */
3437 list_for_each_entry(tmp, chgs, list_entry) {
3438 switch (tmp->type) {
3439 case ICE_PTG_ES_ADD:
3440 if (tmp->add_ptg)
3441 xlt1++;
3442 if (tmp->add_prof)
3443 es++;
3444 break;
3445 case ICE_TCAM_ADD:
3446 tcam++;
3447 break;
3448 case ICE_VSIG_ADD:
3449 case ICE_VSI_MOVE:
3450 case ICE_VSIG_REM:
3451 xlt2++;
3452 break;
3453 default:
3454 break;
3455 }
3456 }
3457 sects = xlt1 + xlt2 + tcam + es;
3458
3459 if (!sects)
3460 return 0;
3461
3462 /* Build update package buffer */
3463 b = ice_pkg_buf_alloc(hw);
3464 if (!b)
3465 return ICE_ERR_NO_MEMORY;
3466
3467 status = ice_pkg_buf_reserve_section(b, sects);
3468 if (status)
3469 goto error_tmp;
3470
3471 /* Preserve order of table update: ES, TCAM, PTG, VSIG */
3472 if (es) {
3473 status = ice_prof_bld_es(hw, blk, b, chgs);
3474 if (status)
3475 goto error_tmp;
3476 }
3477
3478 if (tcam) {
3479 status = ice_prof_bld_tcam(hw, blk, b, chgs);
3480 if (status)
3481 goto error_tmp;
3482 }
3483
3484 if (xlt1) {
3485 status = ice_prof_bld_xlt1(blk, b, chgs);
3486 if (status)
3487 goto error_tmp;
3488 }
3489
3490 if (xlt2) {
3491 status = ice_prof_bld_xlt2(blk, b, chgs);
3492 if (status)
3493 goto error_tmp;
3494 }
3495
3496 /* After the package buffer build, check that the section count in the
3497 * buffer is non-zero and matches the number of sections detected for
3498 * the package update.
3499 */
3500 pkg_sects = ice_pkg_buf_get_active_sections(b);
3501 if (!pkg_sects || pkg_sects != sects) {
3502 status = ICE_ERR_INVAL_SIZE;
3503 goto error_tmp;
3504 }
3505
3506 /* update package */
3507 status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
3508 if (status == ICE_ERR_AQ_ERROR)
3509 ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
3510
3511 error_tmp:
3512 ice_pkg_buf_free(hw, b);
3513 return status;
3514 }
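
/* The ES -> TCAM -> PTG -> VSIG ordering preserved above is intended so
 * that later tables never reference state that has not been written yet,
 * e.g. a TCAM entry is only added once the extraction sequence for its
 * profile ID is in place.
 */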
3515
3516 /**
3517 * ice_update_fd_mask - set Flow Director Field Vector mask for a profile
3518 * @hw: pointer to the HW struct
3519 * @prof_id: profile ID
3520 * @mask_sel: mask select
3521 *
3522 * This function enables any of the masks selected by the mask select parameter
3523 * for the profile specified.
3524 */
3525 static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
3526 {
3527 wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);
3528
3529 ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
3530 GLQF_FDMASK_SEL(prof_id), mask_sel);
3531 }
3532
3533 struct ice_fd_src_dst_pair {
3534 u8 prot_id;
3535 u8 count;
3536 u16 off;
3537 };
3538
3539 static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
3540 /* These are defined in pairs */
3541 { ICE_PROT_IPV4_OF_OR_S, 2, 12 },
3542 { ICE_PROT_IPV4_OF_OR_S, 2, 16 },
3543
3544 { ICE_PROT_IPV4_IL, 2, 12 },
3545 { ICE_PROT_IPV4_IL, 2, 16 },
3546
3547 { ICE_PROT_IPV6_OF_OR_S, 8, 8 },
3548 { ICE_PROT_IPV6_OF_OR_S, 8, 24 },
3549
3550 { ICE_PROT_IPV6_IL, 8, 8 },
3551 { ICE_PROT_IPV6_IL, 8, 24 },
3552
3553 { ICE_PROT_TCP_IL, 1, 0 },
3554 { ICE_PROT_TCP_IL, 1, 2 },
3555
3556 { ICE_PROT_UDP_OF, 1, 0 },
3557 { ICE_PROT_UDP_OF, 1, 2 },
3558
3559 { ICE_PROT_UDP_IL_OR_S, 1, 0 },
3560 { ICE_PROT_UDP_IL_OR_S, 1, 2 },
3561
3562 { ICE_PROT_SCTP_IL, 1, 0 },
3563 { ICE_PROT_SCTP_IL, 1, 2 }
3564 };
3565
3566 #define ICE_FD_SRC_DST_PAIR_COUNT ARRAY_SIZE(ice_fd_pairs)
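
/* The table above is consumed two entries at a time: entry 2i describes a
 * source field and entry 2i + 1 its destination counterpart (for example,
 * the IPv4 source address at byte offset 12 pairs with the destination
 * address at offset 16). 'count' is the number of 2-byte FV words the
 * field occupies, so each IPv6 address needs 8 words. The partner of pair
 * member j is computed in ice_update_fd_swap() below as:
 *
 *	idx = j + ((j % 2) ? -1 : 1);
 */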
3567
3568 /**
3569 * ice_update_fd_swap - set register appropriately for a FD FV extraction
3570 * @hw: pointer to the HW struct
3571 * @prof_id: profile ID
3572 * @es: extraction sequence (length of array is determined by the block)
3573 */
3574 static enum ice_status
3575 ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
3576 {
3577 DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
3578 u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
3579 #define ICE_FD_FV_NOT_FOUND (-2)
3580 s8 first_free = ICE_FD_FV_NOT_FOUND;
3581 u8 used[ICE_MAX_FV_WORDS] = { 0 };
3582 s8 orig_free, si;
3583 u32 mask_sel = 0;
3584 u8 i, j, k;
3585
3586 bitmap_zero(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
3587
3588 /* This code assumes that the Flow Director field vectors are assigned
3589 * from the end of the FV indexes working towards the zero index, that
3590 * only complete fields will be included and will be consecutive, and
3591 * that there are no gaps between valid indexes.
3592 */
3593
3594 /* Determine swap fields present */
3595 for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
3596 /* Find the first free entry, assuming right to left population.
3597 * This is where we can start adding additional pairs if needed.
3598 */
3599 if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id !=
3600 ICE_PROT_INVALID)
3601 first_free = i - 1;
3602
3603 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
3604 if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
3605 es[i].off == ice_fd_pairs[j].off) {
3606 set_bit(j, pair_list);
3607 pair_start[j] = i;
3608 }
3609 }
3610
3611 orig_free = first_free;
3612
3613 /* determine missing swap fields that need to be added */
3614 for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) {
3615 u8 bit1 = test_bit(i + 1, pair_list);
3616 u8 bit0 = test_bit(i, pair_list);
3617
3618 if (bit0 ^ bit1) {
3619 u8 index;
3620
3621 /* add the appropriate 'paired' entry */
3622 if (!bit0)
3623 index = i;
3624 else
3625 index = i + 1;
3626
3627 /* check for room */
3628 if (first_free + 1 < (s8)ice_fd_pairs[index].count)
3629 return ICE_ERR_MAX_LIMIT;
3630
3631 /* place in extraction sequence */
3632 for (k = 0; k < ice_fd_pairs[index].count; k++) {
3633 es[first_free - k].prot_id =
3634 ice_fd_pairs[index].prot_id;
3635 es[first_free - k].off =
3636 ice_fd_pairs[index].off + (k * 2);
3637
3638 if (k > first_free)
3639 return ICE_ERR_OUT_OF_RANGE;
3640
3641 /* keep track of non-relevant fields */
3642 mask_sel |= BIT(first_free - k);
3643 }
3644
3645 pair_start[index] = first_free;
3646 first_free -= ice_fd_pairs[index].count;
3647 }
3648 }
3649
3650 /* fill in the swap array */
3651 si = hw->blk[ICE_BLK_FD].es.fvw - 1;
3652 while (si >= 0) {
3653 u8 indexes_used = 1;
3654
3655 /* assume flat at this index */
3656 #define ICE_SWAP_VALID 0x80
3657 used[si] = si | ICE_SWAP_VALID;
3658
3659 if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) {
3660 si -= indexes_used;
3661 continue;
3662 }
3663
3664 /* check for a swap location */
3665 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
3666 if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
3667 es[si].off == ice_fd_pairs[j].off) {
3668 u8 idx;
3669
3670 /* determine the appropriate matching field */
3671 idx = j + ((j % 2) ? -1 : 1);
3672
3673 indexes_used = ice_fd_pairs[idx].count;
3674 for (k = 0; k < indexes_used; k++) {
3675 used[si - k] = (pair_start[idx] - k) |
3676 ICE_SWAP_VALID;
3677 }
3678
3679 break;
3680 }
3681
3682 si -= indexes_used;
3683 }
3684
3685 /* for each set of 4 swap and 4 inset indexes, write the appropriate
3686 * register
3687 */
3688 for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
3689 u32 raw_swap = 0;
3690 u32 raw_in = 0;
3691
3692 for (k = 0; k < 4; k++) {
3693 u8 idx;
3694
3695 idx = (j * 4) + k;
3696 if (used[idx] && !(mask_sel & BIT(idx))) {
3697 raw_swap |= used[idx] << (k * BITS_PER_BYTE);
3698 #define ICE_INSET_DFLT 0x9f
3699 raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
3700 }
3701 }
3702
3703 /* write the appropriate swap register set */
3704 wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
3705
3706 ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
3707 prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
3708
3709 /* write the appropriate inset register set */
3710 wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);
3711
3712 ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
3713 prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
3714 }
3715
3716 /* initially clear the mask select for this profile */
3717 ice_update_fd_mask(hw, prof_id, 0);
3718
3719 return 0;
3720 }
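
/* Swap encoding sketch: each GLQF_FDSWAP register packs four one-byte
 * entries, where byte k of register j describes destination FV index
 * (j * 4) + k and carries the source index OR'd with ICE_SWAP_VALID:
 *
 *	raw_swap |= (src_idx | ICE_SWAP_VALID) << (k * BITS_PER_BYTE);
 *
 * The matching GLQF_FDINSET byte defaults to ICE_INSET_DFLT for every
 * populated index that is not masked off via mask_sel.
 */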
3721
3722 /**
3723 * ice_add_prof - add profile
3724 * @hw: pointer to the HW struct
3725 * @blk: hardware block
3726 * @id: profile tracking ID
3727 * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
3728 * @es: extraction sequence (length of array is determined by the block)
3729 *
3730 * This function registers a profile, which matches a set of PTGs with a
3731  * particular extraction sequence. While the hardware profile is allocated
3732  * here, it will not be written until the first call to ice_add_flow that
3733  * specifies the ID value used here.
3734 */
3735 enum ice_status
3736 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
3737 struct ice_fv_word *es)
3738 {
3739 u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
3740 DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
3741 struct ice_prof_map *prof;
3742 enum ice_status status;
3743 u8 byte = 0;
3744 u8 prof_id;
3745
3746 bitmap_zero(ptgs_used, ICE_XLT1_CNT);
3747
3748 mutex_lock(&hw->blk[blk].es.prof_map_lock);
3749
3750 /* search for existing profile */
3751 status = ice_find_prof_id(hw, blk, es, &prof_id);
3752 if (status) {
3753 /* allocate profile ID */
3754 status = ice_alloc_prof_id(hw, blk, &prof_id);
3755 if (status)
3756 goto err_ice_add_prof;
3757 if (blk == ICE_BLK_FD) {
3758 /* For Flow Director block, the extraction sequence may
3759 * need to be altered in the case where there are paired
3760 * fields that have no match. This is necessary because
3761  * for Flow Director, src and dest fields need to be paired
3762 * for filter programming and these values are swapped
3763 * during Tx.
3764 */
3765 status = ice_update_fd_swap(hw, prof_id, es);
3766 if (status)
3767 goto err_ice_add_prof;
3768 }
3769
3770 /* and write new es */
3771 ice_write_es(hw, blk, prof_id, es);
3772 }
3773
3774 ice_prof_inc_ref(hw, blk, prof_id);
3775
3776 /* add profile info */
3777 prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
3778 if (!prof) {
3779 status = ICE_ERR_NO_MEMORY;
3780 goto err_ice_add_prof;
3781 }
3782
3783 prof->profile_cookie = id;
3784 prof->prof_id = prof_id;
3785 prof->ptg_cnt = 0;
3786 prof->context = 0;
3787
3788 /* build list of ptgs */
3789 while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
3790 u8 bit;
3791
3792 if (!ptypes[byte]) {
3793 bytes--;
3794 byte++;
3795 continue;
3796 }
3797
3798 /* Examine 8 bits per byte */
3799 for_each_set_bit(bit, (unsigned long *)&ptypes[byte],
3800 BITS_PER_BYTE) {
3801 u16 ptype;
3802 u8 ptg;
3803 u8 m;
3804
3805 ptype = byte * BITS_PER_BYTE + bit;
3806
3807 /* The package should place all ptypes in a non-zero
3808 * PTG, so the following call should never fail.
3809 */
3810 if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
3811 continue;
3812
3813 /* If PTG is already added, skip and continue */
3814 if (test_bit(ptg, ptgs_used))
3815 continue;
3816
3817 set_bit(ptg, ptgs_used);
3818 prof->ptg[prof->ptg_cnt] = ptg;
3819
3820 if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
3821 break;
3822
3823 /* if nothing is left in this byte, then exit */
3824 m = ~(u8)((1 << (bit + 1)) - 1);
3825 if (!(ptypes[byte] & m))
3826 break;
3827 }
3828
3829 bytes--;
3830 byte++;
3831 }
3832
3833 list_add(&prof->list, &hw->blk[blk].es.prof_map);
3834 status = 0;
3835
3836 err_ice_add_prof:
3837 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3838 return status;
3839 }
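
/* Usage sketch (hypothetical cookie, block, and buffers): register an
 * extraction sequence under a caller-chosen tracking ID, then attach it
 * to a VSI; the hardware tables are only updated by the flow-add step.
 *
 *	u8 ptypes[DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE)] = {};
 *
 *	... set the ptype bits of interest and fill es[] ...
 *	status = ice_add_prof(hw, ICE_BLK_RSS, cookie, ptypes, es);
 *	if (!status)
 *		status = ice_add_prof_id_flow(hw, ICE_BLK_RSS, vsi, cookie);
 */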
3840
3841 /**
3842 * ice_search_prof_id - Search for a profile tracking ID
3843 * @hw: pointer to the HW struct
3844 * @blk: hardware block
3845 * @id: profile tracking ID
3846 *
3847 * This will search for a profile tracking ID which was previously added.
3848 * The profile map lock should be held before calling this function.
3849 */
3850 static struct ice_prof_map *
3851 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
3852 {
3853 struct ice_prof_map *entry = NULL;
3854 struct ice_prof_map *map;
3855
3856 list_for_each_entry(map, &hw->blk[blk].es.prof_map, list)
3857 if (map->profile_cookie == id) {
3858 entry = map;
3859 break;
3860 }
3861
3862 return entry;
3863 }
3864
3865 /**
3866 * ice_vsig_prof_id_count - count profiles in a VSIG
3867 * @hw: pointer to the HW struct
3868 * @blk: hardware block
3869  * @vsig: VSIG from which to count the profiles
3870 */
3871 static u16
3872 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
3873 {
3874 u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
3875 struct ice_vsig_prof *p;
3876
3877 list_for_each_entry(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3878 list)
3879 count++;
3880
3881 return count;
3882 }
3883
3884 /**
3885 * ice_rel_tcam_idx - release a TCAM index
3886 * @hw: pointer to the HW struct
3887 * @blk: hardware block
3888 * @idx: the index to release
3889 */
3890 static enum ice_status
3891 ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
3892 {
3893 /* Masks to invoke a never match entry */
3894 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3895 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
3896 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
3897 enum ice_status status;
3898
3899 /* write the TCAM entry */
3900 status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
3901 dc_msk, nm_msk);
3902 if (status)
3903 return status;
3904
3905 /* release the TCAM entry */
3906 status = ice_free_tcam_ent(hw, blk, idx);
3907
3908 return status;
3909 }
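
/* The masks above deliberately construct an inert key: every key bit is
 * valid (vl_msk), all bits except bit 0 of byte 0 are don't-care
 * (dc_msk), and that remaining bit is flagged never-match (nm_msk), so
 * the entry cannot hit until the index is reallocated and rewritten.
 * Typical pairing with the allocator (sketch):
 *
 *	status = ice_alloc_tcam_ent(hw, blk, &idx);
 *	...
 *	status = ice_rel_tcam_idx(hw, blk, idx);
 */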
3910
3911 /**
3912 * ice_rem_prof_id - remove one profile from a VSIG
3913 * @hw: pointer to the HW struct
3914 * @blk: hardware block
3915 * @prof: pointer to profile structure to remove
3916 */
3917 static enum ice_status
3918 ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
3919 struct ice_vsig_prof *prof)
3920 {
3921 enum ice_status status;
3922 u16 i;
3923
3924 for (i = 0; i < prof->tcam_count; i++)
3925 if (prof->tcam[i].in_use) {
3926 prof->tcam[i].in_use = false;
3927 status = ice_rel_tcam_idx(hw, blk,
3928 prof->tcam[i].tcam_idx);
3929 if (status)
3930 return ICE_ERR_HW_TABLE;
3931 }
3932
3933 return 0;
3934 }
3935
3936 /**
3937 * ice_rem_vsig - remove VSIG
3938 * @hw: pointer to the HW struct
3939 * @blk: hardware block
3940 * @vsig: the VSIG to remove
3941 * @chg: the change list
3942 */
3943 static enum ice_status
3944 ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
3945 struct list_head *chg)
3946 {
3947 u16 idx = vsig & ICE_VSIG_IDX_M;
3948 struct ice_vsig_vsi *vsi_cur;
3949 struct ice_vsig_prof *d, *t;
3950 enum ice_status status;
3951
3952 /* remove TCAM entries */
3953 list_for_each_entry_safe(d, t,
3954 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3955 list) {
3956 status = ice_rem_prof_id(hw, blk, d);
3957 if (status)
3958 return status;
3959
3960 list_del(&d->list);
3961 devm_kfree(ice_hw_to_dev(hw), d);
3962 }
3963
3964 /* Move all VSIs associated with this VSIG to the default VSIG */
3965 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3966 /* If the VSIG has at least 1 VSI then iterate through the list
3967 * and remove the VSIs before deleting the group.
3968 */
3969 if (vsi_cur)
3970 do {
3971 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
3972 struct ice_chs_chg *p;
3973
3974 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
3975 GFP_KERNEL);
3976 if (!p)
3977 return ICE_ERR_NO_MEMORY;
3978
3979 p->type = ICE_VSIG_REM;
3980 p->orig_vsig = vsig;
3981 p->vsig = ICE_DEFAULT_VSIG;
3982 p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
3983
3984 list_add(&p->list_entry, chg);
3985
3986 vsi_cur = tmp;
3987 } while (vsi_cur);
3988
3989 return ice_vsig_free(hw, blk, vsig);
3990 }
3991
3992 /**
3993 * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
3994 * @hw: pointer to the HW struct
3995 * @blk: hardware block
3996 * @vsig: VSIG to remove the profile from
3997 * @hdl: profile handle indicating which profile to remove
3998 * @chg: list to receive a record of changes
3999 */
4000 static enum ice_status
4001 ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
4002 struct list_head *chg)
4003 {
4004 u16 idx = vsig & ICE_VSIG_IDX_M;
4005 struct ice_vsig_prof *p, *t;
4006 enum ice_status status;
4007
4008 list_for_each_entry_safe(p, t,
4009 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4010 list)
4011 if (p->profile_cookie == hdl) {
4012 if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
4013 /* this is the last profile, remove the VSIG */
4014 return ice_rem_vsig(hw, blk, vsig, chg);
4015
4016 status = ice_rem_prof_id(hw, blk, p);
4017 if (!status) {
4018 list_del(&p->list);
4019 devm_kfree(ice_hw_to_dev(hw), p);
4020 }
4021 return status;
4022 }
4023
4024 return ICE_ERR_DOES_NOT_EXIST;
4025 }
4026
4027 /**
4028 * ice_rem_flow_all - remove all flows with a particular profile
4029 * @hw: pointer to the HW struct
4030 * @blk: hardware block
4031 * @id: profile tracking ID
4032 */
4033 static enum ice_status
4034 ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
4035 {
4036 struct ice_chs_chg *del, *tmp;
4037 enum ice_status status;
4038 struct list_head chg;
4039 u16 i;
4040
4041 INIT_LIST_HEAD(&chg);
4042
4043 for (i = 1; i < ICE_MAX_VSIGS; i++)
4044 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
4045 if (ice_has_prof_vsig(hw, blk, i, id)) {
4046 status = ice_rem_prof_id_vsig(hw, blk, i, id,
4047 &chg);
4048 if (status)
4049 goto err_ice_rem_flow_all;
4050 }
4051 }
4052
4053 status = ice_upd_prof_hw(hw, blk, &chg);
4054
4055 err_ice_rem_flow_all:
4056 list_for_each_entry_safe(del, tmp, &chg, list_entry) {
4057 list_del(&del->list_entry);
4058 devm_kfree(ice_hw_to_dev(hw), del);
4059 }
4060
4061 return status;
4062 }
4063
4064 /**
4065 * ice_rem_prof - remove profile
4066 * @hw: pointer to the HW struct
4067 * @blk: hardware block
4068 * @id: profile tracking ID
4069 *
4070 * This will remove the profile specified by the ID parameter, which was
4071 * previously created through ice_add_prof. If any existing entries
4072 * are associated with this profile, they will be removed as well.
4073 */
4074 enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
4075 {
4076 struct ice_prof_map *pmap;
4077 enum ice_status status;
4078
4079 mutex_lock(&hw->blk[blk].es.prof_map_lock);
4080
4081 pmap = ice_search_prof_id(hw, blk, id);
4082 if (!pmap) {
4083 status = ICE_ERR_DOES_NOT_EXIST;
4084 goto err_ice_rem_prof;
4085 }
4086
4087 /* remove all flows with this profile */
4088 status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
4089 if (status)
4090 goto err_ice_rem_prof;
4091
4092 /* dereference profile, and possibly remove */
4093 ice_prof_dec_ref(hw, blk, pmap->prof_id);
4094
4095 list_del(&pmap->list);
4096 devm_kfree(ice_hw_to_dev(hw), pmap);
4097
4098 err_ice_rem_prof:
4099 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4100 return status;
4101 }
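
/* Teardown sketch (hypothetical cookie): a single call removes every flow
 * added under the tracking ID and then drops the profile's hardware
 * reference:
 *
 *	status = ice_rem_prof(hw, ICE_BLK_RSS, cookie);
 */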
4102
4103 /**
4104 * ice_get_prof - get profile
4105 * @hw: pointer to the HW struct
4106 * @blk: hardware block
4107 * @hdl: profile handle
4108 * @chg: change list
4109 */
4110 static enum ice_status
4111 ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
4112 struct list_head *chg)
4113 {
4114 enum ice_status status = 0;
4115 struct ice_prof_map *map;
4116 struct ice_chs_chg *p;
4117 u16 i;
4118
4119 mutex_lock(&hw->blk[blk].es.prof_map_lock);
4120 /* Get the details on the profile specified by the handle ID */
4121 map = ice_search_prof_id(hw, blk, hdl);
4122 if (!map) {
4123 status = ICE_ERR_DOES_NOT_EXIST;
4124 goto err_ice_get_prof;
4125 }
4126
4127 for (i = 0; i < map->ptg_cnt; i++)
4128 if (!hw->blk[blk].es.written[map->prof_id]) {
4129 /* add ES to change list */
4130 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
4131 GFP_KERNEL);
4132 if (!p) {
4133 status = ICE_ERR_NO_MEMORY;
4134 goto err_ice_get_prof;
4135 }
4136
4137 p->type = ICE_PTG_ES_ADD;
4138 p->ptype = 0;
4139 p->ptg = map->ptg[i];
4140 p->add_ptg = 0;
4141
4142 p->add_prof = 1;
4143 p->prof_id = map->prof_id;
4144
4145 hw->blk[blk].es.written[map->prof_id] = true;
4146
4147 list_add(&p->list_entry, chg);
4148 }
4149
4150 err_ice_get_prof:
4151 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4152 /* let caller clean up the change list */
4153 return status;
4154 }
4155
4156 /**
4157 * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
4158 * @hw: pointer to the HW struct
4159 * @blk: hardware block
4160 * @vsig: VSIG from which to copy the list
4161 * @lst: output list
4162 *
4163 * This routine makes a copy of the list of profiles in the specified VSIG.
4164 */
4165 static enum ice_status
4166 ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
4167 struct list_head *lst)
4168 {
4169 struct ice_vsig_prof *ent1, *ent2;
4170 u16 idx = vsig & ICE_VSIG_IDX_M;
4171
4172 list_for_each_entry(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4173 list) {
4174 struct ice_vsig_prof *p;
4175
4176 /* copy to the input list */
4177 p = devm_kmemdup(ice_hw_to_dev(hw), ent1, sizeof(*p),
4178 GFP_KERNEL);
4179 if (!p)
4180 goto err_ice_get_profs_vsig;
4181
4182 list_add_tail(&p->list, lst);
4183 }
4184
4185 return 0;
4186
4187 err_ice_get_profs_vsig:
4188 list_for_each_entry_safe(ent1, ent2, lst, list) {
4189 list_del(&ent1->list);
4190 devm_kfree(ice_hw_to_dev(hw), ent1);
4191 }
4192
4193 return ICE_ERR_NO_MEMORY;
4194 }
4195
4196 /**
4197 * ice_add_prof_to_lst - add profile entry to a list
4198 * @hw: pointer to the HW struct
4199 * @blk: hardware block
4200 * @lst: the list to be added to
4201 * @hdl: profile handle of entry to add
4202 */
4203 static enum ice_status
4204 ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
4205 struct list_head *lst, u64 hdl)
4206 {
4207 enum ice_status status = 0;
4208 struct ice_prof_map *map;
4209 struct ice_vsig_prof *p;
4210 u16 i;
4211
4212 mutex_lock(&hw->blk[blk].es.prof_map_lock);
4213 map = ice_search_prof_id(hw, blk, hdl);
4214 if (!map) {
4215 status = ICE_ERR_DOES_NOT_EXIST;
4216 goto err_ice_add_prof_to_lst;
4217 }
4218
4219 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
4220 if (!p) {
4221 status = ICE_ERR_NO_MEMORY;
4222 goto err_ice_add_prof_to_lst;
4223 }
4224
4225 p->profile_cookie = map->profile_cookie;
4226 p->prof_id = map->prof_id;
4227 p->tcam_count = map->ptg_cnt;
4228
4229 for (i = 0; i < map->ptg_cnt; i++) {
4230 p->tcam[i].prof_id = map->prof_id;
4231 p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
4232 p->tcam[i].ptg = map->ptg[i];
4233 }
4234
4235 list_add(&p->list, lst);
4236
4237 err_ice_add_prof_to_lst:
4238 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4239 return status;
4240 }
4241
4242 /**
4243 * ice_move_vsi - move VSI to another VSIG
4244 * @hw: pointer to the HW struct
4245 * @blk: hardware block
4246 * @vsi: the VSI to move
4247 * @vsig: the VSIG to move the VSI to
4248 * @chg: the change list
4249 */
4250 static enum ice_status
4251 ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
4252 struct list_head *chg)
4253 {
4254 enum ice_status status;
4255 struct ice_chs_chg *p;
4256 u16 orig_vsig;
4257
4258 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
4259 if (!p)
4260 return ICE_ERR_NO_MEMORY;
4261
4262 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
4263 if (!status)
4264 status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
4265
4266 if (status) {
4267 devm_kfree(ice_hw_to_dev(hw), p);
4268 return status;
4269 }
4270
4271 p->type = ICE_VSI_MOVE;
4272 p->vsi = vsi;
4273 p->orig_vsig = orig_vsig;
4274 p->vsig = vsig;
4275
4276 list_add(&p->list_entry, chg);
4277
4278 return 0;
4279 }
4280
4281 /**
4282 * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
4283 * @hw: pointer to the HW struct
4284 * @idx: the index of the TCAM entry to remove
4285 * @chg: the list of change structures to search
4286 */
4287 static void
4288 ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
4289 {
4290 struct ice_chs_chg *pos, *tmp;
4291
4292 list_for_each_entry_safe(tmp, pos, chg, list_entry)
4293 if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
4294 list_del(&tmp->list_entry);
4295 devm_kfree(ice_hw_to_dev(hw), tmp);
4296 }
4297 }
4298
4299 /**
4300 * ice_prof_tcam_ena_dis - add enable or disable TCAM change
4301 * @hw: pointer to the HW struct
4302 * @blk: hardware block
4303 * @enable: true to enable, false to disable
4304 * @vsig: the VSIG of the TCAM entry
4305  * @tcam: pointer to the TCAM info structure of the TCAM to enable or disable
4306  * @chg: the change list
4307  *
4308  * This function appends an enable or disable TCAM entry to the change list
4309 */
4310 static enum ice_status
4311 ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
4312 u16 vsig, struct ice_tcam_inf *tcam,
4313 struct list_head *chg)
4314 {
4315 enum ice_status status;
4316 struct ice_chs_chg *p;
4317
4318 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4319 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
4320 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
4321
4322 /* if disabling, free the TCAM */
4323 if (!enable) {
4324 status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
4325
4326 /* if we have already created a change for this TCAM entry, then
4327 * we need to remove that entry, in order to prevent writing to
4328  * a TCAM entry we will no longer own.
4329 */
4330 ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
4331 tcam->tcam_idx = 0;
4332 tcam->in_use = 0;
4333 return status;
4334 }
4335
4336 /* for re-enabling, reallocate a TCAM */
4337 status = ice_alloc_tcam_ent(hw, blk, &tcam->tcam_idx);
4338 if (status)
4339 return status;
4340
4341 /* add TCAM to change list */
4342 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
4343 if (!p)
4344 return ICE_ERR_NO_MEMORY;
4345
4346 status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
4347 tcam->ptg, vsig, 0, 0, vl_msk, dc_msk,
4348 nm_msk);
4349 if (status)
4350 goto err_ice_prof_tcam_ena_dis;
4351
4352 tcam->in_use = 1;
4353
4354 p->type = ICE_TCAM_ADD;
4355 p->add_tcam_idx = true;
4356 p->prof_id = tcam->prof_id;
4357 p->ptg = tcam->ptg;
4358 p->vsig = 0;
4359 p->tcam_idx = tcam->tcam_idx;
4360
4361 /* log change */
4362 list_add(&p->list_entry, chg);
4363
4364 return 0;
4365
4366 err_ice_prof_tcam_ena_dis:
4367 devm_kfree(ice_hw_to_dev(hw), p);
4368 return status;
4369 }
4370
4371 /**
4372 * ice_adj_prof_priorities - adjust profile based on priorities
4373 * @hw: pointer to the HW struct
4374 * @blk: hardware block
4375 * @vsig: the VSIG for which to adjust profile priorities
4376 * @chg: the change list
4377 */
4378 static enum ice_status
4379 ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
4380 struct list_head *chg)
4381 {
4382 DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
4383 struct ice_vsig_prof *t;
4384 enum ice_status status;
4385 u16 idx;
4386
4387 bitmap_zero(ptgs_used, ICE_XLT1_CNT);
4388 idx = vsig & ICE_VSIG_IDX_M;
4389
4390 /* Priority is based on the order in which the profiles are added. The
4391 * newest added profile has highest priority and the oldest added
4392 * profile has the lowest priority. Since the profile property list for
4393 * a VSIG is sorted from newest to oldest, this code traverses the list
4394 * in order and enables the first of each PTG that it finds (that is not
4395 * already enabled); it also disables any duplicate PTGs that it finds
4396 * in the older profiles (that are currently enabled).
4397 */
4398
4399 list_for_each_entry(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4400 list) {
4401 u16 i;
4402
4403 for (i = 0; i < t->tcam_count; i++) {
4404 /* Scan the priorities from newest to oldest.
4405 * Make sure that the newest profiles take priority.
4406 */
4407 if (test_bit(t->tcam[i].ptg, ptgs_used) &&
4408 t->tcam[i].in_use) {
4409 /* need to mark this PTG as never match, as it
4410 * was already in use and therefore duplicate
4411 * (and lower priority)
4412 */
4413 status = ice_prof_tcam_ena_dis(hw, blk, false,
4414 vsig,
4415 &t->tcam[i],
4416 chg);
4417 if (status)
4418 return status;
4419 } else if (!test_bit(t->tcam[i].ptg, ptgs_used) &&
4420 !t->tcam[i].in_use) {
4421 /* need to enable this PTG, as it is not in use
4422 * and not enabled (highest priority)
4423 */
4424 status = ice_prof_tcam_ena_dis(hw, blk, true,
4425 vsig,
4426 &t->tcam[i],
4427 chg);
4428 if (status)
4429 return status;
4430 }
4431
4432 /* keep track of used ptgs */
4433 set_bit(t->tcam[i].ptg, ptgs_used);
4434 }
4435 }
4436
4437 return 0;
4438 }
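
/* Worked illustration (hypothetical contents): if a VSIG's property list
 * holds, newest first, profile B covering PTGs {3, 5} and older profile A
 * covering {5, 7}, the walk above enables B's entries for PTGs 3 and 5,
 * disables A's now-duplicate entry for PTG 5, and leaves A's entry for
 * PTG 7 enabled.
 */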
4439
4440 /**
4441 * ice_add_prof_id_vsig - add profile to VSIG
4442 * @hw: pointer to the HW struct
4443 * @blk: hardware block
4444 * @vsig: the VSIG to which this profile is to be added
4445 * @hdl: the profile handle indicating the profile to add
4446 * @rev: true to add entries to the end of the list
4447 * @chg: the change list
4448 */
4449 static enum ice_status
4450 ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
4451 bool rev, struct list_head *chg)
4452 {
4453 /* Masks that ignore flags */
4454 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4455 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
4456 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
4457 enum ice_status status = 0;
4458 struct ice_prof_map *map;
4459 struct ice_vsig_prof *t;
4460 struct ice_chs_chg *p;
4461 u16 vsig_idx, i;
4462
4463 /* Error, if this VSIG already has this profile */
4464 if (ice_has_prof_vsig(hw, blk, vsig, hdl))
4465 return ICE_ERR_ALREADY_EXISTS;
4466
4467 /* new VSIG profile structure */
4468 t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL);
4469 if (!t)
4470 return ICE_ERR_NO_MEMORY;
4471
4472 mutex_lock(&hw->blk[blk].es.prof_map_lock);
4473 /* Get the details on the profile specified by the handle ID */
4474 map = ice_search_prof_id(hw, blk, hdl);
4475 if (!map) {
4476 status = ICE_ERR_DOES_NOT_EXIST;
4477 goto err_ice_add_prof_id_vsig;
4478 }
4479
4480 t->profile_cookie = map->profile_cookie;
4481 t->prof_id = map->prof_id;
4482 t->tcam_count = map->ptg_cnt;
4483
4484 /* create TCAM entries */
4485 for (i = 0; i < map->ptg_cnt; i++) {
4486 u16 tcam_idx;
4487
4488 /* add TCAM to change list */
4489 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
4490 if (!p) {
4491 status = ICE_ERR_NO_MEMORY;
4492 goto err_ice_add_prof_id_vsig;
4493 }
4494
4495 /* allocate the TCAM entry index */
4496 status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
4497 if (status) {
4498 devm_kfree(ice_hw_to_dev(hw), p);
4499 goto err_ice_add_prof_id_vsig;
4500 }
4501
4502 t->tcam[i].ptg = map->ptg[i];
4503 t->tcam[i].prof_id = map->prof_id;
4504 t->tcam[i].tcam_idx = tcam_idx;
4505 t->tcam[i].in_use = true;
4506
4507 p->type = ICE_TCAM_ADD;
4508 p->add_tcam_idx = true;
4509 p->prof_id = t->tcam[i].prof_id;
4510 p->ptg = t->tcam[i].ptg;
4511 p->vsig = vsig;
4512 p->tcam_idx = t->tcam[i].tcam_idx;
4513
4514 /* write the TCAM entry */
4515 status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
4516 t->tcam[i].prof_id,
4517 t->tcam[i].ptg, vsig, 0, 0,
4518 vl_msk, dc_msk, nm_msk);
4519 if (status) {
4520 devm_kfree(ice_hw_to_dev(hw), p);
4521 goto err_ice_add_prof_id_vsig;
4522 }
4523
4524 /* log change */
4525 list_add(&p->list_entry, chg);
4526 }
4527
4528 /* add profile to VSIG */
4529 vsig_idx = vsig & ICE_VSIG_IDX_M;
4530 if (rev)
4531 list_add_tail(&t->list,
4532 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
4533 else
4534 list_add(&t->list,
4535 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
4536
4537 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4538 return status;
4539
4540 err_ice_add_prof_id_vsig:
4541 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4542 /* let caller clean up the change list */
4543 devm_kfree(ice_hw_to_dev(hw), t);
4544 return status;
4545 }
4546
4547 /**
4548 * ice_create_prof_id_vsig - add a new VSIG with a single profile
4549 * @hw: pointer to the HW struct
4550 * @blk: hardware block
4551 * @vsi: the initial VSI that will be in VSIG
4552 * @hdl: the profile handle of the profile that will be added to the VSIG
4553 * @chg: the change list
4554 */
4555 static enum ice_status
4556 ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
4557 struct list_head *chg)
4558 {
4559 enum ice_status status;
4560 struct ice_chs_chg *p;
4561 u16 new_vsig;
4562
4563 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
4564 if (!p)
4565 return ICE_ERR_NO_MEMORY;
4566
4567 new_vsig = ice_vsig_alloc(hw, blk);
4568 if (!new_vsig) {
4569 status = ICE_ERR_HW_TABLE;
4570 goto err_ice_create_prof_id_vsig;
4571 }
4572
4573 status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
4574 if (status)
4575 goto err_ice_create_prof_id_vsig;
4576
4577 status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
4578 if (status)
4579 goto err_ice_create_prof_id_vsig;
4580
4581 p->type = ICE_VSIG_ADD;
4582 p->vsi = vsi;
4583 p->orig_vsig = ICE_DEFAULT_VSIG;
4584 p->vsig = new_vsig;
4585
4586 list_add(&p->list_entry, chg);
4587
4588 return 0;
4589
4590 err_ice_create_prof_id_vsig:
4591 /* let caller clean up the change list */
4592 devm_kfree(ice_hw_to_dev(hw), p);
4593 return status;
4594 }
4595
4596 /**
4597 * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
4598 * @hw: pointer to the HW struct
4599 * @blk: hardware block
4600 * @vsi: the initial VSI that will be in VSIG
4601  * @lst: the list of profiles that will be added to the VSIG
4602 * @new_vsig: return of new VSIG
4603 * @chg: the change list
4604 */
4605 static enum ice_status
4606 ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
4607 struct list_head *lst, u16 *new_vsig,
4608 struct list_head *chg)
4609 {
4610 struct ice_vsig_prof *t;
4611 enum ice_status status;
4612 u16 vsig;
4613
4614 vsig = ice_vsig_alloc(hw, blk);
4615 if (!vsig)
4616 return ICE_ERR_HW_TABLE;
4617
4618 status = ice_move_vsi(hw, blk, vsi, vsig, chg);
4619 if (status)
4620 return status;
4621
4622 list_for_each_entry(t, lst, list) {
4623 /* Reverse the order here since we are copying the list */
4624 status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
4625 true, chg);
4626 if (status)
4627 return status;
4628 }
4629
4630 *new_vsig = vsig;
4631
4632 return 0;
4633 }
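
/* Note on rev = true above: the source list is ordered newest first and
 * ice_add_prof_id_vsig() normally prepends with list_add(), which would
 * invert priorities while copying. Appending with list_add_tail() keeps
 * the new VSIG's property list in the same newest-first order as the
 * original.
 */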
4634
4635 /**
4636 * ice_find_prof_vsig - find a VSIG with a specific profile handle
4637 * @hw: pointer to the HW struct
4638 * @blk: hardware block
4639 * @hdl: the profile handle of the profile to search for
4640 * @vsig: returns the VSIG with the matching profile
4641 */
4642 static bool
4643 ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
4644 {
4645 struct ice_vsig_prof *t;
4646 enum ice_status status;
4647 struct list_head lst;
4648
4649 INIT_LIST_HEAD(&lst);
4650
4651 t = kzalloc(sizeof(*t), GFP_KERNEL);
4652 if (!t)
4653 return false;
4654
4655 t->profile_cookie = hdl;
4656 list_add(&t->list, &lst);
4657
4658 status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
4659
4660 list_del(&t->list);
4661 kfree(t);
4662
4663 return !status;
4664 }
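
/* This helper reuses ice_find_dup_props_vsig() by wrapping the single
 * handle in a temporary one-entry list, so "a VSIG whose profile list is
 * exactly {hdl}" becomes an ordinary duplicate-list query.
 */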
4665
4666 /**
4667 * ice_add_prof_id_flow - add profile flow
4668 * @hw: pointer to the HW struct
4669 * @blk: hardware block
4670 * @vsi: the VSI to enable with the profile specified by ID
4671 * @hdl: profile handle
4672 *
4673  * Calling this function will update the hardware tables to enable the
4674  * profile indicated by the ID parameter for the VSI specified by the vsi
4675  * parameter. Once successfully called, the flow will be enabled.
4676 */
4677 enum ice_status
4678 ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
4679 {
4680 struct ice_vsig_prof *tmp1, *del1;
4681 struct ice_chs_chg *tmp, *del;
4682 struct list_head union_lst;
4683 enum ice_status status;
4684 struct list_head chg;
4685 u16 vsig;
4686
4687 INIT_LIST_HEAD(&union_lst);
4688 INIT_LIST_HEAD(&chg);
4689
4690 /* Get profile */
4691 status = ice_get_prof(hw, blk, hdl, &chg);
4692 if (status)
4693 return status;
4694
4695 /* determine if VSI is already part of a VSIG */
4696 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
4697 if (!status && vsig) {
4698 bool only_vsi;
4699 u16 or_vsig;
4700 u16 ref;
4701
4702 /* found in VSIG */
4703 or_vsig = vsig;
4704
4705 /* make sure that there is no overlap/conflict between the new
4706 * characteristics and the existing ones; we don't support that
4707 * scenario
4708 */
4709 if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
4710 status = ICE_ERR_ALREADY_EXISTS;
4711 goto err_ice_add_prof_id_flow;
4712 }
4713
4714 /* last VSI in the VSIG? */
4715 status = ice_vsig_get_ref(hw, blk, vsig, &ref);
4716 if (status)
4717 goto err_ice_add_prof_id_flow;
4718 only_vsi = (ref == 1);
4719
4720 /* create a union of the current profiles and the one being
4721 * added
4722 */
4723 status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
4724 if (status)
4725 goto err_ice_add_prof_id_flow;
4726
4727 status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
4728 if (status)
4729 goto err_ice_add_prof_id_flow;
4730
4731 /* search for an existing VSIG with an exact characteristic match */
4732 status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
4733 if (!status) {
4734 /* move VSI to the VSIG that matches */
4735 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
4736 if (status)
4737 goto err_ice_add_prof_id_flow;
4738
4739 /* VSI has been moved out of or_vsig. If the or_vsig had
4740 * only that VSI it is now empty and can be removed.
4741 */
4742 if (only_vsi) {
4743 status = ice_rem_vsig(hw, blk, or_vsig, &chg);
4744 if (status)
4745 goto err_ice_add_prof_id_flow;
4746 }
4747 } else if (only_vsi) {
4748 /* If the original VSIG only contains one VSI, then it
4749 * will be the requesting VSI. In this case the VSI is
4750 * not sharing entries and we can simply add the new
4751 * profile to the VSIG.
4752 */
4753 status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
4754 &chg);
4755 if (status)
4756 goto err_ice_add_prof_id_flow;
4757
4758 /* Adjust priorities */
4759 status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
4760 if (status)
4761 goto err_ice_add_prof_id_flow;
4762 } else {
4763 /* No match, so we need a new VSIG */
4764 status = ice_create_vsig_from_lst(hw, blk, vsi,
4765 &union_lst, &vsig,
4766 &chg);
4767 if (status)
4768 goto err_ice_add_prof_id_flow;
4769
4770 /* Adjust priorities */
4771 status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
4772 if (status)
4773 goto err_ice_add_prof_id_flow;
4774 }
4775 } else {
4776 /* need to find or add a VSIG */
4777 /* search for an existing VSIG with an exact characteristic match */
4778 if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
4779 /* found an exact match */
4780 /* add or move VSI to the VSIG that matches */
4781 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
4782 if (status)
4783 goto err_ice_add_prof_id_flow;
4784 } else {
4785 /* we did not find an exact match */
4786 /* we need to add a VSIG */
4787 status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
4788 &chg);
4789 if (status)
4790 goto err_ice_add_prof_id_flow;
4791 }
4792 }
4793
4794 /* update hardware */
4795 if (!status)
4796 status = ice_upd_prof_hw(hw, blk, &chg);
4797
4798 err_ice_add_prof_id_flow:
4799 list_for_each_entry_safe(del, tmp, &chg, list_entry) {
4800 list_del(&del->list_entry);
4801 devm_kfree(ice_hw_to_dev(hw), del);
4802 }
4803
4804 list_for_each_entry_safe(del1, tmp1, &union_lst, list) {
4805 list_del(&del1->list);
4806 devm_kfree(ice_hw_to_dev(hw), del1);
4807 }
4808
4809 return status;
4810 }
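
/* End-to-end sketch (hypothetical caller): the expected pairing is one
 * ice_add_prof() per extraction sequence, one ice_add_prof_id_flow() per
 * VSI that uses it, and the removal functions unwinding in reverse:
 *
 *	status = ice_add_prof(hw, blk, cookie, ptypes, es);
 *	status = ice_add_prof_id_flow(hw, blk, vsi, cookie);
 *	...
 *	status = ice_rem_prof_id_flow(hw, blk, vsi, cookie);
 *	status = ice_rem_prof(hw, blk, cookie);
 */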
4811
4812 /**
4813 * ice_rem_prof_from_list - remove a profile from list
4814 * @hw: pointer to the HW struct
4815 * @lst: list to remove the profile from
4816 * @hdl: the profile handle indicating the profile to remove
4817 */
4818 static enum ice_status
4819 ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
4820 {
4821 struct ice_vsig_prof *ent, *tmp;
4822
4823 list_for_each_entry_safe(ent, tmp, lst, list)
4824 if (ent->profile_cookie == hdl) {
4825 list_del(&ent->list);
4826 devm_kfree(ice_hw_to_dev(hw), ent);
4827 return 0;
4828 }
4829
4830 return ICE_ERR_DOES_NOT_EXIST;
4831 }
4832
4833 /**
4834 * ice_rem_prof_id_flow - remove flow
4835 * @hw: pointer to the HW struct
4836 * @blk: hardware block
4837 * @vsi: the VSI from which to remove the profile specified by ID
4838 * @hdl: profile tracking handle
4839 *
4840  * Calling this function will update the hardware tables to remove the
4841  * profile indicated by the ID parameter for the VSI specified by the vsi
4842  * parameter. Once successfully called, the flow will be disabled.
4843 */
4844 enum ice_status
4845 ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
4846 {
4847 struct ice_vsig_prof *tmp1, *del1;
4848 struct ice_chs_chg *tmp, *del;
4849 struct list_head chg, copy;
4850 enum ice_status status;
4851 u16 vsig;
4852
4853 INIT_LIST_HEAD(&copy);
4854 INIT_LIST_HEAD(&chg);
4855
4856 /* determine if VSI is already part of a VSIG */
4857 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
4858 if (!status && vsig) {
4859 bool last_profile;
4860 bool only_vsi;
4861 u16 ref;
4862
4863 /* found in VSIG */
4864 last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
4865 status = ice_vsig_get_ref(hw, blk, vsig, &ref);
4866 if (status)
4867 goto err_ice_rem_prof_id_flow;
4868 only_vsi = (ref == 1);
4869
4870 if (only_vsi) {
4871 /* If the original VSIG only contains one reference,
4872 * which will be the requesting VSI, then the VSI is not
4873 * sharing entries and we can simply remove the specific
4874 * characteristics from the VSIG.
4875 */
4876
4877 if (last_profile) {
4878 /* If there are no profiles left for this VSIG,
4879 * then simply remove the VSIG.
4880 */
4881 status = ice_rem_vsig(hw, blk, vsig, &chg);
4882 if (status)
4883 goto err_ice_rem_prof_id_flow;
4884 } else {
4885 status = ice_rem_prof_id_vsig(hw, blk, vsig,
4886 hdl, &chg);
4887 if (status)
4888 goto err_ice_rem_prof_id_flow;
4889
4890 /* Adjust priorities */
4891 status = ice_adj_prof_priorities(hw, blk, vsig,
4892 &chg);
4893 if (status)
4894 goto err_ice_rem_prof_id_flow;
4895 }
4896
4897 } else {
4898 /* Make a copy of the VSIG's list of Profiles */
4899 status = ice_get_profs_vsig(hw, blk, vsig, &copy);
4900 if (status)
4901 goto err_ice_rem_prof_id_flow;
4902
4903 /* Remove specified profile entry from the list */
4904 status = ice_rem_prof_from_list(hw, &copy, hdl);
4905 if (status)
4906 goto err_ice_rem_prof_id_flow;
4907
4908 if (list_empty(&copy)) {
4909 status = ice_move_vsi(hw, blk, vsi,
4910 ICE_DEFAULT_VSIG, &chg);
4911 if (status)
4912 goto err_ice_rem_prof_id_flow;
4913
4914 } else if (!ice_find_dup_props_vsig(hw, blk, &copy,
4915 &vsig)) {
4916 /* found a VSIG with a matching profile
4917  * list; move the VSI to that VSIG
4918  */
4923 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
4924 if (status)
4925 goto err_ice_rem_prof_id_flow;
4926 } else {
4927 /* since no existing VSIG supports this
4928 * characteristic pattern, we need to create a
4929 * new VSIG and TCAM entries
4930 */
4931 status = ice_create_vsig_from_lst(hw, blk, vsi,
4932 &copy, &vsig,
4933 &chg);
4934 if (status)
4935 goto err_ice_rem_prof_id_flow;
4936
4937 /* Adjust priorities */
4938 status = ice_adj_prof_priorities(hw, blk, vsig,
4939 &chg);
4940 if (status)
4941 goto err_ice_rem_prof_id_flow;
4942 }
4943 }
4944 } else {
4945 status = ICE_ERR_DOES_NOT_EXIST;
4946 }
4947
4948 /* update hardware tables */
4949 if (!status)
4950 status = ice_upd_prof_hw(hw, blk, &chg);
4951
4952 err_ice_rem_prof_id_flow:
4953 list_for_each_entry_safe(del, tmp, &chg, list_entry) {
4954 list_del(&del->list_entry);
4955 devm_kfree(ice_hw_to_dev(hw), del);
4956 }
4957
4958 list_for_each_entry_safe(del1, tmp1, &copy, list) {
4959 list_del(&del1->list);
4960 devm_kfree(ice_hw_to_dev(hw), del1);
4961 }
4962
4963 return status;
4964 }
4965