1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019, Intel Corporation. */
3
4 #include "ice_common.h"
5 #include "ice_flex_pipe.h"
6
7 /**
8 * ice_pkg_val_buf
9 * @buf: pointer to the ice buffer
10 *
11 * This helper function validates a buffer's header.
12 */
static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
14 {
15 struct ice_buf_hdr *hdr;
16 u16 section_count;
17 u16 data_end;
18
19 hdr = (struct ice_buf_hdr *)buf->buf;
20 /* verify data */
21 section_count = le16_to_cpu(hdr->section_count);
22 if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
23 return NULL;
24
25 data_end = le16_to_cpu(hdr->data_end);
26 if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
27 return NULL;
28
29 return hdr;
30 }
31
32 /**
33 * ice_find_buf_table
34 * @ice_seg: pointer to the ice segment
35 *
36 * Returns the address of the buffer table within the ice segment.
37 */
static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
39 {
40 struct ice_nvm_table *nvms;
41
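	/* The device table and the NVM version table are both flexible arrays;
	 * stepping a typed pointer past 'count' elements of each lands at the
	 * start of the structure that follows (first the NVM table, then the
	 * buffer table).
	 */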
42 nvms = (struct ice_nvm_table *)
43 (ice_seg->device_table +
44 le32_to_cpu(ice_seg->device_table_count));
45
46 return (__force struct ice_buf_table *)
47 (nvms->vers + le32_to_cpu(nvms->table_count));
48 }
49
50 /**
51 * ice_pkg_enum_buf
52 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
53 * @state: pointer to the enum state
54 *
55 * This function will enumerate all the buffers in the ice segment. The first
56 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
57 * ice_seg is set to NULL which continues the enumeration. When the function
58 * returns a NULL pointer, then the end of the buffers has been reached, or an
59 * unexpected value has been detected (for example an invalid section count or
60 * an invalid buffer end value).
61 */
62 static struct ice_buf_hdr *
ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
64 {
65 if (ice_seg) {
66 state->buf_table = ice_find_buf_table(ice_seg);
67 if (!state->buf_table)
68 return NULL;
69
70 state->buf_idx = 0;
71 return ice_pkg_val_buf(state->buf_table->buf_array);
72 }
73
74 if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
75 return ice_pkg_val_buf(state->buf_table->buf_array +
76 state->buf_idx);
77 else
78 return NULL;
79 }
80
81 /**
82 * ice_pkg_advance_sect
83 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
84 * @state: pointer to the enum state
85 *
86 * This helper function will advance the section within the ice segment,
87 * also advancing the buffer if needed.
88 */
89 static bool
ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
91 {
92 if (!ice_seg && !state->buf)
93 return false;
94
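	/* when continuing enumeration (ice_seg is NULL), first try the next
	 * section within the current buffer before advancing to a new buffer
	 */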
95 if (!ice_seg && state->buf)
96 if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
97 return true;
98
99 state->buf = ice_pkg_enum_buf(ice_seg, state);
100 if (!state->buf)
101 return false;
102
103 /* start of new buffer, reset section index */
104 state->sect_idx = 0;
105 return true;
106 }
107
108 /**
109 * ice_pkg_enum_section
110 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
111 * @state: pointer to the enum state
112 * @sect_type: section type to enumerate
113 *
114 * This function will enumerate all the sections of a particular type in the
115 * ice segment. The first call is made with the ice_seg parameter non-NULL;
116 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
117 * When the function returns a NULL pointer, then the end of the matching
118 * sections has been reached.
119 */
120 static void *
ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
		     u32 sect_type)
123 {
124 u16 offset, size;
125
126 if (ice_seg)
127 state->type = sect_type;
128
129 if (!ice_pkg_advance_sect(ice_seg, state))
130 return NULL;
131
132 /* scan for next matching section */
133 while (state->buf->section_entry[state->sect_idx].type !=
134 cpu_to_le32(state->type))
135 if (!ice_pkg_advance_sect(NULL, state))
136 return NULL;
137
138 /* validate section */
139 offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
140 if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
141 return NULL;
142
143 size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
144 if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
145 return NULL;
146
147 /* make sure the section fits in the buffer */
148 if (offset + size > ICE_PKG_BUF_SIZE)
149 return NULL;
150
151 state->sect_type =
152 le32_to_cpu(state->buf->section_entry[state->sect_idx].type);
153
154 /* calc pointer to this section */
155 state->sect = ((u8 *)state->buf) +
156 le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
157
158 return state->sect;
159 }
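
/* Usage sketch for ice_pkg_enum_section() (illustrative only; this mirrors
 * how ice_fill_tbl() below consumes it). Zero the state, pass the segment on
 * the first call, then pass NULL to continue until NULL is returned.
 * ICE_SID_XLT1_SW is just an example section ID.
 *
 *	struct ice_pkg_enum state;
 *	void *sect;
 *
 *	memset(&state, 0, sizeof(state));
 *	sect = ice_pkg_enum_section(hw->seg, &state, ICE_SID_XLT1_SW);
 *	while (sect) {
 *		(... process the section ...)
 *		sect = ice_pkg_enum_section(NULL, &state, ICE_SID_XLT1_SW);
 *	}
 */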
160
161 /**
162 * ice_acquire_global_cfg_lock
163 * @hw: pointer to the HW structure
164 * @access: access type (read or write)
165 *
166 * This function will request ownership of the global config lock for reading
167 * or writing of the package. When attempting to obtain write access, the
168 * caller must check for the following two return values:
169 *
170 * ICE_SUCCESS - Means the caller has acquired the global config lock
171 * and can perform writing of the package.
172 * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
173 * package or has found that no update was necessary; in
174 * this case, the caller can just skip performing any
175 * update of the package.
176 */
177 static enum ice_status
ice_acquire_global_cfg_lock(struct ice_hw *hw,
			    enum ice_aq_res_access_type access)
180 {
181 enum ice_status status;
182
183 status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
184 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
185
186 if (!status)
187 mutex_lock(&ice_global_cfg_lock_sw);
188 else if (status == ICE_ERR_AQ_NO_WORK)
189 ice_debug(hw, ICE_DBG_PKG,
190 "Global config lock: No work to do\n");
191
192 return status;
193 }
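
/* Caller sketch for the two lock helpers above (illustrative only; compare
 * ice_dwnld_cfg_bufs() below). ICE_ERR_AQ_NO_WORK means another driver has
 * already written the package, so it is typically not treated as a hard
 * failure by callers.
 *
 *	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
 *	if (status)
 *		return status;          (ICE_ERR_AQ_NO_WORK: nothing to do)
 *	(... download the package buffers ...)
 *	ice_release_global_cfg_lock(hw);
 */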
194
195 /**
196 * ice_release_global_cfg_lock
197 * @hw: pointer to the HW structure
198 *
199 * This function will release the global config lock.
200 */
static void ice_release_global_cfg_lock(struct ice_hw *hw)
202 {
203 mutex_unlock(&ice_global_cfg_lock_sw);
204 ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
205 }
206
207 /**
208 * ice_aq_download_pkg
209 * @hw: pointer to the hardware structure
210 * @pkg_buf: the package buffer to transfer
211 * @buf_size: the size of the package buffer
212 * @last_buf: last buffer indicator
213 * @error_offset: returns error offset
214 * @error_info: returns error information
215 * @cd: pointer to command details structure or NULL
216 *
217 * Download Package (0x0C40)
218 */
219 static enum ice_status
ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
		    u16 buf_size, bool last_buf, u32 *error_offset,
		    u32 *error_info, struct ice_sq_cd *cd)
223 {
224 struct ice_aqc_download_pkg *cmd;
225 struct ice_aq_desc desc;
226 enum ice_status status;
227
228 if (error_offset)
229 *error_offset = 0;
230 if (error_info)
231 *error_info = 0;
232
233 cmd = &desc.params.download_pkg;
234 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
235 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
236
237 if (last_buf)
238 cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
239
240 status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
241 if (status == ICE_ERR_AQ_ERROR) {
242 /* Read error from buffer only when the FW returned an error */
243 struct ice_aqc_download_pkg_resp *resp;
244
245 resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
246 if (error_offset)
247 *error_offset = le32_to_cpu(resp->error_offset);
248 if (error_info)
249 *error_info = le32_to_cpu(resp->error_info);
250 }
251
252 return status;
253 }
254
255 /**
256 * ice_find_seg_in_pkg
257 * @hw: pointer to the hardware structure
 * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_CPK)
259 * @pkg_hdr: pointer to the package header to be searched
260 *
261 * This function searches a package file for a particular segment type. On
262 * success it returns a pointer to the segment header, otherwise it will
263 * return NULL.
264 */
265 static struct ice_generic_seg_hdr *
ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
		    struct ice_pkg_hdr *pkg_hdr)
268 {
269 u32 i;
270
271 ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
272 pkg_hdr->format_ver.major, pkg_hdr->format_ver.minor,
273 pkg_hdr->format_ver.update, pkg_hdr->format_ver.draft);
274
275 /* Search all package segments for the requested segment type */
276 for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
277 struct ice_generic_seg_hdr *seg;
278
279 seg = (struct ice_generic_seg_hdr *)
280 ((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]));
281
282 if (le32_to_cpu(seg->seg_type) == seg_type)
283 return seg;
284 }
285
286 return NULL;
287 }
288
289 /**
290 * ice_dwnld_cfg_bufs
291 * @hw: pointer to the hardware structure
292 * @bufs: pointer to an array of buffers
293 * @count: the number of buffers in the array
294 *
295 * Obtains global config lock and downloads the package configuration buffers
296 * to the firmware. Metadata buffers are skipped, and the first metadata buffer
297 * found indicates that the rest of the buffers are all metadata buffers.
298 */
299 static enum ice_status
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
301 {
302 enum ice_status status;
303 struct ice_buf_hdr *bh;
304 u32 offset, info, i;
305
306 if (!bufs || !count)
307 return ICE_ERR_PARAM;
308
309 /* If the first buffer's first section has its metadata bit set
310 * then there are no buffers to be downloaded, and the operation is
311 * considered a success.
312 */
313 bh = (struct ice_buf_hdr *)bufs;
314 if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
315 return 0;
316
317 /* reset pkg_dwnld_status in case this function is called in the
318 * reset/rebuild flow
319 */
320 hw->pkg_dwnld_status = ICE_AQ_RC_OK;
321
322 status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
323 if (status) {
324 if (status == ICE_ERR_AQ_NO_WORK)
325 hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
326 else
327 hw->pkg_dwnld_status = hw->adminq.sq_last_status;
328 return status;
329 }
330
331 for (i = 0; i < count; i++) {
332 bool last = ((i + 1) == count);
333
334 if (!last) {
335 /* check next buffer for metadata flag */
336 bh = (struct ice_buf_hdr *)(bufs + i + 1);
337
338 /* A set metadata flag in the next buffer will signal
339 * that the current buffer will be the last buffer
340 * downloaded
341 */
342 if (le16_to_cpu(bh->section_count))
343 if (le32_to_cpu(bh->section_entry[0].type) &
344 ICE_METADATA_BUF)
345 last = true;
346 }
347
348 bh = (struct ice_buf_hdr *)(bufs + i);
349
350 status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
351 &offset, &info, NULL);
352
353 /* Save AQ status from download package */
354 hw->pkg_dwnld_status = hw->adminq.sq_last_status;
355 if (status) {
356 ice_debug(hw, ICE_DBG_PKG,
357 "Pkg download failed: err %d off %d inf %d\n",
358 status, offset, info);
359
360 break;
361 }
362
363 if (last)
364 break;
365 }
366
367 ice_release_global_cfg_lock(hw);
368
369 return status;
370 }
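
/* Buffer-array layout assumed by ice_dwnld_cfg_bufs() above (informational
 * sketch): configuration buffers come first and metadata buffers last, so the
 * first buffer whose first section type has ICE_METADATA_BUF set marks the
 * point at which downloading stops.
 *
 *	[ cfg buf 0 ] [ cfg buf 1 ] ... [ cfg buf N-1 ] [ metadata buf(s) ]
 *	                                      ^ sent with the "last" flag set
 */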
371
372 /**
373 * ice_aq_get_pkg_info_list
374 * @hw: pointer to the hardware structure
375 * @pkg_info: the buffer which will receive the information list
376 * @buf_size: the size of the pkg_info information buffer
377 * @cd: pointer to command details structure or NULL
378 *
379 * Get Package Info List (0x0C43)
380 */
381 static enum ice_status
ice_aq_get_pkg_info_list(struct ice_hw *hw,
			 struct ice_aqc_get_pkg_info_resp *pkg_info,
			 u16 buf_size, struct ice_sq_cd *cd)
385 {
386 struct ice_aq_desc desc;
387
388 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
389
390 return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
391 }
392
393 /**
394 * ice_download_pkg
395 * @hw: pointer to the hardware structure
396 * @ice_seg: pointer to the segment of the package to be downloaded
397 *
398 * Handles the download of a complete package.
399 */
400 static enum ice_status
ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
402 {
403 struct ice_buf_table *ice_buf_tbl;
404
405 ice_debug(hw, ICE_DBG_PKG, "Segment version: %d.%d.%d.%d\n",
406 ice_seg->hdr.seg_ver.major, ice_seg->hdr.seg_ver.minor,
407 ice_seg->hdr.seg_ver.update, ice_seg->hdr.seg_ver.draft);
408
409 ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
410 le32_to_cpu(ice_seg->hdr.seg_type),
411 le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_name);
412
413 ice_buf_tbl = ice_find_buf_table(ice_seg);
414
415 ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
416 le32_to_cpu(ice_buf_tbl->buf_count));
417
418 return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
419 le32_to_cpu(ice_buf_tbl->buf_count));
420 }
421
422 /**
423 * ice_init_pkg_info
424 * @hw: pointer to the hardware structure
425 * @pkg_hdr: pointer to the driver's package hdr
426 *
427 * Saves off the package details into the HW structure.
428 */
429 static enum ice_status
ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
431 {
432 struct ice_global_metadata_seg *meta_seg;
433 struct ice_generic_seg_hdr *seg_hdr;
434
435 if (!pkg_hdr)
436 return ICE_ERR_PARAM;
437
438 meta_seg = (struct ice_global_metadata_seg *)
439 ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr);
440 if (meta_seg) {
441 hw->pkg_ver = meta_seg->pkg_ver;
442 memcpy(hw->pkg_name, meta_seg->pkg_name, sizeof(hw->pkg_name));
443
444 ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
445 meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor,
446 meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft,
447 meta_seg->pkg_name);
448 } else {
449 ice_debug(hw, ICE_DBG_INIT,
450 "Did not find metadata segment in driver package\n");
451 return ICE_ERR_CFG;
452 }
453
454 seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
455 if (seg_hdr) {
456 hw->ice_pkg_ver = seg_hdr->seg_ver;
457 memcpy(hw->ice_pkg_name, seg_hdr->seg_name,
458 sizeof(hw->ice_pkg_name));
459
460 ice_debug(hw, ICE_DBG_PKG, "Ice Pkg: %d.%d.%d.%d, %s\n",
461 seg_hdr->seg_ver.major, seg_hdr->seg_ver.minor,
462 seg_hdr->seg_ver.update, seg_hdr->seg_ver.draft,
463 seg_hdr->seg_name);
464 } else {
465 ice_debug(hw, ICE_DBG_INIT,
466 "Did not find ice segment in driver package\n");
467 return ICE_ERR_CFG;
468 }
469
470 return 0;
471 }
472
473 /**
474 * ice_get_pkg_info
475 * @hw: pointer to the hardware structure
476 *
477 * Store details of the package currently loaded in HW into the HW structure.
478 */
static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
480 {
481 struct ice_aqc_get_pkg_info_resp *pkg_info;
482 enum ice_status status;
483 u16 size;
484 u32 i;
485
486 size = sizeof(*pkg_info) + (sizeof(pkg_info->pkg_info[0]) *
487 (ICE_PKG_CNT - 1));
488 pkg_info = kzalloc(size, GFP_KERNEL);
489 if (!pkg_info)
490 return ICE_ERR_NO_MEMORY;
491
492 status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
493 if (status)
494 goto init_pkg_free_alloc;
495
496 for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
497 #define ICE_PKG_FLAG_COUNT 4
498 char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
499 u8 place = 0;
500
501 if (pkg_info->pkg_info[i].is_active) {
502 flags[place++] = 'A';
503 hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
504 memcpy(hw->active_pkg_name,
505 pkg_info->pkg_info[i].name,
506 sizeof(hw->active_pkg_name));
507 hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
508 }
509 if (pkg_info->pkg_info[i].is_active_at_boot)
510 flags[place++] = 'B';
511 if (pkg_info->pkg_info[i].is_modified)
512 flags[place++] = 'M';
513 if (pkg_info->pkg_info[i].is_in_nvm)
514 flags[place++] = 'N';
515
516 ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
517 i, pkg_info->pkg_info[i].ver.major,
518 pkg_info->pkg_info[i].ver.minor,
519 pkg_info->pkg_info[i].ver.update,
520 pkg_info->pkg_info[i].ver.draft,
521 pkg_info->pkg_info[i].name, flags);
522 }
523
524 init_pkg_free_alloc:
525 kfree(pkg_info);
526
527 return status;
528 }
529
530 /**
531 * ice_verify_pkg - verify package
532 * @pkg: pointer to the package buffer
533 * @len: size of the package buffer
534 *
535 * Verifies various attributes of the package file, including length, format
536 * version, and the requirement of at least one segment.
537 */
static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
539 {
540 u32 seg_count;
541 u32 i;
542
543 if (len < sizeof(*pkg))
544 return ICE_ERR_BUF_TOO_SHORT;
545
546 if (pkg->format_ver.major != ICE_PKG_FMT_VER_MAJ ||
547 pkg->format_ver.minor != ICE_PKG_FMT_VER_MNR ||
548 pkg->format_ver.update != ICE_PKG_FMT_VER_UPD ||
549 pkg->format_ver.draft != ICE_PKG_FMT_VER_DFT)
550 return ICE_ERR_CFG;
551
552 /* pkg must have at least one segment */
553 seg_count = le32_to_cpu(pkg->seg_count);
554 if (seg_count < 1)
555 return ICE_ERR_CFG;
556
557 /* make sure segment array fits in package length */
558 if (len < sizeof(*pkg) + ((seg_count - 1) * sizeof(pkg->seg_offset)))
559 return ICE_ERR_BUF_TOO_SHORT;
560
561 /* all segments must fit within length */
562 for (i = 0; i < seg_count; i++) {
563 u32 off = le32_to_cpu(pkg->seg_offset[i]);
564 struct ice_generic_seg_hdr *seg;
565
566 /* segment header must fit */
567 if (len < off + sizeof(*seg))
568 return ICE_ERR_BUF_TOO_SHORT;
569
570 seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
571
572 /* segment body must fit */
573 if (len < off + le32_to_cpu(seg->seg_size))
574 return ICE_ERR_BUF_TOO_SHORT;
575 }
576
577 return 0;
578 }
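
/* Package layout checked by ice_verify_pkg() above (informational sketch):
 *
 *	struct ice_pkg_hdr      format_ver, seg_count, seg_offset[seg_count]
 *	  seg_offset[0]  ---->  struct ice_generic_seg_hdr + segment body
 *	  seg_offset[1]  ---->  struct ice_generic_seg_hdr + segment body
 *	  ...
 *
 * Each segment header and its seg_size bytes of body must fall entirely
 * within the 'len' bytes supplied by the caller, which is what the loop
 * above enforces.
 */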
579
580 /**
581 * ice_free_seg - free package segment pointer
582 * @hw: pointer to the hardware structure
583 *
 * Frees the package segment pointer in the proper manner, depending on
 * whether the segment was allocated or just the passed-in pointer was stored.
586 */
void ice_free_seg(struct ice_hw *hw)
588 {
589 if (hw->pkg_copy) {
590 devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
591 hw->pkg_copy = NULL;
592 hw->pkg_size = 0;
593 }
594 hw->seg = NULL;
595 }
596
597 /**
598 * ice_init_pkg_regs - initialize additional package registers
599 * @hw: pointer to the hardware structure
600 */
static void ice_init_pkg_regs(struct ice_hw *hw)
602 {
603 #define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
604 #define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
605 #define ICE_SW_BLK_IDX 0
606
607 /* setup Switch block input mask, which is 48-bits in two parts */
608 wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
609 wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
610 }
611
612 /**
613 * ice_chk_pkg_version - check package version for compatibility with driver
614 * @pkg_ver: pointer to a version structure to check
615 *
616 * Check to make sure that the package about to be downloaded is compatible with
617 * the driver. To be compatible, the major and minor components of the package
618 * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
619 * definitions.
620 */
static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
622 {
623 if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
624 pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
625 return ICE_ERR_NOT_SUPPORTED;
626
627 return 0;
628 }
629
630 /**
631 * ice_init_pkg - initialize/download package
632 * @hw: pointer to the hardware structure
633 * @buf: pointer to the package buffer
634 * @len: size of the package buffer
635 *
636 * This function initializes a package. The package contains HW tables
637 * required to do packet processing. First, the function extracts package
638 * information such as version. Then it finds the ice configuration segment
639 * within the package; this function then saves a copy of the segment pointer
640 * within the supplied package buffer. Next, the function will cache any hints
 * from the package, followed by downloading the package itself. Note that if
642 * a previous PF driver has already downloaded the package successfully, then
643 * the current driver will not have to download the package again.
644 *
645 * The local package contents will be used to query default behavior and to
646 * update specific sections of the HW's version of the package (e.g. to update
647 * the parse graph to understand new protocols).
648 *
649 * This function stores a pointer to the package buffer memory, and it is
650 * expected that the supplied buffer will not be freed immediately. If the
651 * package buffer needs to be freed, such as when read from a file, use
652 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
653 * case.
654 */
enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
656 {
657 struct ice_pkg_hdr *pkg;
658 enum ice_status status;
659 struct ice_seg *seg;
660
661 if (!buf || !len)
662 return ICE_ERR_PARAM;
663
664 pkg = (struct ice_pkg_hdr *)buf;
665 status = ice_verify_pkg(pkg, len);
666 if (status) {
667 ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
668 status);
669 return status;
670 }
671
672 /* initialize package info */
673 status = ice_init_pkg_info(hw, pkg);
674 if (status)
675 return status;
676
677 /* before downloading the package, check package version for
678 * compatibility with driver
679 */
680 status = ice_chk_pkg_version(&hw->pkg_ver);
681 if (status)
682 return status;
683
684 /* find segment in given package */
685 seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg);
686 if (!seg) {
687 ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
688 return ICE_ERR_CFG;
689 }
690
691 /* download package */
692 status = ice_download_pkg(hw, seg);
693 if (status == ICE_ERR_AQ_NO_WORK) {
694 ice_debug(hw, ICE_DBG_INIT,
695 "package previously loaded - no work.\n");
696 status = 0;
697 }
698
699 /* Get information on the package currently loaded in HW, then make sure
700 * the driver is compatible with this version.
701 */
702 if (!status) {
703 status = ice_get_pkg_info(hw);
704 if (!status)
705 status = ice_chk_pkg_version(&hw->active_pkg_ver);
706 }
707
708 if (!status) {
709 hw->seg = seg;
710 /* on successful package download update other required
711 * registers to support the package and fill HW tables
712 * with package content.
713 */
714 ice_init_pkg_regs(hw);
715 ice_fill_blk_tbls(hw);
716 } else {
717 ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
718 status);
719 }
720
721 return status;
722 }
723
724 /**
725 * ice_copy_and_init_pkg - initialize/download a copy of the package
726 * @hw: pointer to the hardware structure
727 * @buf: pointer to the package buffer
728 * @len: size of the package buffer
729 *
730 * This function copies the package buffer, and then calls ice_init_pkg() to
731 * initialize the copied package contents.
732 *
733 * The copying is necessary if the package buffer supplied is constant, or if
734 * the memory may disappear shortly after calling this function.
735 *
736 * If the package buffer resides in the data segment and can be modified, the
737 * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
738 *
739 * However, if the package buffer needs to be copied first, such as when being
740 * read from a file, the caller should use ice_copy_and_init_pkg().
741 *
742 * This function will first copy the package buffer, before calling
743 * ice_init_pkg(). The caller is free to immediately destroy the original
744 * package buffer, as the new copy will be managed by this function and
745 * related routines.
746 */
enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
748 {
749 enum ice_status status;
750 u8 *buf_copy;
751
752 if (!buf || !len)
753 return ICE_ERR_PARAM;
754
755 buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
756
757 status = ice_init_pkg(hw, buf_copy, len);
758 if (status) {
759 /* Free the copy, since we failed to initialize the package */
760 devm_kfree(ice_hw_to_dev(hw), buf_copy);
761 } else {
762 /* Track the copied pkg so we can free it later */
763 hw->pkg_copy = buf_copy;
764 hw->pkg_size = len;
765 }
766
767 return status;
768 }
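
/* Minimal usage sketch for ice_copy_and_init_pkg() (illustrative only,
 * assuming the DDP package is obtained via request_firmware() as the PF
 * driver does; the file path is shown purely for illustration and error
 * handling is trimmed):
 *
 *	const struct firmware *fw;
 *
 *	if (request_firmware(&fw, "intel/ice/ddp/ice.pkg", dev))
 *		return;
 *	ice_copy_and_init_pkg(hw, fw->data, fw->size);
 *	release_firmware(fw);   (safe: the buffer was copied above)
 */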
769
770 /* PTG Management */
771
772 /**
773 * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
774 * @hw: pointer to the hardware structure
775 * @blk: HW block
776 * @ptype: the ptype to search for
777 * @ptg: pointer to variable that receives the PTG
778 *
779 * This function will search the PTGs for a particular ptype, returning the
780 * PTG ID that contains it through the PTG parameter, with the value of
 * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
782 */
783 static enum ice_status
ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
785 {
786 if (ptype >= ICE_XLT1_CNT || !ptg)
787 return ICE_ERR_PARAM;
788
789 *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
790 return 0;
791 }
792
793 /**
794 * ice_ptg_alloc_val - Allocates a new packet type group ID by value
795 * @hw: pointer to the hardware structure
796 * @blk: HW block
797 * @ptg: the PTG to allocate
798 *
799 * This function allocates a given packet type group ID specified by the PTG
800 * parameter.
801 */
static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
803 {
804 hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
805 }
806
807 /**
808 * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
809 * @hw: pointer to the hardware structure
810 * @blk: HW block
811 * @ptype: the ptype to remove
812 * @ptg: the PTG to remove the ptype from
813 *
814 * This function will remove the ptype from the specific PTG, and move it to
815 * the default PTG (ICE_DEFAULT_PTG).
816 */
817 static enum ice_status
ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
819 {
820 struct ice_ptg_ptype **ch;
821 struct ice_ptg_ptype *p;
822
823 if (ptype > ICE_XLT1_CNT - 1)
824 return ICE_ERR_PARAM;
825
826 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
827 return ICE_ERR_DOES_NOT_EXIST;
828
829 /* Should not happen if .in_use is set, bad config */
830 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
831 return ICE_ERR_CFG;
832
833 /* find the ptype within this PTG, and bypass the link over it */
834 p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
835 ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
836 while (p) {
837 if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
838 *ch = p->next_ptype;
839 break;
840 }
841
842 ch = &p->next_ptype;
843 p = p->next_ptype;
844 }
845
846 hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
847 hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
848
849 return 0;
850 }
851
852 /**
853 * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
854 * @hw: pointer to the hardware structure
855 * @blk: HW block
856 * @ptype: the ptype to add or move
857 * @ptg: the PTG to add or move the ptype to
858 *
859 * This function will either add or move a ptype to a particular PTG depending
 * on whether the ptype is already part of another group. Note that using
 * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
862 * default PTG.
863 */
864 static enum ice_status
ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
866 {
867 enum ice_status status;
868 u8 original_ptg;
869
870 if (ptype > ICE_XLT1_CNT - 1)
871 return ICE_ERR_PARAM;
872
873 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
874 return ICE_ERR_DOES_NOT_EXIST;
875
876 status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
877 if (status)
878 return status;
879
880 /* Is ptype already in the correct PTG? */
881 if (original_ptg == ptg)
882 return 0;
883
884 /* Remove from original PTG and move back to the default PTG */
885 if (original_ptg != ICE_DEFAULT_PTG)
886 ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
887
888 /* Moving to default PTG? Then we're done with this request */
889 if (ptg == ICE_DEFAULT_PTG)
890 return 0;
891
892 /* Add ptype to PTG at beginning of list */
893 hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
894 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
895 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
896 &hw->blk[blk].xlt1.ptypes[ptype];
897
898 hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
899 hw->blk[blk].xlt1.t[ptype] = ptg;
900
901 return 0;
902 }
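
/* Data-structure sketch for the PTG helpers above (informational): every
 * entry in xlt1.ptypes[] is a singly linked list node, and each in-use entry
 * of xlt1.ptg_tbl[] points at the head of the list of ptypes currently
 * assigned to that PTG. ice_ptg_add_mv_ptype() pushes the ptype onto the head
 * of the destination list and records the mapping both in ptypes[ptype].ptg
 * and in the XLT1 table xlt1.t[ptype].
 */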
903
904 /* Block / table size info */
905 struct ice_blk_size_details {
906 u16 xlt1; /* # XLT1 entries */
907 u16 xlt2; /* # XLT2 entries */
908 u16 prof_tcam; /* # profile ID TCAM entries */
909 u16 prof_id; /* # profile IDs */
910 u8 prof_cdid_bits; /* # CDID one-hot bits used in key */
911 u16 prof_redir; /* # profile redirection entries */
912 u16 es; /* # extraction sequence entries */
913 u16 fvw; /* # field vector words */
914 u8 overwrite; /* overwrite existing entries allowed */
915 u8 reverse; /* reverse FV order */
916 };
917
918 static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
	/*
920 * Table Definitions
921 * XLT1 - Number of entries in XLT1 table
922 * XLT2 - Number of entries in XLT2 table
923 * TCAM - Number of entries Profile ID TCAM table
924 * CDID - Control Domain ID of the hardware block
925 * PRED - Number of entries in the Profile Redirection Table
926 * FV - Number of entries in the Field Vector
927 * FVW - Width (in WORDs) of the Field Vector
928 * OVR - Overwrite existing table entries
929 * REV - Reverse FV
930 */
931 /* XLT1 , XLT2 ,TCAM, PID,CDID,PRED, FV, FVW */
932 /* Overwrite , Reverse FV */
933 /* SW */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48,
934 false, false },
935 /* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32,
936 false, false },
937 /* FD */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
938 false, true },
939 /* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
940 true, true },
941 /* PE */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24,
942 false, false },
943 };
944
945 enum ice_sid_all {
946 ICE_SID_XLT1_OFF = 0,
947 ICE_SID_XLT2_OFF,
948 ICE_SID_PR_OFF,
949 ICE_SID_PR_REDIR_OFF,
950 ICE_SID_ES_OFF,
951 ICE_SID_OFF_COUNT,
952 };
953
954 /* VSIG Management */
955
956 /**
957 * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
958 * @hw: pointer to the hardware structure
959 * @blk: HW block
960 * @vsi: VSI of interest
961 * @vsig: pointer to receive the VSI group
962 *
 * This function will look up the VSI entry in the XLT2 list and return
 * the VSI group it is associated with.
965 */
966 static enum ice_status
ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
968 {
969 if (!vsig || vsi >= ICE_MAX_VSI)
970 return ICE_ERR_PARAM;
971
972 /* As long as there's a default or valid VSIG associated with the input
 * VSI, the function returns success. Any handling of VSIG will be
974 * done by the following add, update or remove functions.
975 */
976 *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
977
978 return 0;
979 }
980
981 /**
982 * ice_vsig_alloc_val - allocate a new VSIG by value
983 * @hw: pointer to the hardware structure
984 * @blk: HW block
985 * @vsig: the VSIG to allocate
986 *
987 * This function will allocate a given VSIG specified by the VSIG parameter.
988 */
static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
990 {
991 u16 idx = vsig & ICE_VSIG_IDX_M;
992
993 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
994 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
995 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
996 }
997
998 return ICE_VSIG_VALUE(idx, hw->pf_id);
999 }
1000
1001 /**
1002 * ice_vsig_remove_vsi - remove VSI from VSIG
1003 * @hw: pointer to the hardware structure
1004 * @blk: HW block
1005 * @vsi: VSI to remove
1006 * @vsig: VSI group to remove from
1007 *
1008 * The function will remove the input VSI from its VSI group and move it
1009 * to the DEFAULT_VSIG.
1010 */
1011 static enum ice_status
ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
1013 {
1014 struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
1015 u16 idx;
1016
1017 idx = vsig & ICE_VSIG_IDX_M;
1018
1019 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
1020 return ICE_ERR_PARAM;
1021
1022 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
1023 return ICE_ERR_DOES_NOT_EXIST;
1024
1025 /* entry already in default VSIG, don't have to remove */
1026 if (idx == ICE_DEFAULT_VSIG)
1027 return 0;
1028
1029 vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
1030 if (!(*vsi_head))
1031 return ICE_ERR_CFG;
1032
1033 vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
1034 vsi_cur = (*vsi_head);
1035
1036 /* iterate the VSI list, skip over the entry to be removed */
1037 while (vsi_cur) {
1038 if (vsi_tgt == vsi_cur) {
1039 (*vsi_head) = vsi_cur->next_vsi;
1040 break;
1041 }
1042 vsi_head = &vsi_cur->next_vsi;
1043 vsi_cur = vsi_cur->next_vsi;
1044 }
1045
1046 /* verify if VSI was removed from group list */
1047 if (!vsi_cur)
1048 return ICE_ERR_DOES_NOT_EXIST;
1049
1050 vsi_cur->vsig = ICE_DEFAULT_VSIG;
1051 vsi_cur->changed = 1;
1052 vsi_cur->next_vsi = NULL;
1053
1054 return 0;
1055 }
1056
1057 /**
1058 * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
1059 * @hw: pointer to the hardware structure
1060 * @blk: HW block
1061 * @vsi: VSI to move
1062 * @vsig: destination VSI group
1063 *
1064 * This function will move or add the input VSI to the target VSIG.
1065 * The function will find the original VSIG the VSI belongs to and
1066 * move the entry to the DEFAULT_VSIG, update the original VSIG and
1067 * then move entry to the new VSIG.
1068 */
1069 static enum ice_status
ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
1071 {
1072 struct ice_vsig_vsi *tmp;
1073 enum ice_status status;
1074 u16 orig_vsig, idx;
1075
1076 idx = vsig & ICE_VSIG_IDX_M;
1077
1078 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
1079 return ICE_ERR_PARAM;
1080
	/* if the VSIG is not in use and is not the default VSIG, then this
	 * VSIG doesn't exist.
1083 */
1084 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
1085 vsig != ICE_DEFAULT_VSIG)
1086 return ICE_ERR_DOES_NOT_EXIST;
1087
1088 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
1089 if (status)
1090 return status;
1091
1092 /* no update required if vsigs match */
1093 if (orig_vsig == vsig)
1094 return 0;
1095
1096 if (orig_vsig != ICE_DEFAULT_VSIG) {
1097 /* remove entry from orig_vsig and add to default VSIG */
1098 status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
1099 if (status)
1100 return status;
1101 }
1102
1103 if (idx == ICE_DEFAULT_VSIG)
1104 return 0;
1105
1106 /* Create VSI entry and add VSIG and prop_mask values */
1107 hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
1108 hw->blk[blk].xlt2.vsis[vsi].changed = 1;
1109
1110 /* Add new entry to the head of the VSIG list */
1111 tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
1112 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
1113 &hw->blk[blk].xlt2.vsis[vsi];
1114 hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
1115 hw->blk[blk].xlt2.t[vsi] = vsig;
1116
1117 return 0;
1118 }
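
/* Data-structure sketch for the VSIG helpers above (informational): each
 * xlt2.vsig_tbl[] entry that is in use anchors a singly linked list of
 * xlt2.vsis[] entries belonging to that VSI group. Moving a VSI first unlinks
 * it from its original group (sending it to ICE_DEFAULT_VSIG), then pushes it
 * onto the head of the destination group's list and updates xlt2.t[vsi].
 */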
1119
1120 /* Block / table section IDs */
1121 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
1122 /* SWITCH */
1123 { ICE_SID_XLT1_SW,
1124 ICE_SID_XLT2_SW,
1125 ICE_SID_PROFID_TCAM_SW,
1126 ICE_SID_PROFID_REDIR_SW,
1127 ICE_SID_FLD_VEC_SW
1128 },
1129
1130 /* ACL */
1131 { ICE_SID_XLT1_ACL,
1132 ICE_SID_XLT2_ACL,
1133 ICE_SID_PROFID_TCAM_ACL,
1134 ICE_SID_PROFID_REDIR_ACL,
1135 ICE_SID_FLD_VEC_ACL
1136 },
1137
1138 /* FD */
1139 { ICE_SID_XLT1_FD,
1140 ICE_SID_XLT2_FD,
1141 ICE_SID_PROFID_TCAM_FD,
1142 ICE_SID_PROFID_REDIR_FD,
1143 ICE_SID_FLD_VEC_FD
1144 },
1145
1146 /* RSS */
1147 { ICE_SID_XLT1_RSS,
1148 ICE_SID_XLT2_RSS,
1149 ICE_SID_PROFID_TCAM_RSS,
1150 ICE_SID_PROFID_REDIR_RSS,
1151 ICE_SID_FLD_VEC_RSS
1152 },
1153
1154 /* PE */
1155 { ICE_SID_XLT1_PE,
1156 ICE_SID_XLT2_PE,
1157 ICE_SID_PROFID_TCAM_PE,
1158 ICE_SID_PROFID_REDIR_PE,
1159 ICE_SID_FLD_VEC_PE
1160 }
1161 };
1162
1163 /**
1164 * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
1165 * @hw: pointer to the hardware structure
1166 * @blk: the HW block to initialize
1167 */
static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
1169 {
1170 u16 pt;
1171
1172 for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
1173 u8 ptg;
1174
1175 ptg = hw->blk[blk].xlt1.t[pt];
1176 if (ptg != ICE_DEFAULT_PTG) {
1177 ice_ptg_alloc_val(hw, blk, ptg);
1178 ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
1179 }
1180 }
1181 }
1182
1183 /**
1184 * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
1185 * @hw: pointer to the hardware structure
1186 * @blk: the HW block to initialize
1187 */
static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
1189 {
1190 u16 vsi;
1191
1192 for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
1193 u16 vsig;
1194
1195 vsig = hw->blk[blk].xlt2.t[vsi];
1196 if (vsig) {
1197 ice_vsig_alloc_val(hw, blk, vsig);
1198 ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
1199 /* no changes at this time, since this has been
1200 * initialized from the original package
1201 */
1202 hw->blk[blk].xlt2.vsis[vsi].changed = 0;
1203 }
1204 }
1205 }
1206
1207 /**
1208 * ice_init_sw_db - init software database from HW tables
1209 * @hw: pointer to the hardware structure
1210 */
static void ice_init_sw_db(struct ice_hw *hw)
1212 {
1213 u16 i;
1214
1215 for (i = 0; i < ICE_BLK_COUNT; i++) {
1216 ice_init_sw_xlt1_db(hw, (enum ice_block)i);
1217 ice_init_sw_xlt2_db(hw, (enum ice_block)i);
1218 }
1219 }
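
/* Note on ordering (informational): ice_fill_tbl() must have populated the
 * raw xlt1.t[] and xlt2.t[] arrays from the package before ice_init_sw_db()
 * runs, since ice_init_sw_xlt1_db() and ice_init_sw_xlt2_db() above simply
 * replay those arrays through the PTG/VSIG helpers to rebuild the software
 * linked lists; ice_fill_blk_tbls() below enforces that order.
 */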
1220
1221 /**
1222 * ice_fill_tbl - Reads content of a single table type into database
1223 * @hw: pointer to the hardware structure
1224 * @block_id: Block ID of the table to copy
1225 * @sid: Section ID of the table to copy
1226 *
1227 * Will attempt to read the entire content of a given table of a single block
1228 * into the driver database. We assume that the buffer will always
1229 * be as large or larger than the data contained in the package. If
1230 * this condition is not met, there is most likely an error in the package
1231 * contents.
1232 */
static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
1234 {
1235 u32 dst_len, sect_len, offset = 0;
1236 struct ice_prof_redir_section *pr;
1237 struct ice_prof_id_section *pid;
1238 struct ice_xlt1_section *xlt1;
1239 struct ice_xlt2_section *xlt2;
1240 struct ice_sw_fv_section *es;
1241 struct ice_pkg_enum state;
1242 u8 *src, *dst;
1243 void *sect;
1244
1245 /* if the HW segment pointer is null then the first iteration of
1246 * ice_pkg_enum_section() will fail. In this case the HW tables will
 * not be filled and the function simply returns.
1248 */
1249 if (!hw->seg) {
1250 ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
1251 return;
1252 }
1253
1254 memset(&state, 0, sizeof(state));
1255
1256 sect = ice_pkg_enum_section(hw->seg, &state, sid);
1257
1258 while (sect) {
1259 switch (sid) {
1260 case ICE_SID_XLT1_SW:
1261 case ICE_SID_XLT1_FD:
1262 case ICE_SID_XLT1_RSS:
1263 case ICE_SID_XLT1_ACL:
1264 case ICE_SID_XLT1_PE:
1265 xlt1 = (struct ice_xlt1_section *)sect;
1266 src = xlt1->value;
1267 sect_len = le16_to_cpu(xlt1->count) *
1268 sizeof(*hw->blk[block_id].xlt1.t);
1269 dst = hw->blk[block_id].xlt1.t;
1270 dst_len = hw->blk[block_id].xlt1.count *
1271 sizeof(*hw->blk[block_id].xlt1.t);
1272 break;
1273 case ICE_SID_XLT2_SW:
1274 case ICE_SID_XLT2_FD:
1275 case ICE_SID_XLT2_RSS:
1276 case ICE_SID_XLT2_ACL:
1277 case ICE_SID_XLT2_PE:
1278 xlt2 = (struct ice_xlt2_section *)sect;
1279 src = (__force u8 *)xlt2->value;
1280 sect_len = le16_to_cpu(xlt2->count) *
1281 sizeof(*hw->blk[block_id].xlt2.t);
1282 dst = (u8 *)hw->blk[block_id].xlt2.t;
1283 dst_len = hw->blk[block_id].xlt2.count *
1284 sizeof(*hw->blk[block_id].xlt2.t);
1285 break;
1286 case ICE_SID_PROFID_TCAM_SW:
1287 case ICE_SID_PROFID_TCAM_FD:
1288 case ICE_SID_PROFID_TCAM_RSS:
1289 case ICE_SID_PROFID_TCAM_ACL:
1290 case ICE_SID_PROFID_TCAM_PE:
1291 pid = (struct ice_prof_id_section *)sect;
1292 src = (u8 *)pid->entry;
1293 sect_len = le16_to_cpu(pid->count) *
1294 sizeof(*hw->blk[block_id].prof.t);
1295 dst = (u8 *)hw->blk[block_id].prof.t;
1296 dst_len = hw->blk[block_id].prof.count *
1297 sizeof(*hw->blk[block_id].prof.t);
1298 break;
1299 case ICE_SID_PROFID_REDIR_SW:
1300 case ICE_SID_PROFID_REDIR_FD:
1301 case ICE_SID_PROFID_REDIR_RSS:
1302 case ICE_SID_PROFID_REDIR_ACL:
1303 case ICE_SID_PROFID_REDIR_PE:
1304 pr = (struct ice_prof_redir_section *)sect;
1305 src = pr->redir_value;
1306 sect_len = le16_to_cpu(pr->count) *
1307 sizeof(*hw->blk[block_id].prof_redir.t);
1308 dst = hw->blk[block_id].prof_redir.t;
1309 dst_len = hw->blk[block_id].prof_redir.count *
1310 sizeof(*hw->blk[block_id].prof_redir.t);
1311 break;
1312 case ICE_SID_FLD_VEC_SW:
1313 case ICE_SID_FLD_VEC_FD:
1314 case ICE_SID_FLD_VEC_RSS:
1315 case ICE_SID_FLD_VEC_ACL:
1316 case ICE_SID_FLD_VEC_PE:
1317 es = (struct ice_sw_fv_section *)sect;
1318 src = (u8 *)es->fv;
1319 sect_len = (u32)(le16_to_cpu(es->count) *
1320 hw->blk[block_id].es.fvw) *
1321 sizeof(*hw->blk[block_id].es.t);
1322 dst = (u8 *)hw->blk[block_id].es.t;
1323 dst_len = (u32)(hw->blk[block_id].es.count *
1324 hw->blk[block_id].es.fvw) *
1325 sizeof(*hw->blk[block_id].es.t);
1326 break;
1327 default:
1328 return;
1329 }
1330
1331 /* if the section offset exceeds destination length, terminate
1332 * table fill.
1333 */
1334 if (offset > dst_len)
1335 return;
1336
		/* if the sum of the section size and offset exceeds the
		 * destination size, then we are out of bounds of the HW table
		 * size for that PF. Shrink the section length to fill only
		 * the remaining table space of that PF.
1341 */
1342 if ((offset + sect_len) > dst_len)
1343 sect_len = dst_len - offset;
1344
1345 memcpy(dst + offset, src, sect_len);
1346 offset += sect_len;
1347 sect = ice_pkg_enum_section(NULL, &state, sid);
1348 }
1349 }
1350
1351 /**
1352 * ice_fill_blk_tbls - Read package context for tables
1353 * @hw: pointer to the hardware structure
1354 *
1355 * Reads the current package contents and populates the driver
1356 * database with the data iteratively for all advanced feature
1357 * blocks. Assume that the HW tables have been allocated.
1358 */
void ice_fill_blk_tbls(struct ice_hw *hw)
1360 {
1361 u8 i;
1362
1363 for (i = 0; i < ICE_BLK_COUNT; i++) {
1364 enum ice_block blk_id = (enum ice_block)i;
1365
1366 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
1367 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
1368 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
1369 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
1370 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
1371 }
1372
1373 ice_init_sw_db(hw);
1374 }
1375
1376 /**
1377 * ice_free_hw_tbls - free hardware table memory
1378 * @hw: pointer to the hardware structure
1379 */
void ice_free_hw_tbls(struct ice_hw *hw)
1381 {
1382 u8 i;
1383
1384 for (i = 0; i < ICE_BLK_COUNT; i++) {
1385 hw->blk[i].is_list_init = false;
1386
1387 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes);
1388 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl);
1389 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t);
1390 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t);
1391 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl);
1392 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis);
1393 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t);
1394 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
1395 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
1396 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
1397 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
1398 }
1399
1400 memset(hw->blk, 0, sizeof(hw->blk));
1401 }
1402
1403 /**
1404 * ice_clear_hw_tbls - clear HW tables and flow profiles
1405 * @hw: pointer to the hardware structure
1406 */
void ice_clear_hw_tbls(struct ice_hw *hw)
1408 {
1409 u8 i;
1410
1411 for (i = 0; i < ICE_BLK_COUNT; i++) {
1412 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
1413 struct ice_prof_tcam *prof = &hw->blk[i].prof;
1414 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
1415 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
1416 struct ice_es *es = &hw->blk[i].es;
1417
1418 memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes));
1419 memset(xlt1->ptg_tbl, 0,
1420 ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl));
1421 memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t));
1422
1423 memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis));
1424 memset(xlt2->vsig_tbl, 0,
1425 xlt2->count * sizeof(*xlt2->vsig_tbl));
1426 memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t));
1427
1428 memset(prof->t, 0, prof->count * sizeof(*prof->t));
1429 memset(prof_redir->t, 0,
1430 prof_redir->count * sizeof(*prof_redir->t));
1431
1432 memset(es->t, 0, es->count * sizeof(*es->t));
1433 memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
1434 memset(es->written, 0, es->count * sizeof(*es->written));
1435 }
1436 }
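
/* ice_clear_hw_tbls() above keeps the table allocations and only zeroes
 * their contents, so the tables can be repopulated (for example after a
 * reset/rebuild) without reallocating, whereas ice_free_hw_tbls() further up
 * releases the memory entirely and zeroes hw->blk.
 */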
1437
1438 /**
1439 * ice_init_hw_tbls - init hardware table memory
1440 * @hw: pointer to the hardware structure
1441 */
enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
1443 {
1444 u8 i;
1445
1446 for (i = 0; i < ICE_BLK_COUNT; i++) {
1447 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
1448 struct ice_prof_tcam *prof = &hw->blk[i].prof;
1449 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
1450 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
1451 struct ice_es *es = &hw->blk[i].es;
1452 u16 j;
1453
1454 if (hw->blk[i].is_list_init)
1455 continue;
1456
1457 hw->blk[i].is_list_init = true;
1458
1459 hw->blk[i].overwrite = blk_sizes[i].overwrite;
1460 es->reverse = blk_sizes[i].reverse;
1461
1462 xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
1463 xlt1->count = blk_sizes[i].xlt1;
1464
1465 xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
1466 sizeof(*xlt1->ptypes), GFP_KERNEL);
1467
1468 if (!xlt1->ptypes)
1469 goto err;
1470
1471 xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS,
1472 sizeof(*xlt1->ptg_tbl),
1473 GFP_KERNEL);
1474
1475 if (!xlt1->ptg_tbl)
1476 goto err;
1477
1478 xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
1479 sizeof(*xlt1->t), GFP_KERNEL);
1480 if (!xlt1->t)
1481 goto err;
1482
1483 xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
1484 xlt2->count = blk_sizes[i].xlt2;
1485
1486 xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
1487 sizeof(*xlt2->vsis), GFP_KERNEL);
1488
1489 if (!xlt2->vsis)
1490 goto err;
1491
1492 xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
1493 sizeof(*xlt2->vsig_tbl),
1494 GFP_KERNEL);
1495 if (!xlt2->vsig_tbl)
1496 goto err;
1497
1498 for (j = 0; j < xlt2->count; j++)
1499 INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
1500
1501 xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
1502 sizeof(*xlt2->t), GFP_KERNEL);
1503 if (!xlt2->t)
1504 goto err;
1505
1506 prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
1507 prof->count = blk_sizes[i].prof_tcam;
1508 prof->max_prof_id = blk_sizes[i].prof_id;
1509 prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
1510 prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count,
1511 sizeof(*prof->t), GFP_KERNEL);
1512
1513 if (!prof->t)
1514 goto err;
1515
1516 prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
1517 prof_redir->count = blk_sizes[i].prof_redir;
1518 prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw),
1519 prof_redir->count,
1520 sizeof(*prof_redir->t),
1521 GFP_KERNEL);
1522
1523 if (!prof_redir->t)
1524 goto err;
1525
1526 es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
1527 es->count = blk_sizes[i].es;
1528 es->fvw = blk_sizes[i].fvw;
1529 es->t = devm_kcalloc(ice_hw_to_dev(hw),
1530 (u32)(es->count * es->fvw),
1531 sizeof(*es->t), GFP_KERNEL);
1532 if (!es->t)
1533 goto err;
1534
1535 es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
1536 sizeof(*es->ref_count),
1537 GFP_KERNEL);
1538
1539 es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
1540 sizeof(*es->written), GFP_KERNEL);
		if (!es->ref_count || !es->written)
			goto err;
1543 }
1544 return 0;
1545
1546 err:
1547 ice_free_hw_tbls(hw);
1548 return ICE_ERR_NO_MEMORY;
1549 }
1550