// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 - 2021 Intel Corporation */
#include <linux/iopoll.h>
#include <adf_accel_devices.h>
#include <adf_cfg.h>
#include <adf_clock.h>
#include <adf_common_drv.h>
#include <adf_gen4_dc.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
#include <adf_gen4_timer.h>
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"

enum adf_fw_objs {
	ADF_FW_SYM_OBJ,
	ADF_FW_ASYM_OBJ,
	ADF_FW_DC_OBJ,
	ADF_FW_ADMIN_OBJ,
};

static const char * const adf_4xxx_fw_objs[] = {
	[ADF_FW_SYM_OBJ] = ADF_4XXX_SYM_OBJ,
	[ADF_FW_ASYM_OBJ] = ADF_4XXX_ASYM_OBJ,
	[ADF_FW_DC_OBJ] = ADF_4XXX_DC_OBJ,
	[ADF_FW_ADMIN_OBJ] = ADF_4XXX_ADMIN_OBJ,
};

static const char * const adf_402xx_fw_objs[] = {
	[ADF_FW_SYM_OBJ] = ADF_402XX_SYM_OBJ,
	[ADF_FW_ASYM_OBJ] = ADF_402XX_ASYM_OBJ,
	[ADF_FW_DC_OBJ] = ADF_402XX_DC_OBJ,
	[ADF_FW_ADMIN_OBJ] = ADF_402XX_ADMIN_OBJ,
};

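/*
 * Pairing of an accel engine mask with the firmware object to be loaded on
 * those engines. Each service configuration below is a table of these pairs.
 */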
struct adf_fw_config {
	u32 ae_mask;
	enum adf_fw_objs obj;
};

static const struct adf_fw_config adf_fw_cy_config[] = {
	{0xF0, ADF_FW_SYM_OBJ},
	{0xF, ADF_FW_ASYM_OBJ},
	{0x100, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_dc_config[] = {
	{0xF0, ADF_FW_DC_OBJ},
	{0xF, ADF_FW_DC_OBJ},
	{0x100, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_sym_config[] = {
	{0xF0, ADF_FW_SYM_OBJ},
	{0xF, ADF_FW_SYM_OBJ},
	{0x100, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_asym_config[] = {
	{0xF0, ADF_FW_ASYM_OBJ},
	{0xF, ADF_FW_ASYM_OBJ},
	{0x100, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_asym_dc_config[] = {
	{0xF0, ADF_FW_ASYM_OBJ},
	{0xF, ADF_FW_DC_OBJ},
	{0x100, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_sym_dc_config[] = {
	{0xF0, ADF_FW_SYM_OBJ},
	{0xF, ADF_FW_DC_OBJ},
	{0x100, ADF_FW_ADMIN_OBJ},
};

static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dc_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_dc_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_dc_config));

/* Worker thread to service arbiter mappings */
static const u32 default_thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = {
	0x5555555, 0x5555555, 0x5555555, 0x5555555,
	0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA,
	0x0
};

static const u32 thrd_to_arb_map_dc[ADF_4XXX_MAX_ACCELENGINES] = {
	0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
	0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
	0x0
};

static struct adf_hw_device_class adf_4xxx_class = {
	.name = ADF_4XXX_DEVICE_NAME,
	.type = DEV_4XXX,
	.instances = 0,
};

enum dev_services {
	SVC_CY = 0,
	SVC_CY2,
	SVC_DC,
	SVC_SYM,
	SVC_ASYM,
	SVC_DC_ASYM,
	SVC_ASYM_DC,
	SVC_DC_SYM,
	SVC_SYM_DC,
};

static const char *const dev_cfg_services[] = {
	[SVC_CY] = ADF_CFG_CY,
	[SVC_CY2] = ADF_CFG_ASYM_SYM,
	[SVC_DC] = ADF_CFG_DC,
	[SVC_SYM] = ADF_CFG_SYM,
	[SVC_ASYM] = ADF_CFG_ASYM,
	[SVC_DC_ASYM] = ADF_CFG_DC_ASYM,
	[SVC_ASYM_DC] = ADF_CFG_ASYM_DC,
	[SVC_DC_SYM] = ADF_CFG_DC_SYM,
	[SVC_SYM_DC] = ADF_CFG_SYM_DC,
};

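/*
 * Look up the ServicesEnabled string in the device configuration and map it
 * to one of the dev_services indices above. Returns a negative errno if the
 * key is missing or its value is not recognized.
 */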
static int get_service_enabled(struct adf_accel_dev *accel_dev)
{
	char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
	int ret;

	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
				      ADF_SERVICES_ENABLED, services);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			ADF_SERVICES_ENABLED " param not found\n");
		return ret;
	}

	ret = match_string(dev_cfg_services, ARRAY_SIZE(dev_cfg_services),
			   services);
	if (ret < 0)
		dev_err(&GET_DEV(accel_dev),
			"Invalid value of " ADF_SERVICES_ENABLED " param: %s\n",
			services);

	return ret;
}

static u32 get_accel_mask(struct adf_hw_device_data *self)
{
	return ADF_4XXX_ACCELERATORS_MASK;
}

static u32 get_ae_mask(struct adf_hw_device_data *self)
{
	u32 me_disable = self->fuses;

	return ~me_disable & ADF_4XXX_ACCELENGINES_MASK;
}

static u32 get_num_accels(struct adf_hw_device_data *self)
{
	return ADF_4XXX_MAX_ACCELERATORS;
}

static u32 get_num_aes(struct adf_hw_device_data *self)
{
	if (!self || !self->ae_mask)
		return 0;

	return hweight32(self->ae_mask);
}

static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXX_PMISC_BAR;
}

static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXX_ETR_BAR;
}

static u32 get_sram_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXX_SRAM_BAR;
}

/*
 * The vector routing table is used to select the MSI-X entry to use for each
 * interrupt source.
 * The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts.
 * The final entry corresponds to VF2PF or error interrupts.
 * This vector table could be used to configure one MSI-X entry to be shared
 * between multiple interrupt sources.
 *
 * The default routing is set to have a one to one correspondence between the
 * interrupt source and the MSI-X entry used.
 */
static void set_msix_default_rttable(struct adf_accel_dev *accel_dev)
{
	void __iomem *csr;
	int i;

	csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
	for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++)
		ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i);
}

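/*
 * Build the device capability mask: start from the full set of symmetric,
 * asymmetric and compression capabilities, clear whatever the fuses report
 * as disabled for this SKU, then keep only the capabilities matching the
 * services enabled in the configuration.
 */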
static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
	u32 capabilities_sym, capabilities_asym, capabilities_dc;
	u32 fusectl1;

	/* Read accelerator capabilities mask */
	pci_read_config_dword(pdev, ADF_4XXX_FUSECTL1_OFFSET, &fusectl1);

	capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
			  ICP_ACCEL_CAPABILITIES_CIPHER |
			  ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
			  ICP_ACCEL_CAPABILITIES_SHA3 |
			  ICP_ACCEL_CAPABILITIES_SHA3_EXT |
			  ICP_ACCEL_CAPABILITIES_HKDF |
			  ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
			  ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
			  ICP_ACCEL_CAPABILITIES_SM3 |
			  ICP_ACCEL_CAPABILITIES_SM4 |
			  ICP_ACCEL_CAPABILITIES_AES_V2;

	/* A set bit in fusectl1 means the feature is OFF in this SKU */
	if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}

	if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}

	if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}

	if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4;
	}

	capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
			  ICP_ACCEL_CAPABILITIES_CIPHER |
			  ICP_ACCEL_CAPABILITIES_SM2 |
			  ICP_ACCEL_CAPABILITIES_ECEDMONT;

	if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) {
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
	}

	capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
			  ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
			  ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
			  ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;

	if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) {
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
	}

	switch (get_service_enabled(accel_dev)) {
	case SVC_CY:
	case SVC_CY2:
		return capabilities_sym | capabilities_asym;
	case SVC_DC:
		return capabilities_dc;
	case SVC_SYM:
		return capabilities_sym;
	case SVC_ASYM:
		return capabilities_asym;
	case SVC_ASYM_DC:
	case SVC_DC_ASYM:
		return capabilities_asym | capabilities_dc;
	case SVC_SYM_DC:
	case SVC_DC_SYM:
		return capabilities_sym | capabilities_dc;
	default:
		return 0;
	}
}

static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
	return DEV_SKU_1;
}

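/*
 * Return the worker-thread to arbiter mapping for the enabled service; the
 * compression-only configuration uses its own table, all other services use
 * the default map.
 */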
static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
	switch (get_service_enabled(accel_dev)) {
	case SVC_DC:
		return thrd_to_arb_map_dc;
	default:
		return default_thrd_to_arb_map;
	}
}

static void get_arb_info(struct arb_info *arb_info)
{
	arb_info->arb_cfg = ADF_4XXX_ARB_CONFIG;
	arb_info->arb_offset = ADF_4XXX_ARB_OFFSET;
	arb_info->wt2sam_offset = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET;
}

static void get_admin_info(struct admin_info *admin_csrs_info)
{
	admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET;
	admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET;
	admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET;
}

static u32 get_heartbeat_clock(struct adf_hw_device_data *self)
{
	/*
	 * 4XXX uses KPT counter for HB
	 */
	return ADF_4XXX_KPT_COUNTER_FREQ;
}

static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR];
	void __iomem *csr = misc_bar->virt_addr;

	/* Enable all in errsou3 except VFLR notification on host */
	ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
}

static void adf_enable_ints(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;

	addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;

	/* Enable bundle interrupts */
	ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0);
	ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0);

	/* Enable misc interrupts */
	ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0);
}

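/*
 * Power up the device: mask the power-management interrupt while the
 * handshake is in progress, assert DRV_ACTIVE and then poll the PM status
 * register until the hardware reports the init state or the poll times out.
 */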
static int adf_init_device(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;
	u32 status;
	u32 csr;
	int ret;

	addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;

	/* Temporarily mask PM interrupt */
	csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
	csr |= ADF_GEN4_PM_SOU;
	ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);

	/* Set DRV_ACTIVE bit to power up the device */
	ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);

	/* Poll status register to make sure the device is powered up */
	ret = read_poll_timeout(ADF_CSR_RD, status,
				status & ADF_GEN4_PM_INIT_STATE,
				ADF_GEN4_PM_POLL_DELAY_US,
				ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
				ADF_GEN4_PM_STATUS);
	if (ret)
		dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");

	return ret;
}

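/*
 * All service configurations contain the same number of firmware objects
 * (enforced by the static_asserts above), so the cy table size is used as
 * the common count.
 */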
static u32 uof_get_num_objs(void)
{
	return ARRAY_SIZE(adf_fw_cy_config);
}

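/*
 * Map a firmware object index to its object name for the enabled service.
 * Returns NULL if the service configuration is invalid or the resolved
 * object id falls outside the fw_objs[] table.
 */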
static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
				const char * const fw_objs[], int num_objs)
{
	int id;

	switch (get_service_enabled(accel_dev)) {
	case SVC_CY:
	case SVC_CY2:
		id = adf_fw_cy_config[obj_num].obj;
		break;
	case SVC_DC:
		id = adf_fw_dc_config[obj_num].obj;
		break;
	case SVC_SYM:
		id = adf_fw_sym_config[obj_num].obj;
		break;
	case SVC_ASYM:
		id = adf_fw_asym_config[obj_num].obj;
		break;
	case SVC_ASYM_DC:
	case SVC_DC_ASYM:
		id = adf_fw_asym_dc_config[obj_num].obj;
		break;
	case SVC_SYM_DC:
	case SVC_DC_SYM:
		id = adf_fw_sym_dc_config[obj_num].obj;
		break;
	default:
		id = -EINVAL;
		break;
	}

	if (id < 0 || id >= num_objs)
		return NULL;

	return fw_objs[id];
}

static const char *uof_get_name_4xxx(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	int num_fw_objs = ARRAY_SIZE(adf_4xxx_fw_objs);

	return uof_get_name(accel_dev, obj_num, adf_4xxx_fw_objs, num_fw_objs);
}

static const char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	int num_fw_objs = ARRAY_SIZE(adf_402xx_fw_objs);

	return uof_get_name(accel_dev, obj_num, adf_402xx_fw_objs, num_fw_objs);
}

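/*
 * Return the accel engine mask on which firmware object obj_num should be
 * loaded, according to the service configuration table in use.
 */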
static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	switch (get_service_enabled(accel_dev)) {
	case SVC_CY:
		return adf_fw_cy_config[obj_num].ae_mask;
	case SVC_DC:
		return adf_fw_dc_config[obj_num].ae_mask;
	case SVC_CY2:
		return adf_fw_cy_config[obj_num].ae_mask;
	case SVC_SYM:
		return adf_fw_sym_config[obj_num].ae_mask;
	case SVC_ASYM:
		return adf_fw_asym_config[obj_num].ae_mask;
	case SVC_ASYM_DC:
	case SVC_DC_ASYM:
		return adf_fw_asym_dc_config[obj_num].ae_mask;
	case SVC_SYM_DC:
	case SVC_DC_SYM:
		return adf_fw_sym_dc_config[obj_num].ae_mask;
	default:
		return 0;
	}
}

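/*
 * Fill in the adf_hw_device_data callbacks and constants for a 4xxx device.
 * The 402xx variant differs only in the firmware and MMP image names and in
 * the firmware object name lookup.
 */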
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
{
	hw_data->dev_class = &adf_4xxx_class;
	hw_data->instance_id = adf_4xxx_class.instances++;
	hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS;
	hw_data->num_banks_per_vf = ADF_4XXX_NUM_BANKS_PER_VF;
	hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK;
	hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS;
	hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES;
	hw_data->num_logical_accel = 1;
	hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK;
	hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP;
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_enable_error_correction;
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_num_accels = get_num_accels;
	hw_data->get_num_aes = get_num_aes;
	hw_data->get_sram_bar_id = get_sram_bar_id;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_arb_info = get_arb_info;
	hw_data->get_admin_info = get_admin_info;
	hw_data->get_accel_cap = get_accel_cap;
	hw_data->get_sku = get_sku;
	hw_data->init_admin_comms = adf_init_admin_comms;
	hw_data->exit_admin_comms = adf_exit_admin_comms;
	hw_data->send_admin_init = adf_send_admin_init;
	hw_data->init_arb = adf_init_arb;
	hw_data->exit_arb = adf_exit_arb;
	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
	hw_data->enable_ints = adf_enable_ints;
	hw_data->init_device = adf_init_device;
	hw_data->reset_device = adf_reset_flr;
	hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
	switch (dev_id) {
	case ADF_402XX_PCI_DEVICE_ID:
		hw_data->fw_name = ADF_402XX_FW;
		hw_data->fw_mmp_name = ADF_402XX_MMP;
		hw_data->uof_get_name = uof_get_name_402xx;
		break;

	default:
		hw_data->fw_name = ADF_4XXX_FW;
		hw_data->fw_mmp_name = ADF_4XXX_MMP;
		hw_data->uof_get_name = uof_get_name_4xxx;
	}
	hw_data->uof_get_num_objs = uof_get_num_objs;
	hw_data->uof_get_ae_mask = uof_get_ae_mask;
	hw_data->set_msix_rttable = set_msix_default_rttable;
	hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
	hw_data->disable_iov = adf_disable_sriov;
	hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
	hw_data->enable_pm = adf_gen4_enable_pm;
	hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
	hw_data->dev_config = adf_gen4_dev_config;
	hw_data->start_timer = adf_gen4_timer_start;
	hw_data->stop_timer = adf_gen4_timer_stop;
	hw_data->get_hb_clock = get_heartbeat_clock;
	hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;

	adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
	adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
	adf_gen4_init_dc_ops(&hw_data->dc_ops);
}

void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
}