1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2020 - 2021 Intel Corporation */
3 #include <linux/iopoll.h>
4 #include <adf_accel_devices.h>
5 #include <adf_cfg.h>
6 #include <adf_common_drv.h>
7 #include <adf_gen4_hw_data.h>
8 #include <adf_gen4_pfvf.h>
9 #include <adf_gen4_pm.h>
10 #include "adf_4xxx_hw_data.h"
11 #include "icp_qat_hw.h"
12
/* Maps one firmware object file to the set of acceleration engines it runs on */
struct adf_fw_config {
	u32 ae_mask;	/* bitmask of AEs that load this object */
	char *obj_name;	/* firmware object file name */
};
17
/* Firmware layout for the crypto (cy) service: sym on AEs 4-7, asym on 0-3,
 * admin on AE 8.
 */
static struct adf_fw_config adf_4xxx_fw_cy_config[] = {
	{0xF0, ADF_4XXX_SYM_OBJ},
	{0xF, ADF_4XXX_ASYM_OBJ},
	{0x100, ADF_4XXX_ADMIN_OBJ},
};
23
/* Firmware layout for the compression (dc) service: the DC object runs on
 * AEs 0-7, admin on AE 8. Must stay the same length as the cy table (see
 * uof_get_num_objs()).
 */
static struct adf_fw_config adf_4xxx_fw_dc_config[] = {
	{0xF0, ADF_4XXX_DC_OBJ},
	{0xF, ADF_4XXX_DC_OBJ},
	{0x100, ADF_4XXX_ADMIN_OBJ},
};
29
/* Worker thread to service arbiter mappings, one entry per accel engine;
 * the admin AE (last entry) services no arbiter rings.
 */
static const u32 thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = {
	0x5555555, 0x5555555, 0x5555555, 0x5555555,
	0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA,
	0x0
};
36
/* Device class shared by all 4xxx devices; instances counts registered
 * devices and is bumped in adf_init_hw_data_4xxx().
 */
static struct adf_hw_device_class adf_4xxx_class = {
	.name = ADF_4XXX_DEVICE_NAME,
	.type = DEV_4XXX,
	.instances = 0,
};
42
/* Services a 4xxx device can be configured for; values index
 * dev_cfg_services[] and are returned by get_service_enabled().
 */
enum dev_services {
	SVC_CY = 0,	/* crypto (symmetric + asymmetric) */
	SVC_DC,		/* compression */
};
47
/* Configuration-string spelling of each service, indexed by enum dev_services */
static const char *const dev_cfg_services[] = {
	[SVC_CY] = ADF_CFG_CY,
	[SVC_DC] = ADF_CFG_DC,
};
52
get_service_enabled(struct adf_accel_dev * accel_dev)53 static int get_service_enabled(struct adf_accel_dev *accel_dev)
54 {
55 char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
56 int ret;
57
58 ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
59 ADF_SERVICES_ENABLED, services);
60 if (ret) {
61 dev_err(&GET_DEV(accel_dev),
62 ADF_SERVICES_ENABLED " param not found\n");
63 return ret;
64 }
65
66 ret = match_string(dev_cfg_services, ARRAY_SIZE(dev_cfg_services),
67 services);
68 if (ret < 0)
69 dev_err(&GET_DEV(accel_dev),
70 "Invalid value of " ADF_SERVICES_ENABLED " param: %s\n",
71 services);
72
73 return ret;
74 }
75
/* All accelerators are always present on 4xxx; no fusing at this level */
static u32 get_accel_mask(struct adf_hw_device_data *self)
{
	return ADF_4XXX_ACCELERATORS_MASK;
}
80
get_ae_mask(struct adf_hw_device_data * self)81 static u32 get_ae_mask(struct adf_hw_device_data *self)
82 {
83 u32 me_disable = self->fuses;
84
85 return ~me_disable & ADF_4XXX_ACCELENGINES_MASK;
86 }
87
/* Fixed accelerator count for the 4xxx generation */
static u32 get_num_accels(struct adf_hw_device_data *self)
{
	return ADF_4XXX_MAX_ACCELERATORS;
}
92
get_num_aes(struct adf_hw_device_data * self)93 static u32 get_num_aes(struct adf_hw_device_data *self)
94 {
95 if (!self || !self->ae_mask)
96 return 0;
97
98 return hweight32(self->ae_mask);
99 }
100
/* PCI BAR index holding the miscellaneous (PMISC) CSRs */
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXX_PMISC_BAR;
}
105
/* PCI BAR index holding the transport ring (ETR) CSRs */
static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXX_ETR_BAR;
}
110
/* PCI BAR index holding the SRAM region */
static u32 get_sram_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXX_SRAM_BAR;
}
115
116 /*
117 * The vector routing table is used to select the MSI-X entry to use for each
118 * interrupt source.
119 * The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts.
120 * The final entry corresponds to VF2PF or error interrupts.
121 * This vector table could be used to configure one MSI-X entry to be shared
122 * between multiple interrupt sources.
123 *
124 * The default routing is set to have a one to one correspondence between the
125 * interrupt source and the MSI-X entry used.
126 */
set_msix_default_rttable(struct adf_accel_dev * accel_dev)127 static void set_msix_default_rttable(struct adf_accel_dev *accel_dev)
128 {
129 void __iomem *csr;
130 int i;
131
132 csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
133 for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++)
134 ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i);
135 }
136
get_accel_cap(struct adf_accel_dev * accel_dev)137 static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
138 {
139 struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
140 u32 capabilities_cy, capabilities_dc;
141 u32 fusectl1;
142
143 /* Read accelerator capabilities mask */
144 pci_read_config_dword(pdev, ADF_4XXX_FUSECTL1_OFFSET, &fusectl1);
145
146 capabilities_cy = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
147 ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
148 ICP_ACCEL_CAPABILITIES_CIPHER |
149 ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
150 ICP_ACCEL_CAPABILITIES_SHA3 |
151 ICP_ACCEL_CAPABILITIES_SHA3_EXT |
152 ICP_ACCEL_CAPABILITIES_HKDF |
153 ICP_ACCEL_CAPABILITIES_ECEDMONT |
154 ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
155 ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
156 ICP_ACCEL_CAPABILITIES_AES_V2;
157
158 /* A set bit in fusectl1 means the feature is OFF in this SKU */
159 if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) {
160 capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
161 capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_HKDF;
162 capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
163 }
164 if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) {
165 capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
166 capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
167 capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
168 capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
169 }
170 if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) {
171 capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
172 capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_SHA3;
173 capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
174 capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
175 }
176 if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) {
177 capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
178 capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
179 }
180
181 capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
182 ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
183 ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
184 ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
185
186 if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) {
187 capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
188 capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
189 capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
190 capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
191 }
192
193 switch (get_service_enabled(accel_dev)) {
194 case SVC_CY:
195 return capabilities_cy;
196 case SVC_DC:
197 return capabilities_dc;
198 }
199
200 return 0;
201 }
202
/* All 4xxx devices report a single SKU level */
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
	return DEV_SKU_1;
}
207
/* Expose the static worker-thread-to-arbiter mapping table */
static const u32 *adf_get_arbiter_mapping(void)
{
	return thrd_to_arb_map;
}
212
/* Fill in the 4xxx arbiter CSR offsets */
static void get_arb_info(struct arb_info *arb_info)
{
	arb_info->arb_cfg = ADF_4XXX_ARB_CONFIG;
	arb_info->arb_offset = ADF_4XXX_ARB_OFFSET;
	arb_info->wt2sam_offset = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET;
}
219
/* Fill in the 4xxx admin interface CSR offsets */
static void get_admin_info(struct admin_info *admin_csrs_info)
{
	admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET;
	admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET;
	admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET;
}
226
/* Configure error reporting via the ERRMSK3 register in the PMISC BAR */
static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR];
	void __iomem *csr = misc_bar->virt_addr;

	/* Enable all in errsou3 except VFLR notification on host */
	ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
}
235
adf_enable_ints(struct adf_accel_dev * accel_dev)236 static void adf_enable_ints(struct adf_accel_dev *accel_dev)
237 {
238 void __iomem *addr;
239
240 addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
241
242 /* Enable bundle interrupts */
243 ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0);
244 ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0);
245
246 /* Enable misc interrupts */
247 ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0);
248 }
249
/*
 * Power up the device: mask the power-management interrupt, assert
 * DRV_ACTIVE, then poll the PM status register until the device reports
 * the init state. The three steps are order-sensitive.
 *
 * Return: 0 on success, a negative error code if the poll times out.
 */
static int adf_init_device(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;
	u32 status;
	u32 csr;
	int ret;

	addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;

	/* Temporarily mask PM interrupt */
	csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
	csr |= ADF_GEN4_PM_SOU;
	ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);

	/* Set DRV_ACTIVE bit to power up the device */
	ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);

	/* Poll status register to make sure the device is powered up */
	ret = read_poll_timeout(ADF_CSR_RD, status,
				status & ADF_GEN4_PM_INIT_STATE,
				ADF_GEN4_PM_POLL_DELAY_US,
				ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
				ADF_GEN4_PM_STATUS);
	if (ret)
		dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");

	return ret;
}
278
/* Number of firmware objects to load; the cy and dc tables must be the
 * same length for this single answer to be valid for either service.
 */
static u32 uof_get_num_objs(void)
{
	BUILD_BUG_ON_MSG(ARRAY_SIZE(adf_4xxx_fw_cy_config) !=
			 ARRAY_SIZE(adf_4xxx_fw_dc_config),
			 "Size mismatch between adf_4xxx_fw_*_config arrays");

	return ARRAY_SIZE(adf_4xxx_fw_cy_config);
}
287
/* Name of firmware object obj_num for the configured service, or NULL if
 * no valid service is enabled.
 */
static char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	int svc = get_service_enabled(accel_dev);

	if (svc == SVC_CY)
		return adf_4xxx_fw_cy_config[obj_num].obj_name;
	if (svc == SVC_DC)
		return adf_4xxx_fw_dc_config[obj_num].obj_name;

	return NULL;
}
299
/* AE mask of firmware object obj_num for the configured service, or 0 if
 * no valid service is enabled.
 */
static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	int svc = get_service_enabled(accel_dev);

	if (svc == SVC_CY)
		return adf_4xxx_fw_cy_config[obj_num].ae_mask;
	if (svc == SVC_DC)
		return adf_4xxx_fw_dc_config[obj_num].ae_mask;

	return 0;
}
311
/* Populate the hw_data ops table and constants for a 4xxx device.
 * Called once per probed device; bumps the class instance counter
 * (undone by adf_clean_hw_data_4xxx()).
 */
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class = &adf_4xxx_class;
	hw_data->instance_id = adf_4xxx_class.instances++;
	/* Device geometry */
	hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS;
	hw_data->num_banks_per_vf = ADF_4XXX_NUM_BANKS_PER_VF;
	hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK;
	hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS;
	hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES;
	hw_data->num_logical_accel = 1;
	hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK;
	hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP;
	/* Interrupt handling */
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_enable_error_correction;
	/* Capability / topology queries */
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_num_accels = get_num_accels;
	hw_data->get_num_aes = get_num_aes;
	hw_data->get_sram_bar_id = get_sram_bar_id;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_arb_info = get_arb_info;
	hw_data->get_admin_info = get_admin_info;
	hw_data->get_accel_cap = get_accel_cap;
	hw_data->get_sku = get_sku;
	/* Firmware */
	hw_data->fw_name = ADF_4XXX_FW;
	hw_data->fw_mmp_name = ADF_4XXX_MMP;
	/* Admin interface and arbiter */
	hw_data->init_admin_comms = adf_init_admin_comms;
	hw_data->exit_admin_comms = adf_exit_admin_comms;
	hw_data->send_admin_init = adf_send_admin_init;
	hw_data->init_arb = adf_init_arb;
	hw_data->exit_arb = adf_exit_arb;
	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
	hw_data->enable_ints = adf_enable_ints;
	hw_data->init_device = adf_init_device;
	hw_data->reset_device = adf_reset_flr;
	hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
	/* Firmware object lookup */
	hw_data->uof_get_num_objs = uof_get_num_objs;
	hw_data->uof_get_name = uof_get_name;
	hw_data->uof_get_ae_mask = uof_get_ae_mask;
	hw_data->set_msix_rttable = set_msix_default_rttable;
	hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
	hw_data->disable_iov = adf_disable_sriov;
	hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
	/* Power management */
	hw_data->enable_pm = adf_gen4_enable_pm;
	hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
	hw_data->dev_config = adf_crypto_dev_config;

	/* Generation-common CSR and PF-to-VF ops */
	adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
	adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
}
365
/* Undo adf_init_hw_data_4xxx(): drop this device's class instance count */
void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
}
370