// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/slab.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "qat_crypto.h"
#include "icp_qat_fw.h"

#define SEC ADF_KERNEL_SEC

static struct service_hndl qat_crypto;

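/*
 * Drop a reference to a crypto instance and to the acceleration device
 * that owns it. Pairs with qat_crypto_get_instance_node().
 */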
void qat_crypto_put_instance(struct qat_crypto_instance *inst)
{
	atomic_dec(&inst->refctr);
	adf_dev_put(inst->accel_dev);
}

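/*
 * Drain any remaining instance references, tear down the transport rings
 * and free every crypto instance attached to the device.
 */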
static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
{
	struct qat_crypto_instance *inst, *tmp;
	int i;

	list_for_each_entry_safe(inst, tmp, &accel_dev->crypto_list, list) {
		for (i = 0; i < atomic_read(&inst->refctr); i++)
			qat_crypto_put_instance(inst);

		if (inst->sym_tx)
			adf_remove_ring(inst->sym_tx);

		if (inst->sym_rx)
			adf_remove_ring(inst->sym_rx);

		if (inst->pke_tx)
			adf_remove_ring(inst->pke_tx);

		if (inst->pke_rx)
			adf_remove_ring(inst->pke_rx);

		list_del(&inst->list);
		kfree(inst);
	}
	return 0;
}

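/*
 * Select the least-referenced crypto instance on a started device that is
 * local to the given NUMA node (or has no node affinity). If no such device
 * exists, fall back to any started device with crypto instances. On success
 * both the instance and the device reference counters are incremented; the
 * caller releases them with qat_crypto_put_instance().
 */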
struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
{
	struct adf_accel_dev *accel_dev = NULL, *tmp_dev;
	struct qat_crypto_instance *inst = NULL, *tmp_inst;
	unsigned long best = ~0;

	list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
		unsigned long ctr;

		if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
		     dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
		    adf_dev_started(tmp_dev) &&
		    !list_empty(&tmp_dev->crypto_list)) {
			ctr = atomic_read(&tmp_dev->ref_count);
			if (best > ctr) {
				accel_dev = tmp_dev;
				best = ctr;
			}
		}
	}

	if (!accel_dev) {
		pr_info("QAT: Could not find a device on node %d\n", node);
		/* Get any started device */
		list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
			if (adf_dev_started(tmp_dev) &&
			    !list_empty(&tmp_dev->crypto_list)) {
				accel_dev = tmp_dev;
				break;
			}
		}
	}

	if (!accel_dev)
		return NULL;

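	/* Pick the least-referenced instance on the chosen device */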
	best = ~0;
	list_for_each_entry(tmp_inst, &accel_dev->crypto_list, list) {
		unsigned long ctr;

		ctr = atomic_read(&tmp_inst->refctr);
		if (best > ctr) {
			inst = tmp_inst;
			best = ctr;
		}
	}
	if (inst) {
		if (adf_dev_get(accel_dev)) {
			dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
			return NULL;
		}
		atomic_inc(&inst->refctr);
	}
	return inst;
}

/**
 * qat_crypto_dev_config() - create dev config required to create crypto inst.
 *
 * @accel_dev: Pointer to acceleration device.
 *
 * Function creates the device configuration required to create crypto
 * instances.
 *
 * Return: 0 on success, error code otherwise.
 */
int qat_crypto_dev_config(struct adf_accel_dev *accel_dev)
{
	int cpus = num_online_cpus();
	int banks = GET_MAX_BANKS(accel_dev);
	int instances = min(cpus, banks);
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	int i;
	unsigned long val;

	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
		goto err;
	if (adf_cfg_section_add(accel_dev, "Accelerator0"))
		goto err;
	for (i = 0; i < instances; i++) {
		val = i;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
			 i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		val = 128;
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 512;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

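		/* Fixed ring numbers within the bank: asym tx/rx = 0/8, sym tx/rx = 2/10 */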
		val = 0;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 2;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 8;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 10;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = ADF_COALESCING_DEF_TIME;
		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
		if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
						key, (void *)&val, ADF_DEC))
			goto err;
	}

	val = i;
	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
					ADF_NUM_CY, (void *)&val, ADF_DEC))
		goto err;

	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
	return 0;
err:
	dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(qat_crypto_dev_config);

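/*
 * Hedged usage sketch, not part of this file: a device probe routine would
 * typically generate the configuration before bringing the device up.
 * adf_dev_init() and adf_dev_start() are existing ADF entry points; the
 * probe function name below is illustrative only.
 *
 *	static int example_probe(struct adf_accel_dev *accel_dev)
 *	{
 *		int ret;
 *
 *		ret = qat_crypto_dev_config(accel_dev);
 *		if (ret)
 *			return ret;
 *
 *		ret = adf_dev_init(accel_dev);
 *		if (ret)
 *			return ret;
 *
 *		return adf_dev_start(accel_dev);
 *	}
 */

/*
 * Read the configuration written by qat_crypto_dev_config() and create one
 * crypto instance per configured bank, each with sym/asym tx and rx rings.
 */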
static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
{
	int i;
	unsigned long bank;
	unsigned long num_inst, num_msg_sym, num_msg_asym;
	int msg_size;
	struct qat_crypto_instance *inst;
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

	INIT_LIST_HEAD(&accel_dev->crypto_list);
	if (adf_cfg_get_param_value(accel_dev, SEC, ADF_NUM_CY, val))
		return -EFAULT;

	if (kstrtoul(val, 0, &num_inst))
		return -EFAULT;

	for (i = 0; i < num_inst; i++) {
		inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
				    dev_to_node(&GET_DEV(accel_dev)));
		if (!inst)
			goto err;

		list_add_tail(&inst->list, &accel_dev->crypto_list);
		inst->id = i;
		atomic_set(&inst->refctr, 0);
		inst->accel_dev = accel_dev;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
			goto err;

		if (kstrtoul(val, 10, &bank))
			goto err;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
			goto err;

		if (kstrtoul(val, 10, &num_msg_sym))
			goto err;

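		/* The tx and rx rings are each created with half the configured message count */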
		num_msg_sym = num_msg_sym >> 1;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
			goto err;

		if (kstrtoul(val, 10, &num_msg_asym))
			goto err;
		num_msg_asym = num_msg_asym >> 1;

		msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
				    msg_size, key, NULL, 0, &inst->sym_tx))
			goto err;

		msg_size = msg_size >> 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
				    msg_size, key, NULL, 0, &inst->pke_tx))
			goto err;

		msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
				    msg_size, key, qat_alg_callback, 0,
				    &inst->sym_rx))
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
				    msg_size, key, qat_alg_asym_callback, 0,
				    &inst->pke_rx))
			goto err;
	}
	return 0;
err:
	qat_crypto_free_instances(accel_dev);
	return -ENOMEM;
}

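/* Service hooks: build instances on device init, tear them down on shutdown */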
static int qat_crypto_init(struct adf_accel_dev *accel_dev)
{
	if (qat_crypto_create_instances(accel_dev))
		return -EFAULT;

	return 0;
}

static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
{
	return qat_crypto_free_instances(accel_dev);
}

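/* Dispatch ADF lifecycle events to the init/shutdown handlers above */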
static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
				    enum adf_event event)
{
	int ret;

	switch (event) {
	case ADF_EVENT_INIT:
		ret = qat_crypto_init(accel_dev);
		break;
	case ADF_EVENT_SHUTDOWN:
		ret = qat_crypto_shutdown(accel_dev);
		break;
	case ADF_EVENT_RESTARTING:
	case ADF_EVENT_RESTARTED:
	case ADF_EVENT_START:
	case ADF_EVENT_STOP:
	default:
		ret = 0;
	}
	return ret;
}

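/*
 * Register the crypto service with the ADF framework so that
 * qat_crypto_event_handler() receives device lifecycle events.
 */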
int qat_crypto_register(void)
{
	memset(&qat_crypto, 0, sizeof(qat_crypto));
	qat_crypto.event_hld = qat_crypto_event_handler;
	qat_crypto.name = "qat_crypto";
	return adf_service_register(&qat_crypto);
}

int qat_crypto_unregister(void)
{
	return adf_service_unregister(&qat_crypto);
}