1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2014 - 2020 Intel Corporation */
3 #include <linux/module.h>
4 #include <linux/slab.h>
5 #include <linux/crypto.h>
6 #include <crypto/internal/aead.h>
7 #include <crypto/internal/skcipher.h>
8 #include <crypto/aes.h>
9 #include <crypto/sha.h>
10 #include <crypto/hash.h>
11 #include <crypto/hmac.h>
12 #include <crypto/algapi.h>
13 #include <crypto/authenc.h>
14 #include <crypto/xts.h>
15 #include <linux/dma-mapping.h>
16 #include "adf_accel_devices.h"
17 #include "adf_transport.h"
18 #include "adf_common_drv.h"
19 #include "qat_crypto.h"
20 #include "icp_qat_hw.h"
21 #include "icp_qat_fw.h"
22 #include "icp_qat_fw_la.h"
23
24 #define QAT_AES_HW_CONFIG_ENC(alg, mode) \
25 ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
26 ICP_QAT_HW_CIPHER_NO_CONVERT, \
27 ICP_QAT_HW_CIPHER_ENCRYPT)
28
29 #define QAT_AES_HW_CONFIG_DEC(alg, mode) \
30 ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
31 ICP_QAT_HW_CIPHER_KEY_CONVERT, \
32 ICP_QAT_HW_CIPHER_DECRYPT)
33
34 static DEFINE_MUTEX(algs_lock);
35 static unsigned int active_devs;
36
37 struct qat_alg_buf {
38 u32 len;
39 u32 resrvd;
40 u64 addr;
41 } __packed;
42
43 struct qat_alg_buf_list {
44 u64 resrvd;
45 u32 num_bufs;
46 u32 num_mapped_bufs;
47 struct qat_alg_buf bufers[];
48 } __packed __aligned(64);
49
50 /* Common content descriptor */
51 struct qat_alg_cd {
52 union {
53 struct qat_enc { /* Encrypt content desc */
54 struct icp_qat_hw_cipher_algo_blk cipher;
55 struct icp_qat_hw_auth_algo_blk hash;
56 } qat_enc_cd;
57 struct qat_dec { /* Decrypt content desc */
58 struct icp_qat_hw_auth_algo_blk hash;
59 struct icp_qat_hw_cipher_algo_blk cipher;
60 } qat_dec_cd;
61 };
62 } __aligned(64);
63
64 struct qat_alg_aead_ctx {
65 struct qat_alg_cd *enc_cd;
66 struct qat_alg_cd *dec_cd;
67 dma_addr_t enc_cd_paddr;
68 dma_addr_t dec_cd_paddr;
69 struct icp_qat_fw_la_bulk_req enc_fw_req;
70 struct icp_qat_fw_la_bulk_req dec_fw_req;
71 struct crypto_shash *hash_tfm;
72 enum icp_qat_hw_auth_algo qat_hash_alg;
73 struct qat_crypto_instance *inst;
74 union {
75 struct sha1_state sha1;
76 struct sha256_state sha256;
77 struct sha512_state sha512;
78 };
79 char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
80 char opad[SHA512_BLOCK_SIZE];
81 };
82
83 struct qat_alg_skcipher_ctx {
84 struct icp_qat_hw_cipher_algo_blk *enc_cd;
85 struct icp_qat_hw_cipher_algo_blk *dec_cd;
86 dma_addr_t enc_cd_paddr;
87 dma_addr_t dec_cd_paddr;
88 struct icp_qat_fw_la_bulk_req enc_fw_req;
89 struct icp_qat_fw_la_bulk_req dec_fw_req;
90 struct qat_crypto_instance *inst;
91 struct crypto_skcipher *ftfm;
92 bool fallback;
93 };
94
95 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
96 {
97 switch (qat_hash_alg) {
98 case ICP_QAT_HW_AUTH_ALGO_SHA1:
99 return ICP_QAT_HW_SHA1_STATE1_SZ;
100 case ICP_QAT_HW_AUTH_ALGO_SHA256:
101 return ICP_QAT_HW_SHA256_STATE1_SZ;
102 case ICP_QAT_HW_AUTH_ALGO_SHA512:
103 return ICP_QAT_HW_SHA512_STATE1_SZ;
104 default:
105 return -EFAULT;
106 }
107 return -EFAULT;
108 }
109
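/*
 * Pre-compute the partial HMAC state for the firmware: hash one block of
 * key XOR ipad and one block of key XOR opad with the software shash, then
 * export the intermediate digests into the content descriptor's state1
 * area so the accelerator can resume the HMAC without ever seeing the key.
 */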
110 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
111 struct qat_alg_aead_ctx *ctx,
112 const u8 *auth_key,
113 unsigned int auth_keylen)
114 {
115 SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
116 int block_size = crypto_shash_blocksize(ctx->hash_tfm);
117 int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
118 __be32 *hash_state_out;
119 __be64 *hash512_state_out;
120 int i, offset;
121
122 memset(ctx->ipad, 0, block_size);
123 memset(ctx->opad, 0, block_size);
124 shash->tfm = ctx->hash_tfm;
125
126 if (auth_keylen > block_size) {
127 int ret = crypto_shash_digest(shash, auth_key,
128 auth_keylen, ctx->ipad);
129 if (ret)
130 return ret;
131
132 memcpy(ctx->opad, ctx->ipad, digest_size);
133 } else {
134 memcpy(ctx->ipad, auth_key, auth_keylen);
135 memcpy(ctx->opad, auth_key, auth_keylen);
136 }
137
138 for (i = 0; i < block_size; i++) {
139 char *ipad_ptr = ctx->ipad + i;
140 char *opad_ptr = ctx->opad + i;
141 *ipad_ptr ^= HMAC_IPAD_VALUE;
142 *opad_ptr ^= HMAC_OPAD_VALUE;
143 }
144
145 if (crypto_shash_init(shash))
146 return -EFAULT;
147
148 if (crypto_shash_update(shash, ctx->ipad, block_size))
149 return -EFAULT;
150
151 hash_state_out = (__be32 *)hash->sha.state1;
152 hash512_state_out = (__be64 *)hash_state_out;
153
154 switch (ctx->qat_hash_alg) {
155 case ICP_QAT_HW_AUTH_ALGO_SHA1:
156 if (crypto_shash_export(shash, &ctx->sha1))
157 return -EFAULT;
158 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
159 *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
160 break;
161 case ICP_QAT_HW_AUTH_ALGO_SHA256:
162 if (crypto_shash_export(shash, &ctx->sha256))
163 return -EFAULT;
164 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
165 *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
166 break;
167 case ICP_QAT_HW_AUTH_ALGO_SHA512:
168 if (crypto_shash_export(shash, &ctx->sha512))
169 return -EFAULT;
170 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
171 *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
172 break;
173 default:
174 return -EFAULT;
175 }
176
177 if (crypto_shash_init(shash))
178 return -EFAULT;
179
180 if (crypto_shash_update(shash, ctx->opad, block_size))
181 return -EFAULT;
182
183 offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
184 if (offset < 0)
185 return -EFAULT;
186
187 hash_state_out = (__be32 *)(hash->sha.state1 + offset);
188 hash512_state_out = (__be64 *)hash_state_out;
189
190 switch (ctx->qat_hash_alg) {
191 case ICP_QAT_HW_AUTH_ALGO_SHA1:
192 if (crypto_shash_export(shash, &ctx->sha1))
193 return -EFAULT;
194 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
195 *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
196 break;
197 case ICP_QAT_HW_AUTH_ALGO_SHA256:
198 if (crypto_shash_export(shash, &ctx->sha256))
199 return -EFAULT;
200 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
201 *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
202 break;
203 case ICP_QAT_HW_AUTH_ALGO_SHA512:
204 if (crypto_shash_export(shash, &ctx->sha512))
205 return -EFAULT;
206 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
207 *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
208 break;
209 default:
210 return -EFAULT;
211 }
212 memzero_explicit(ctx->ipad, block_size);
213 memzero_explicit(ctx->opad, block_size);
214 return 0;
215 }
216
217 static void qat_alg_init_hdr_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
218 {
219 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
220 ICP_QAT_FW_CIPH_IV_64BIT_PTR);
221 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
222 ICP_QAT_FW_LA_UPDATE_STATE);
223 }
224
225 static void qat_alg_init_hdr_no_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
226 {
227 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
228 ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
229 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
230 ICP_QAT_FW_LA_NO_UPDATE_STATE);
231 }
232
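/*
 * Fill in the request header fields shared by all lookaside (LA) requests:
 * SGL pointer type, 64-bit content descriptor addressing and no partial
 * processing. AEAD requests use a 16-byte embedded IV with no state update,
 * while skcipher requests use a 64-bit IV pointer with state update.
 */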
233 static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
234 int aead)
235 {
236 header->hdr_flags =
237 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
238 header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
239 header->comn_req_flags =
240 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
241 QAT_COMN_PTR_TYPE_SGL);
242 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
243 ICP_QAT_FW_LA_PARTIAL_NONE);
244 if (aead)
245 qat_alg_init_hdr_no_iv_updt(header);
246 else
247 qat_alg_init_hdr_iv_updt(header);
248 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
249 ICP_QAT_FW_LA_NO_PROTO);
250 }
251
252 static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
253 int alg,
254 struct crypto_authenc_keys *keys,
255 int mode)
256 {
257 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
258 unsigned int digestsize = crypto_aead_authsize(aead_tfm);
259 struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
260 struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
261 struct icp_qat_hw_auth_algo_blk *hash =
262 (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
263 sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
264 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
265 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
266 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
267 void *ptr = &req_tmpl->cd_ctrl;
268 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
269 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
270
271 /* CD setup */
272 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
273 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
274 hash->sha.inner_setup.auth_config.config =
275 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
276 ctx->qat_hash_alg, digestsize);
277 hash->sha.inner_setup.auth_counter.counter =
278 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
279
280 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
281 return -EFAULT;
282
283 /* Request setup */
284 qat_alg_init_common_hdr(header, 1);
285 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
286 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
287 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
288 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
289 ICP_QAT_FW_LA_RET_AUTH_RES);
290 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
291 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
292 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
293 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
294
295 /* Cipher CD config setup */
296 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
297 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
298 cipher_cd_ctrl->cipher_cfg_offset = 0;
299 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
300 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
301 /* Auth CD config setup */
302 hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
303 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
304 hash_cd_ctrl->inner_res_sz = digestsize;
305 hash_cd_ctrl->final_sz = digestsize;
306
307 switch (ctx->qat_hash_alg) {
308 case ICP_QAT_HW_AUTH_ALGO_SHA1:
309 hash_cd_ctrl->inner_state1_sz =
310 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
311 hash_cd_ctrl->inner_state2_sz =
312 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
313 break;
314 case ICP_QAT_HW_AUTH_ALGO_SHA256:
315 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
316 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
317 break;
318 case ICP_QAT_HW_AUTH_ALGO_SHA512:
319 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
320 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
321 break;
322 default:
323 break;
324 }
325 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
326 ((sizeof(struct icp_qat_hw_auth_setup) +
327 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
328 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
329 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
330 return 0;
331 }
332
333 static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
334 int alg,
335 struct crypto_authenc_keys *keys,
336 int mode)
337 {
338 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
339 unsigned int digestsize = crypto_aead_authsize(aead_tfm);
340 struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
341 struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
342 struct icp_qat_hw_cipher_algo_blk *cipher =
343 (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
344 sizeof(struct icp_qat_hw_auth_setup) +
345 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
346 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
347 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
348 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
349 void *ptr = &req_tmpl->cd_ctrl;
350 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
351 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
352 struct icp_qat_fw_la_auth_req_params *auth_param =
353 (struct icp_qat_fw_la_auth_req_params *)
354 ((char *)&req_tmpl->serv_specif_rqpars +
355 sizeof(struct icp_qat_fw_la_cipher_req_params));
356
357 /* CD setup */
358 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
359 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
360 hash->sha.inner_setup.auth_config.config =
361 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
362 ctx->qat_hash_alg,
363 digestsize);
364 hash->sha.inner_setup.auth_counter.counter =
365 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
366
367 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
368 return -EFAULT;
369
370 /* Request setup */
371 qat_alg_init_common_hdr(header, 1);
372 header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
373 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
374 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
375 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
376 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
377 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
378 ICP_QAT_FW_LA_CMP_AUTH_RES);
379 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
380 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
381
382 /* Cipher CD config setup */
383 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
384 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
385 cipher_cd_ctrl->cipher_cfg_offset =
386 (sizeof(struct icp_qat_hw_auth_setup) +
387 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
388 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
389 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
390
391 /* Auth CD config setup */
392 hash_cd_ctrl->hash_cfg_offset = 0;
393 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
394 hash_cd_ctrl->inner_res_sz = digestsize;
395 hash_cd_ctrl->final_sz = digestsize;
396
397 switch (ctx->qat_hash_alg) {
398 case ICP_QAT_HW_AUTH_ALGO_SHA1:
399 hash_cd_ctrl->inner_state1_sz =
400 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
401 hash_cd_ctrl->inner_state2_sz =
402 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
403 break;
404 case ICP_QAT_HW_AUTH_ALGO_SHA256:
405 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
406 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
407 break;
408 case ICP_QAT_HW_AUTH_ALGO_SHA512:
409 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
410 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
411 break;
412 default:
413 break;
414 }
415
416 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
417 ((sizeof(struct icp_qat_hw_auth_setup) +
418 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
419 auth_param->auth_res_sz = digestsize;
420 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
421 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
422 return 0;
423 }
424
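/*
 * Set up the parts of the cipher-only content descriptor and request
 * template that are common to both directions: key copy, common header,
 * CD sizes and the CIPHER -> DRAM_WR slice chain.
 */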
425 static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
426 struct icp_qat_fw_la_bulk_req *req,
427 struct icp_qat_hw_cipher_algo_blk *cd,
428 const u8 *key, unsigned int keylen)
429 {
430 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
431 struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
432 struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
433
434 memcpy(cd->aes.key, key, keylen);
435 qat_alg_init_common_hdr(header, 0);
436 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
437 cd_pars->u.s.content_desc_params_sz =
438 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
439 /* Cipher CD config setup */
440 cd_ctrl->cipher_key_sz = keylen >> 3;
441 cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
442 cd_ctrl->cipher_cfg_offset = 0;
443 ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
444 ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
445 }
446
447 static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
448 int alg, const u8 *key,
449 unsigned int keylen, int mode)
450 {
451 struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
452 struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
453 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
454
455 qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
456 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
457 enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
458 }
459
460 static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
461 int alg, const u8 *key,
462 unsigned int keylen, int mode)
463 {
464 struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
465 struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
466 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
467
468 qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
469 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
470
471 if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
472 dec_cd->aes.cipher_config.val =
473 QAT_AES_HW_CONFIG_DEC(alg, mode);
474 else
475 dec_cd->aes.cipher_config.val =
476 QAT_AES_HW_CONFIG_ENC(alg, mode);
477 }
478
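/*
 * Map the key length onto a QAT AES algorithm id. XTS keys carry two AES
 * keys back to back, so the accepted lengths are doubled in that mode.
 */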
479 static int qat_alg_validate_key(int key_len, int *alg, int mode)
480 {
481 if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
482 switch (key_len) {
483 case AES_KEYSIZE_128:
484 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
485 break;
486 case AES_KEYSIZE_192:
487 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
488 break;
489 case AES_KEYSIZE_256:
490 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
491 break;
492 default:
493 return -EINVAL;
494 }
495 } else {
496 switch (key_len) {
497 case AES_KEYSIZE_128 << 1:
498 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
499 break;
500 case AES_KEYSIZE_256 << 1:
501 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
502 break;
503 default:
504 return -EINVAL;
505 }
506 }
507 return 0;
508 }
509
510 static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
511 unsigned int keylen, int mode)
512 {
513 struct crypto_authenc_keys keys;
514 int alg;
515
516 if (crypto_authenc_extractkeys(&keys, key, keylen))
517 goto bad_key;
518
519 if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
520 goto bad_key;
521
522 if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
523 goto error;
524
525 if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
526 goto error;
527
528 memzero_explicit(&keys, sizeof(keys));
529 return 0;
530 bad_key:
531 memzero_explicit(&keys, sizeof(keys));
532 return -EINVAL;
533 error:
534 memzero_explicit(&keys, sizeof(keys));
535 return -EFAULT;
536 }
537
538 static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
539 const u8 *key,
540 unsigned int keylen,
541 int mode)
542 {
543 int alg;
544
545 if (qat_alg_validate_key(keylen, &alg, mode))
546 return -EINVAL;
547
548 qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
549 qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
550 return 0;
551 }
552
553 static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
554 unsigned int keylen)
555 {
556 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
557
558 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
559 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
560 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
561 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
562
563 return qat_alg_aead_init_sessions(tfm, key, keylen,
564 ICP_QAT_HW_CIPHER_CBC_MODE);
565 }
566
567 static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
568 unsigned int keylen)
569 {
570 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
571 struct qat_crypto_instance *inst = NULL;
572 int node = get_current_node();
573 struct device *dev;
574 int ret;
575
576 inst = qat_crypto_get_instance_node(node);
577 if (!inst)
578 return -EINVAL;
579 dev = &GET_DEV(inst->accel_dev);
580 ctx->inst = inst;
581 ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
582 &ctx->enc_cd_paddr,
583 GFP_ATOMIC);
584 if (!ctx->enc_cd) {
585 ret = -ENOMEM;
586 goto out_free_inst;
587 }
588 ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
589 &ctx->dec_cd_paddr,
590 GFP_ATOMIC);
591 if (!ctx->dec_cd) {
592 ret = -ENOMEM;
593 goto out_free_enc;
594 }
595
596 ret = qat_alg_aead_init_sessions(tfm, key, keylen,
597 ICP_QAT_HW_CIPHER_CBC_MODE);
598 if (ret)
599 goto out_free_all;
600
601 return 0;
602
603 out_free_all:
604 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
605 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
606 ctx->dec_cd, ctx->dec_cd_paddr);
607 ctx->dec_cd = NULL;
608 out_free_enc:
609 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
610 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
611 ctx->enc_cd, ctx->enc_cd_paddr);
612 ctx->enc_cd = NULL;
613 out_free_inst:
614 ctx->inst = NULL;
615 qat_crypto_put_instance(inst);
616 return ret;
617 }
618
619 static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
620 unsigned int keylen)
621 {
622 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
623
624 if (ctx->enc_cd)
625 return qat_alg_aead_rekey(tfm, key, keylen);
626 else
627 return qat_alg_aead_newkey(tfm, key, keylen);
628 }
629
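/*
 * Undo qat_alg_sgl_to_bufl(): unmap every data buffer and the flat buffer
 * lists themselves, then free the kernel copies of the lists. The output
 * list is only touched for out-of-place requests (src != dst).
 */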
630 static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
631 struct qat_crypto_request *qat_req)
632 {
633 struct device *dev = &GET_DEV(inst->accel_dev);
634 struct qat_alg_buf_list *bl = qat_req->buf.bl;
635 struct qat_alg_buf_list *blout = qat_req->buf.blout;
636 dma_addr_t blp = qat_req->buf.blp;
637 dma_addr_t blpout = qat_req->buf.bloutp;
638 size_t sz = qat_req->buf.sz;
639 size_t sz_out = qat_req->buf.sz_out;
640 int i;
641
642 for (i = 0; i < bl->num_bufs; i++)
643 dma_unmap_single(dev, bl->bufers[i].addr,
644 bl->bufers[i].len, DMA_BIDIRECTIONAL);
645
646 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
647 kfree(bl);
648 if (blp != blpout) {
649 /* For out-of-place operations, unmap only the mapped data buffers */
650 int bufless = blout->num_bufs - blout->num_mapped_bufs;
651
652 for (i = bufless; i < blout->num_bufs; i++) {
653 dma_unmap_single(dev, blout->bufers[i].addr,
654 blout->bufers[i].len,
655 DMA_BIDIRECTIONAL);
656 }
657 dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
658 kfree(blout);
659 }
660 }
661
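/*
 * Flatten the source (and, for out-of-place requests, destination)
 * scatterlists into DMA-mapped qat_alg_buf_list structures the firmware
 * can walk. Zero-length entries are skipped; on any mapping failure
 * everything mapped so far is unwound.
 */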
662 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
663 struct scatterlist *sgl,
664 struct scatterlist *sglout,
665 struct qat_crypto_request *qat_req)
666 {
667 struct device *dev = &GET_DEV(inst->accel_dev);
668 int i, sg_nctr = 0;
669 int n = sg_nents(sgl);
670 struct qat_alg_buf_list *bufl;
671 struct qat_alg_buf_list *buflout = NULL;
672 dma_addr_t blp;
673 dma_addr_t bloutp = 0;
674 struct scatterlist *sg;
675 size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
676
677 if (unlikely(!n))
678 return -EINVAL;
679
680 bufl = kzalloc_node(sz, GFP_ATOMIC,
681 dev_to_node(&GET_DEV(inst->accel_dev)));
682 if (unlikely(!bufl))
683 return -ENOMEM;
684
685 blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
686 if (unlikely(dma_mapping_error(dev, blp)))
687 goto err_in;
688
689 for_each_sg(sgl, sg, n, i) {
690 int y = sg_nctr;
691
692 if (!sg->length)
693 continue;
694
695 bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
696 sg->length,
697 DMA_BIDIRECTIONAL);
698 bufl->bufers[y].len = sg->length;
699 if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
700 goto err_in;
701 sg_nctr++;
702 }
703 bufl->num_bufs = sg_nctr;
704 qat_req->buf.bl = bufl;
705 qat_req->buf.blp = blp;
706 qat_req->buf.sz = sz;
707 /* Handle out of place operation */
708 if (sgl != sglout) {
709 struct qat_alg_buf *bufers;
710
711 n = sg_nents(sglout);
712 sz_out = struct_size(buflout, bufers, n + 1);
713 sg_nctr = 0;
714 buflout = kzalloc_node(sz_out, GFP_ATOMIC,
715 dev_to_node(&GET_DEV(inst->accel_dev)));
716 if (unlikely(!buflout))
717 goto err_in;
718 bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
719 if (unlikely(dma_mapping_error(dev, bloutp)))
720 goto err_out;
721 bufers = buflout->bufers;
722 for_each_sg(sglout, sg, n, i) {
723 int y = sg_nctr;
724
725 if (!sg->length)
726 continue;
727
728 bufers[y].addr = dma_map_single(dev, sg_virt(sg),
729 sg->length,
730 DMA_BIDIRECTIONAL);
731 if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
732 goto err_out;
733 bufers[y].len = sg->length;
734 sg_nctr++;
735 }
736 buflout->num_bufs = sg_nctr;
737 buflout->num_mapped_bufs = sg_nctr;
738 qat_req->buf.blout = buflout;
739 qat_req->buf.bloutp = bloutp;
740 qat_req->buf.sz_out = sz_out;
741 } else {
742 /* Otherwise set the src and dst to the same address */
743 qat_req->buf.bloutp = qat_req->buf.blp;
744 qat_req->buf.sz_out = 0;
745 }
746 return 0;
747
748 err_out:
749 n = sg_nents(sglout);
750 for (i = 0; i < n; i++)
751 if (!dma_mapping_error(dev, buflout->bufers[i].addr))
752 dma_unmap_single(dev, buflout->bufers[i].addr,
753 buflout->bufers[i].len,
754 DMA_BIDIRECTIONAL);
755 if (!dma_mapping_error(dev, bloutp))
756 dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
757 kfree(buflout);
758
759 err_in:
760 n = sg_nents(sgl);
761 for (i = 0; i < n; i++)
762 if (!dma_mapping_error(dev, bufl->bufers[i].addr))
763 dma_unmap_single(dev, bufl->bufers[i].addr,
764 bufl->bufers[i].len,
765 DMA_BIDIRECTIONAL);
766
767 if (!dma_mapping_error(dev, blp))
768 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
769 kfree(bufl);
770
771 dev_err(dev, "Failed to map buf for dma\n");
772 return -ENOMEM;
773 }
774
775 static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
776 struct qat_crypto_request *qat_req)
777 {
778 struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
779 struct qat_crypto_instance *inst = ctx->inst;
780 struct aead_request *areq = qat_req->aead_req;
781 u8 stat_filed = qat_resp->comn_resp.comn_status;
782 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
783
784 qat_alg_free_bufl(inst, qat_req);
785 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
786 res = -EBADMSG;
787 areq->base.complete(&areq->base, res);
788 }
789
790 static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
791 struct qat_crypto_request *qat_req)
792 {
793 struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
794 struct qat_crypto_instance *inst = ctx->inst;
795 struct skcipher_request *sreq = qat_req->skcipher_req;
796 u8 stat_filed = qat_resp->comn_resp.comn_status;
797 struct device *dev = &GET_DEV(ctx->inst->accel_dev);
798 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
799
800 qat_alg_free_bufl(inst, qat_req);
801 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
802 res = -EINVAL;
803
804 memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
805 dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
806 qat_req->iv_paddr);
807
808 sreq->base.complete(&sreq->base, res);
809 }
810
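/*
 * Response ring callback: the opaque data carried in the firmware response
 * is the originating qat_crypto_request, so dispatch to the per-request
 * completion handler (AEAD or skcipher) stored in it.
 */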
811 void qat_alg_callback(void *resp)
812 {
813 struct icp_qat_fw_la_resp *qat_resp = resp;
814 struct qat_crypto_request *qat_req =
815 (void *)(__force long)qat_resp->opaque_data;
816
817 qat_req->cb(qat_resp, qat_req);
818 }
819
820 static int qat_alg_aead_dec(struct aead_request *areq)
821 {
822 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
823 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
824 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
825 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
826 struct icp_qat_fw_la_cipher_req_params *cipher_param;
827 struct icp_qat_fw_la_auth_req_params *auth_param;
828 struct icp_qat_fw_la_bulk_req *msg;
829 int digst_size = crypto_aead_authsize(aead_tfm);
830 int ret, ctr = 0;
831 u32 cipher_len;
832
833 cipher_len = areq->cryptlen - digst_size;
834 if (cipher_len % AES_BLOCK_SIZE != 0)
835 return -EINVAL;
836
837 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
838 if (unlikely(ret))
839 return ret;
840
841 msg = &qat_req->req;
842 *msg = ctx->dec_fw_req;
843 qat_req->aead_ctx = ctx;
844 qat_req->aead_req = areq;
845 qat_req->cb = qat_aead_alg_callback;
846 qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
847 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
848 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
849 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
850 cipher_param->cipher_length = cipher_len;
851 cipher_param->cipher_offset = areq->assoclen;
852 memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
853 auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
854 auth_param->auth_off = 0;
855 auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
856 do {
857 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
858 } while (ret == -EAGAIN && ctr++ < 10);
859
860 if (ret == -EAGAIN) {
861 qat_alg_free_bufl(ctx->inst, qat_req);
862 return -EBUSY;
863 }
864 return -EINPROGRESS;
865 }
866
867 static int qat_alg_aead_enc(struct aead_request *areq)
868 {
869 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
870 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
871 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
872 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
873 struct icp_qat_fw_la_cipher_req_params *cipher_param;
874 struct icp_qat_fw_la_auth_req_params *auth_param;
875 struct icp_qat_fw_la_bulk_req *msg;
876 u8 *iv = areq->iv;
877 int ret, ctr = 0;
878
879 if (areq->cryptlen % AES_BLOCK_SIZE != 0)
880 return -EINVAL;
881
882 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
883 if (unlikely(ret))
884 return ret;
885
886 msg = &qat_req->req;
887 *msg = ctx->enc_fw_req;
888 qat_req->aead_ctx = ctx;
889 qat_req->aead_req = areq;
890 qat_req->cb = qat_aead_alg_callback;
891 qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
892 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
893 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
894 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
895 auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
896
897 memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
898 cipher_param->cipher_length = areq->cryptlen;
899 cipher_param->cipher_offset = areq->assoclen;
900
901 auth_param->auth_off = 0;
902 auth_param->auth_len = areq->assoclen + areq->cryptlen;
903
904 do {
905 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
906 } while (ret == -EAGAIN && ctr++ < 10);
907
908 if (ret == -EAGAIN) {
909 qat_alg_free_bufl(ctx->inst, qat_req);
910 return -EBUSY;
911 }
912 return -EINPROGRESS;
913 }
914
915 static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
916 const u8 *key, unsigned int keylen,
917 int mode)
918 {
919 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
920 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
921 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
922 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
923
924 return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
925 }
926
927 static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
928 const u8 *key, unsigned int keylen,
929 int mode)
930 {
931 struct qat_crypto_instance *inst = NULL;
932 struct device *dev;
933 int node = get_current_node();
934 int ret;
935
936 inst = qat_crypto_get_instance_node(node);
937 if (!inst)
938 return -EINVAL;
939 dev = &GET_DEV(inst->accel_dev);
940 ctx->inst = inst;
941 ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
942 &ctx->enc_cd_paddr,
943 GFP_ATOMIC);
944 if (!ctx->enc_cd) {
945 ret = -ENOMEM;
946 goto out_free_instance;
947 }
948 ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
949 &ctx->dec_cd_paddr,
950 GFP_ATOMIC);
951 if (!ctx->dec_cd) {
952 ret = -ENOMEM;
953 goto out_free_enc;
954 }
955
956 ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
957 if (ret)
958 goto out_free_all;
959
960 return 0;
961
962 out_free_all:
963 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
964 dma_free_coherent(dev, sizeof(*ctx->dec_cd),
965 ctx->dec_cd, ctx->dec_cd_paddr);
966 ctx->dec_cd = NULL;
967 out_free_enc:
968 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
969 dma_free_coherent(dev, sizeof(*ctx->enc_cd),
970 ctx->enc_cd, ctx->enc_cd_paddr);
971 ctx->enc_cd = NULL;
972 out_free_instance:
973 ctx->inst = NULL;
974 qat_crypto_put_instance(inst);
975 return ret;
976 }
977
978 static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
979 const u8 *key, unsigned int keylen,
980 int mode)
981 {
982 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
983
984 if (ctx->enc_cd)
985 return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
986 else
987 return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
988 }
989
990 static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
991 const u8 *key, unsigned int keylen)
992 {
993 return qat_alg_skcipher_setkey(tfm, key, keylen,
994 ICP_QAT_HW_CIPHER_CBC_MODE);
995 }
996
997 static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
998 const u8 *key, unsigned int keylen)
999 {
1000 return qat_alg_skcipher_setkey(tfm, key, keylen,
1001 ICP_QAT_HW_CIPHER_CTR_MODE);
1002 }
1003
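/*
 * xts(aes) setkey: after xts_verify_key(), AES-192 XTS keys are handed to
 * the software fallback tfm (ctx->ftfm) instead of being programmed into
 * the hardware path here; 128/256-bit keys are loaded into the QAT cipher
 * content descriptors in XTS mode.
 */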
1004 static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
1005 const u8 *key, unsigned int keylen)
1006 {
1007 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1008 int ret;
1009
1010 ret = xts_verify_key(tfm, key, keylen);
1011 if (ret)
1012 return ret;
1013
1014 if (keylen >> 1 == AES_KEYSIZE_192) {
1015 ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
1016 if (ret)
1017 return ret;
1018
1019 ctx->fallback = true;
1020
1021 return 0;
1022 }
1023
1024 ctx->fallback = false;
1025
1026 return qat_alg_skcipher_setkey(tfm, key, keylen,
1027 ICP_QAT_HW_CIPHER_XTS_MODE);
1028 }
1029
1030 static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
1031 {
1032 struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1033 struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
1034 struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1035 struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
1036 struct icp_qat_fw_la_cipher_req_params *cipher_param;
1037 struct icp_qat_fw_la_bulk_req *msg;
1038 struct device *dev = &GET_DEV(ctx->inst->accel_dev);
1039 int ret, ctr = 0;
1040
1041 if (req->cryptlen == 0)
1042 return 0;
1043
1044 qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
1045 &qat_req->iv_paddr, GFP_ATOMIC);
1046 if (!qat_req->iv)
1047 return -ENOMEM;
1048
1049 ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1050 if (unlikely(ret)) {
1051 dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1052 qat_req->iv_paddr);
1053 return ret;
1054 }
1055
1056 msg = &qat_req->req;
1057 *msg = ctx->enc_fw_req;
1058 qat_req->skcipher_ctx = ctx;
1059 qat_req->skcipher_req = req;
1060 qat_req->cb = qat_skcipher_alg_callback;
1061 qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
1062 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1063 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1064 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1065 cipher_param->cipher_length = req->cryptlen;
1066 cipher_param->cipher_offset = 0;
1067 cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
1068 memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
1069 do {
1070 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
1071 } while (ret == -EAGAIN && ctr++ < 10);
1072
1073 if (ret == -EAGAIN) {
1074 qat_alg_free_bufl(ctx->inst, qat_req);
1075 dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1076 qat_req->iv_paddr);
1077 return -EBUSY;
1078 }
1079 return -EINPROGRESS;
1080 }
1081
1082 static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
1083 {
1084 if (req->cryptlen % AES_BLOCK_SIZE != 0)
1085 return -EINVAL;
1086
1087 return qat_alg_skcipher_encrypt(req);
1088 }
1089
1090 static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
1091 {
1092 struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1093 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
1094 struct skcipher_request *nreq = skcipher_request_ctx(req);
1095
1096 if (req->cryptlen < XTS_BLOCK_SIZE)
1097 return -EINVAL;
1098
1099 if (ctx->fallback) {
1100 memcpy(nreq, req, sizeof(*req));
1101 skcipher_request_set_tfm(nreq, ctx->ftfm);
1102 return crypto_skcipher_encrypt(nreq);
1103 }
1104
1105 return qat_alg_skcipher_encrypt(req);
1106 }
1107
1108 static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
1109 {
1110 struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1111 struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
1112 struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1113 struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
1114 struct icp_qat_fw_la_cipher_req_params *cipher_param;
1115 struct icp_qat_fw_la_bulk_req *msg;
1116 struct device *dev = &GET_DEV(ctx->inst->accel_dev);
1117 int ret, ctr = 0;
1118
1119 if (req->cryptlen == 0)
1120 return 0;
1121
1122 qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
1123 &qat_req->iv_paddr, GFP_ATOMIC);
1124 if (!qat_req->iv)
1125 return -ENOMEM;
1126
1127 ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1128 if (unlikely(ret)) {
1129 dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1130 qat_req->iv_paddr);
1131 return ret;
1132 }
1133
1134 msg = &qat_req->req;
1135 *msg = ctx->dec_fw_req;
1136 qat_req->skcipher_ctx = ctx;
1137 qat_req->skcipher_req = req;
1138 qat_req->cb = qat_skcipher_alg_callback;
1139 qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
1140 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1141 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1142 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1143 cipher_param->cipher_length = req->cryptlen;
1144 cipher_param->cipher_offset = 0;
1145 cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
1146 memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
1147 do {
1148 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
1149 } while (ret == -EAGAIN && ctr++ < 10);
1150
1151 if (ret == -EAGAIN) {
1152 qat_alg_free_bufl(ctx->inst, qat_req);
1153 dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1154 qat_req->iv_paddr);
1155 return -EBUSY;
1156 }
1157 return -EINPROGRESS;
1158 }
1159
1160 static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
1161 {
1162 if (req->cryptlen % AES_BLOCK_SIZE != 0)
1163 return -EINVAL;
1164
1165 return qat_alg_skcipher_decrypt(req);
1166 }
1167
1168 static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
1169 {
1170 struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1171 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
1172 struct skcipher_request *nreq = skcipher_request_ctx(req);
1173
1174 if (req->cryptlen < XTS_BLOCK_SIZE)
1175 return -EINVAL;
1176
1177 if (ctx->fallback) {
1178 memcpy(nreq, req, sizeof(*req));
1179 skcipher_request_set_tfm(nreq, ctx->ftfm);
1180 return crypto_skcipher_decrypt(nreq);
1181 }
1182
1183 return qat_alg_skcipher_decrypt(req);
1184 }
1185
1186 static int qat_alg_aead_init(struct crypto_aead *tfm,
1187 enum icp_qat_hw_auth_algo hash,
1188 const char *hash_name)
1189 {
1190 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1191
1192 ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1193 if (IS_ERR(ctx->hash_tfm))
1194 return PTR_ERR(ctx->hash_tfm);
1195 ctx->qat_hash_alg = hash;
1196 crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1197 return 0;
1198 }
1199
1200 static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
1201 {
1202 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
1203 }
1204
1205 static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
1206 {
1207 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
1208 }
1209
1210 static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
1211 {
1212 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
1213 }
1214
1215 static void qat_alg_aead_exit(struct crypto_aead *tfm)
1216 {
1217 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1218 struct qat_crypto_instance *inst = ctx->inst;
1219 struct device *dev;
1220
1221 crypto_free_shash(ctx->hash_tfm);
1222
1223 if (!inst)
1224 return;
1225
1226 dev = &GET_DEV(inst->accel_dev);
1227 if (ctx->enc_cd) {
1228 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
1229 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1230 ctx->enc_cd, ctx->enc_cd_paddr);
1231 }
1232 if (ctx->dec_cd) {
1233 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
1234 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1235 ctx->dec_cd, ctx->dec_cd_paddr);
1236 }
1237 qat_crypto_put_instance(inst);
1238 }
1239
1240 static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
1241 {
1242 crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1243 return 0;
1244 }
1245
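/*
 * The xts(aes) tfm allocates a software fallback cipher up front and sizes
 * the request context so it can hold either a QAT request or a fallback
 * skcipher_request, whichever path setkey later selects.
 */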
1246 static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
1247 {
1248 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1249 int reqsize;
1250
1251 ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
1252 CRYPTO_ALG_NEED_FALLBACK);
1253 if (IS_ERR(ctx->ftfm))
1254 return PTR_ERR(ctx->ftfm);
1255
1256 reqsize = max(sizeof(struct qat_crypto_request),
1257 sizeof(struct skcipher_request) +
1258 crypto_skcipher_reqsize(ctx->ftfm));
1259 crypto_skcipher_set_reqsize(tfm, reqsize);
1260
1261 return 0;
1262 }
1263
1264 static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
1265 {
1266 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1267 struct qat_crypto_instance *inst = ctx->inst;
1268 struct device *dev;
1269
1270 if (!inst)
1271 return;
1272
1273 dev = &GET_DEV(inst->accel_dev);
1274 if (ctx->enc_cd) {
1275 memset(ctx->enc_cd, 0,
1276 sizeof(struct icp_qat_hw_cipher_algo_blk));
1277 dma_free_coherent(dev,
1278 sizeof(struct icp_qat_hw_cipher_algo_blk),
1279 ctx->enc_cd, ctx->enc_cd_paddr);
1280 }
1281 if (ctx->dec_cd) {
1282 memset(ctx->dec_cd, 0,
1283 sizeof(struct icp_qat_hw_cipher_algo_blk));
1284 dma_free_coherent(dev,
1285 sizeof(struct icp_qat_hw_cipher_algo_blk),
1286 ctx->dec_cd, ctx->dec_cd_paddr);
1287 }
1288 qat_crypto_put_instance(inst);
1289 }
1290
1291 static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
1292 {
1293 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1294
1295 if (ctx->ftfm)
1296 crypto_free_skcipher(ctx->ftfm);
1297
1298 qat_alg_skcipher_exit_tfm(tfm);
1299 }
1300
1301 static struct aead_alg qat_aeads[] = { {
1302 .base = {
1303 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1304 .cra_driver_name = "qat_aes_cbc_hmac_sha1",
1305 .cra_priority = 4001,
1306 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1307 .cra_blocksize = AES_BLOCK_SIZE,
1308 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1309 .cra_module = THIS_MODULE,
1310 },
1311 .init = qat_alg_aead_sha1_init,
1312 .exit = qat_alg_aead_exit,
1313 .setkey = qat_alg_aead_setkey,
1314 .decrypt = qat_alg_aead_dec,
1315 .encrypt = qat_alg_aead_enc,
1316 .ivsize = AES_BLOCK_SIZE,
1317 .maxauthsize = SHA1_DIGEST_SIZE,
1318 }, {
1319 .base = {
1320 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1321 .cra_driver_name = "qat_aes_cbc_hmac_sha256",
1322 .cra_priority = 4001,
1323 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1324 .cra_blocksize = AES_BLOCK_SIZE,
1325 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1326 .cra_module = THIS_MODULE,
1327 },
1328 .init = qat_alg_aead_sha256_init,
1329 .exit = qat_alg_aead_exit,
1330 .setkey = qat_alg_aead_setkey,
1331 .decrypt = qat_alg_aead_dec,
1332 .encrypt = qat_alg_aead_enc,
1333 .ivsize = AES_BLOCK_SIZE,
1334 .maxauthsize = SHA256_DIGEST_SIZE,
1335 }, {
1336 .base = {
1337 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1338 .cra_driver_name = "qat_aes_cbc_hmac_sha512",
1339 .cra_priority = 4001,
1340 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1341 .cra_blocksize = AES_BLOCK_SIZE,
1342 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1343 .cra_module = THIS_MODULE,
1344 },
1345 .init = qat_alg_aead_sha512_init,
1346 .exit = qat_alg_aead_exit,
1347 .setkey = qat_alg_aead_setkey,
1348 .decrypt = qat_alg_aead_dec,
1349 .encrypt = qat_alg_aead_enc,
1350 .ivsize = AES_BLOCK_SIZE,
1351 .maxauthsize = SHA512_DIGEST_SIZE,
1352 } };
1353
1354 static struct skcipher_alg qat_skciphers[] = { {
1355 .base.cra_name = "cbc(aes)",
1356 .base.cra_driver_name = "qat_aes_cbc",
1357 .base.cra_priority = 4001,
1358 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1359 .base.cra_blocksize = AES_BLOCK_SIZE,
1360 .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1361 .base.cra_alignmask = 0,
1362 .base.cra_module = THIS_MODULE,
1363
1364 .init = qat_alg_skcipher_init_tfm,
1365 .exit = qat_alg_skcipher_exit_tfm,
1366 .setkey = qat_alg_skcipher_cbc_setkey,
1367 .decrypt = qat_alg_skcipher_blk_decrypt,
1368 .encrypt = qat_alg_skcipher_blk_encrypt,
1369 .min_keysize = AES_MIN_KEY_SIZE,
1370 .max_keysize = AES_MAX_KEY_SIZE,
1371 .ivsize = AES_BLOCK_SIZE,
1372 }, {
1373 .base.cra_name = "ctr(aes)",
1374 .base.cra_driver_name = "qat_aes_ctr",
1375 .base.cra_priority = 4001,
1376 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1377 .base.cra_blocksize = 1,
1378 .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1379 .base.cra_alignmask = 0,
1380 .base.cra_module = THIS_MODULE,
1381
1382 .init = qat_alg_skcipher_init_tfm,
1383 .exit = qat_alg_skcipher_exit_tfm,
1384 .setkey = qat_alg_skcipher_ctr_setkey,
1385 .decrypt = qat_alg_skcipher_decrypt,
1386 .encrypt = qat_alg_skcipher_encrypt,
1387 .min_keysize = AES_MIN_KEY_SIZE,
1388 .max_keysize = AES_MAX_KEY_SIZE,
1389 .ivsize = AES_BLOCK_SIZE,
1390 }, {
1391 .base.cra_name = "xts(aes)",
1392 .base.cra_driver_name = "qat_aes_xts",
1393 .base.cra_priority = 4001,
1394 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
1395 CRYPTO_ALG_ALLOCATES_MEMORY,
1396 .base.cra_blocksize = AES_BLOCK_SIZE,
1397 .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1398 .base.cra_alignmask = 0,
1399 .base.cra_module = THIS_MODULE,
1400
1401 .init = qat_alg_skcipher_init_xts_tfm,
1402 .exit = qat_alg_skcipher_exit_xts_tfm,
1403 .setkey = qat_alg_skcipher_xts_setkey,
1404 .decrypt = qat_alg_skcipher_xts_decrypt,
1405 .encrypt = qat_alg_skcipher_xts_encrypt,
1406 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1407 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1408 .ivsize = AES_BLOCK_SIZE,
1409 } };
1410
1411 int qat_algs_register(void)
1412 {
1413 int ret = 0;
1414
1415 mutex_lock(&algs_lock);
1416 if (++active_devs != 1)
1417 goto unlock;
1418
1419 ret = crypto_register_skciphers(qat_skciphers,
1420 ARRAY_SIZE(qat_skciphers));
1421 if (ret)
1422 goto unlock;
1423
1424 ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1425 if (ret)
1426 goto unreg_algs;
1427
1428 unlock:
1429 mutex_unlock(&algs_lock);
1430 return ret;
1431
1432 unreg_algs:
1433 crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
1434 goto unlock;
1435 }
1436
1437 void qat_algs_unregister(void)
1438 {
1439 mutex_lock(&algs_lock);
1440 if (--active_devs != 0)
1441 goto unlock;
1442
1443 crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1444 crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
1445
1446 unlock:
1447 mutex_unlock(&algs_lock);
1448 }
1449