1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2014 - 2020 Intel Corporation */
3 #include <linux/module.h>
4 #include <linux/slab.h>
5 #include <linux/crypto.h>
6 #include <crypto/internal/aead.h>
7 #include <crypto/internal/cipher.h>
8 #include <crypto/internal/skcipher.h>
9 #include <crypto/aes.h>
10 #include <crypto/sha1.h>
11 #include <crypto/sha2.h>
12 #include <crypto/hash.h>
13 #include <crypto/hmac.h>
14 #include <crypto/algapi.h>
15 #include <crypto/authenc.h>
16 #include <crypto/scatterwalk.h>
17 #include <crypto/xts.h>
18 #include <linux/dma-mapping.h>
19 #include "adf_accel_devices.h"
20 #include "qat_algs_send.h"
21 #include "adf_common_drv.h"
22 #include "qat_crypto.h"
23 #include "icp_qat_hw.h"
24 #include "icp_qat_fw.h"
25 #include "icp_qat_fw_la.h"
26
27 #define QAT_AES_HW_CONFIG_ENC(alg, mode) \
28 ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
29 ICP_QAT_HW_CIPHER_NO_CONVERT, \
30 ICP_QAT_HW_CIPHER_ENCRYPT)
31
32 #define QAT_AES_HW_CONFIG_DEC(alg, mode) \
33 ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
34 ICP_QAT_HW_CIPHER_KEY_CONVERT, \
35 ICP_QAT_HW_CIPHER_DECRYPT)
36
37 #define QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode) \
38 ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
39 ICP_QAT_HW_CIPHER_NO_CONVERT, \
40 ICP_QAT_HW_CIPHER_DECRYPT)
41
42 #define HW_CAP_AES_V2(accel_dev) \
43 (GET_HW_DATA(accel_dev)->accel_capabilities_mask & \
44 ICP_ACCEL_CAPABILITIES_AES_V2)
45
46 static DEFINE_MUTEX(algs_lock);
47 static unsigned int active_devs;
48
49 /* Common content descriptor */
50 struct qat_alg_cd {
51 union {
52 struct qat_enc { /* Encrypt content desc */
53 struct icp_qat_hw_cipher_algo_blk cipher;
54 struct icp_qat_hw_auth_algo_blk hash;
55 } qat_enc_cd;
56 struct qat_dec { /* Decrypt content desc */
57 struct icp_qat_hw_auth_algo_blk hash;
58 struct icp_qat_hw_cipher_algo_blk cipher;
59 } qat_dec_cd;
60 };
61 } __aligned(64);
62
63 struct qat_alg_aead_ctx {
64 struct qat_alg_cd *enc_cd;
65 struct qat_alg_cd *dec_cd;
66 dma_addr_t enc_cd_paddr;
67 dma_addr_t dec_cd_paddr;
68 struct icp_qat_fw_la_bulk_req enc_fw_req;
69 struct icp_qat_fw_la_bulk_req dec_fw_req;
70 struct crypto_shash *hash_tfm;
71 enum icp_qat_hw_auth_algo qat_hash_alg;
72 struct qat_crypto_instance *inst;
73 union {
74 struct sha1_state sha1;
75 struct sha256_state sha256;
76 struct sha512_state sha512;
77 };
78 char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
79 char opad[SHA512_BLOCK_SIZE];
80 };
81
82 struct qat_alg_skcipher_ctx {
83 struct icp_qat_hw_cipher_algo_blk *enc_cd;
84 struct icp_qat_hw_cipher_algo_blk *dec_cd;
85 dma_addr_t enc_cd_paddr;
86 dma_addr_t dec_cd_paddr;
87 struct icp_qat_fw_la_bulk_req enc_fw_req;
88 struct icp_qat_fw_la_bulk_req dec_fw_req;
89 struct qat_crypto_instance *inst;
90 struct crypto_skcipher *ftfm;
91 struct crypto_cipher *tweak;
92 bool fallback;
93 int mode;
94 };
95
96 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
97 {
98 switch (qat_hash_alg) {
99 case ICP_QAT_HW_AUTH_ALGO_SHA1:
100 return ICP_QAT_HW_SHA1_STATE1_SZ;
101 case ICP_QAT_HW_AUTH_ALGO_SHA256:
102 return ICP_QAT_HW_SHA256_STATE1_SZ;
103 case ICP_QAT_HW_AUTH_ALGO_SHA512:
104 return ICP_QAT_HW_SHA512_STATE1_SZ;
105 default:
106 return -EFAULT;
107 }
108 return -EFAULT;
109 }
110
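/*
 * Precompute the HMAC inner and outer hash states for the firmware.
 * The auth key is padded (or digested, if longer than a block), XORed
 * with the ipad/opad constants and run through one compression round;
 * the resulting inner state is exported to the start of state1 and the
 * outer state right after it, rounded up to an 8-byte boundary.
 */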
111 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
112 struct qat_alg_aead_ctx *ctx,
113 const u8 *auth_key,
114 unsigned int auth_keylen)
115 {
116 SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
117 int block_size = crypto_shash_blocksize(ctx->hash_tfm);
118 int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
119 __be32 *hash_state_out;
120 __be64 *hash512_state_out;
121 int i, offset;
122
123 memset(ctx->ipad, 0, block_size);
124 memset(ctx->opad, 0, block_size);
125 shash->tfm = ctx->hash_tfm;
126
127 if (auth_keylen > block_size) {
128 int ret = crypto_shash_digest(shash, auth_key,
129 auth_keylen, ctx->ipad);
130 if (ret)
131 return ret;
132
133 memcpy(ctx->opad, ctx->ipad, digest_size);
134 } else {
135 memcpy(ctx->ipad, auth_key, auth_keylen);
136 memcpy(ctx->opad, auth_key, auth_keylen);
137 }
138
139 for (i = 0; i < block_size; i++) {
140 char *ipad_ptr = ctx->ipad + i;
141 char *opad_ptr = ctx->opad + i;
142 *ipad_ptr ^= HMAC_IPAD_VALUE;
143 *opad_ptr ^= HMAC_OPAD_VALUE;
144 }
145
146 if (crypto_shash_init(shash))
147 return -EFAULT;
148
149 if (crypto_shash_update(shash, ctx->ipad, block_size))
150 return -EFAULT;
151
152 hash_state_out = (__be32 *)hash->sha.state1;
153 hash512_state_out = (__be64 *)hash_state_out;
154
155 switch (ctx->qat_hash_alg) {
156 case ICP_QAT_HW_AUTH_ALGO_SHA1:
157 if (crypto_shash_export(shash, &ctx->sha1))
158 return -EFAULT;
159 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
160 *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
161 break;
162 case ICP_QAT_HW_AUTH_ALGO_SHA256:
163 if (crypto_shash_export(shash, &ctx->sha256))
164 return -EFAULT;
165 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
166 *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
167 break;
168 case ICP_QAT_HW_AUTH_ALGO_SHA512:
169 if (crypto_shash_export(shash, &ctx->sha512))
170 return -EFAULT;
171 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
172 *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
173 break;
174 default:
175 return -EFAULT;
176 }
177
178 if (crypto_shash_init(shash))
179 return -EFAULT;
180
181 if (crypto_shash_update(shash, ctx->opad, block_size))
182 return -EFAULT;
183
184 offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
185 if (offset < 0)
186 return -EFAULT;
187
188 hash_state_out = (__be32 *)(hash->sha.state1 + offset);
189 hash512_state_out = (__be64 *)hash_state_out;
190
191 switch (ctx->qat_hash_alg) {
192 case ICP_QAT_HW_AUTH_ALGO_SHA1:
193 if (crypto_shash_export(shash, &ctx->sha1))
194 return -EFAULT;
195 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
196 *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
197 break;
198 case ICP_QAT_HW_AUTH_ALGO_SHA256:
199 if (crypto_shash_export(shash, &ctx->sha256))
200 return -EFAULT;
201 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
202 *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
203 break;
204 case ICP_QAT_HW_AUTH_ALGO_SHA512:
205 if (crypto_shash_export(shash, &ctx->sha512))
206 return -EFAULT;
207 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
208 *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
209 break;
210 default:
211 return -EFAULT;
212 }
213 memzero_explicit(ctx->ipad, block_size);
214 memzero_explicit(ctx->opad, block_size);
215 return 0;
216 }
217
218 static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
219 {
220 header->hdr_flags =
221 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
222 header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
223 header->comn_req_flags =
224 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
225 QAT_COMN_PTR_TYPE_SGL);
226 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
227 ICP_QAT_FW_LA_PARTIAL_NONE);
228 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
229 ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
230 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
231 ICP_QAT_FW_LA_NO_PROTO);
232 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
233 ICP_QAT_FW_LA_NO_UPDATE_STATE);
234 }
235
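/*
 * Build the encrypt session template: the content descriptor carries the
 * cipher config and key first, followed by the hash setup, and the request
 * header chains the CIPHER slice into the AUTH slice so the data is
 * encrypted and then authenticated (encrypt-then-MAC).
 */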
236 static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
237 int alg,
238 struct crypto_authenc_keys *keys,
239 int mode)
240 {
241 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
242 unsigned int digestsize = crypto_aead_authsize(aead_tfm);
243 struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
244 struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
245 struct icp_qat_hw_auth_algo_blk *hash =
246 (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
247 sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
248 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
249 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
250 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
251 void *ptr = &req_tmpl->cd_ctrl;
252 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
253 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
254
255 /* CD setup */
256 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
257 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
258 hash->sha.inner_setup.auth_config.config =
259 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
260 ctx->qat_hash_alg, digestsize);
261 hash->sha.inner_setup.auth_counter.counter =
262 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
263
264 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
265 return -EFAULT;
266
267 /* Request setup */
268 qat_alg_init_common_hdr(header);
269 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
270 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
271 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
272 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
273 ICP_QAT_FW_LA_RET_AUTH_RES);
274 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
275 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
276 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
277 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
278
279 /* Cipher CD config setup */
280 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
281 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
282 cipher_cd_ctrl->cipher_cfg_offset = 0;
283 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
284 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
285 /* Auth CD config setup */
286 hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
287 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
288 hash_cd_ctrl->inner_res_sz = digestsize;
289 hash_cd_ctrl->final_sz = digestsize;
290
291 switch (ctx->qat_hash_alg) {
292 case ICP_QAT_HW_AUTH_ALGO_SHA1:
293 hash_cd_ctrl->inner_state1_sz =
294 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
295 hash_cd_ctrl->inner_state2_sz =
296 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
297 break;
298 case ICP_QAT_HW_AUTH_ALGO_SHA256:
299 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
300 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
301 break;
302 case ICP_QAT_HW_AUTH_ALGO_SHA512:
303 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
304 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
305 break;
306 default:
307 break;
308 }
309 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
310 ((sizeof(struct icp_qat_hw_auth_setup) +
311 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
312 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
313 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
314 return 0;
315 }
316
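/*
 * Build the decrypt session template: here the hash setup comes first in
 * the content descriptor and the cipher config follows it, and the AUTH
 * slice is chained into the CIPHER slice so the digest is computed over
 * the ciphertext and compared by the hardware (CMP_AUTH_RES).
 */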
317 static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
318 int alg,
319 struct crypto_authenc_keys *keys,
320 int mode)
321 {
322 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
323 unsigned int digestsize = crypto_aead_authsize(aead_tfm);
324 struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
325 struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
326 struct icp_qat_hw_cipher_algo_blk *cipher =
327 (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
328 sizeof(struct icp_qat_hw_auth_setup) +
329 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
330 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
331 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
332 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
333 void *ptr = &req_tmpl->cd_ctrl;
334 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
335 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
336 struct icp_qat_fw_la_auth_req_params *auth_param =
337 (struct icp_qat_fw_la_auth_req_params *)
338 ((char *)&req_tmpl->serv_specif_rqpars +
339 sizeof(struct icp_qat_fw_la_cipher_req_params));
340
341 /* CD setup */
342 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
343 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
344 hash->sha.inner_setup.auth_config.config =
345 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
346 ctx->qat_hash_alg,
347 digestsize);
348 hash->sha.inner_setup.auth_counter.counter =
349 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
350
351 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
352 return -EFAULT;
353
354 /* Request setup */
355 qat_alg_init_common_hdr(header);
356 header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
357 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
358 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
359 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
360 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
361 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
362 ICP_QAT_FW_LA_CMP_AUTH_RES);
363 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
364 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
365
366 /* Cipher CD config setup */
367 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
368 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
369 cipher_cd_ctrl->cipher_cfg_offset =
370 (sizeof(struct icp_qat_hw_auth_setup) +
371 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
372 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
373 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
374
375 /* Auth CD config setup */
376 hash_cd_ctrl->hash_cfg_offset = 0;
377 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
378 hash_cd_ctrl->inner_res_sz = digestsize;
379 hash_cd_ctrl->final_sz = digestsize;
380
381 switch (ctx->qat_hash_alg) {
382 case ICP_QAT_HW_AUTH_ALGO_SHA1:
383 hash_cd_ctrl->inner_state1_sz =
384 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
385 hash_cd_ctrl->inner_state2_sz =
386 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
387 break;
388 case ICP_QAT_HW_AUTH_ALGO_SHA256:
389 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
390 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
391 break;
392 case ICP_QAT_HW_AUTH_ALGO_SHA512:
393 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
394 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
395 break;
396 default:
397 break;
398 }
399
400 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
401 ((sizeof(struct icp_qat_hw_auth_setup) +
402 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
403 auth_param->auth_res_sz = digestsize;
404 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
405 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
406 return 0;
407 }
408
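/*
 * Fill in the parts of the request template and content descriptor that
 * are common to the encrypt and decrypt directions of a cipher-only
 * session. On AES-v2 capable devices the UCS slice is selected for XTS
 * and CTR, which changes how the key is placed in the descriptor.
 */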
409 static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
410 struct icp_qat_fw_la_bulk_req *req,
411 struct icp_qat_hw_cipher_algo_blk *cd,
412 const u8 *key, unsigned int keylen)
413 {
414 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
415 struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
416 struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
417 bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
418 int mode = ctx->mode;
419
420 qat_alg_init_common_hdr(header);
421 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
422 cd_pars->u.s.content_desc_params_sz =
423 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
424
425 if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
426 ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
427 ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
428
429 /* Store both XTS keys in CD, only the first key is sent
430 * to the HW, the second key is used for tweak calculation
431 */
432 memcpy(cd->ucs_aes.key, key, keylen);
433 keylen = keylen / 2;
434 } else if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
435 ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
436 ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
437 keylen = round_up(keylen, 16);
438 memcpy(cd->ucs_aes.key, key, keylen);
439 } else {
440 memcpy(cd->aes.key, key, keylen);
441 }
442
443 /* Cipher CD config setup */
444 cd_ctrl->cipher_key_sz = keylen >> 3;
445 cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
446 cd_ctrl->cipher_cfg_offset = 0;
447 ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
448 ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
449 }
450
451 static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
452 int alg, const u8 *key,
453 unsigned int keylen, int mode)
454 {
455 struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
456 struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
457 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
458
459 qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
460 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
461 enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
462 }
463
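/*
 * Derive the XTS decryption key for devices using the UCS slice, which
 * does not perform key conversion in hardware: expand the forward key and
 * copy out the last round key (AES-128) or the last two round keys
 * (AES-256) in the layout expected by the engine.
 */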
464 static void qat_alg_xts_reverse_key(const u8 *key_forward, unsigned int keylen,
465 u8 *key_reverse)
466 {
467 struct crypto_aes_ctx aes_expanded;
468 int nrounds;
469 u8 *key;
470
471 aes_expandkey(&aes_expanded, key_forward, keylen);
472 if (keylen == AES_KEYSIZE_128) {
473 nrounds = 10;
474 key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
475 memcpy(key_reverse, key, AES_BLOCK_SIZE);
476 } else {
477 /* AES_KEYSIZE_256 */
478 nrounds = 14;
479 key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
480 memcpy(key_reverse, key, AES_BLOCK_SIZE);
481 memcpy(key_reverse + AES_BLOCK_SIZE, key - AES_BLOCK_SIZE,
482 AES_BLOCK_SIZE);
483 }
484 }
485
486 static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
487 int alg, const u8 *key,
488 unsigned int keylen, int mode)
489 {
490 struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
491 struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
492 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
493 bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
494
495 qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
496 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
497
498 if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
499 /* Key reversing not supported, set no convert */
500 dec_cd->aes.cipher_config.val =
501 QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode);
502
503 /* In-place key reversal */
504 qat_alg_xts_reverse_key(dec_cd->ucs_aes.key, keylen / 2,
505 dec_cd->ucs_aes.key);
506 } else if (mode != ICP_QAT_HW_CIPHER_CTR_MODE) {
507 dec_cd->aes.cipher_config.val =
508 QAT_AES_HW_CONFIG_DEC(alg, mode);
509 } else {
510 dec_cd->aes.cipher_config.val =
511 QAT_AES_HW_CONFIG_ENC(alg, mode);
512 }
513 }
514
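/*
 * Map the key length to the AES variant understood by the hardware. XTS
 * keys are two AES keys concatenated, so the accepted lengths are doubled
 * and AES-192 is not available in that mode.
 */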
515 static int qat_alg_validate_key(int key_len, int *alg, int mode)
516 {
517 if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
518 switch (key_len) {
519 case AES_KEYSIZE_128:
520 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
521 break;
522 case AES_KEYSIZE_192:
523 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
524 break;
525 case AES_KEYSIZE_256:
526 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
527 break;
528 default:
529 return -EINVAL;
530 }
531 } else {
532 switch (key_len) {
533 case AES_KEYSIZE_128 << 1:
534 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
535 break;
536 case AES_KEYSIZE_256 << 1:
537 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
538 break;
539 default:
540 return -EINVAL;
541 }
542 }
543 return 0;
544 }
545
546 static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
547 unsigned int keylen, int mode)
548 {
549 struct crypto_authenc_keys keys;
550 int alg;
551
552 if (crypto_authenc_extractkeys(&keys, key, keylen))
553 goto bad_key;
554
555 if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
556 goto bad_key;
557
558 if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
559 goto error;
560
561 if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
562 goto error;
563
564 memzero_explicit(&keys, sizeof(keys));
565 return 0;
566 bad_key:
567 memzero_explicit(&keys, sizeof(keys));
568 return -EINVAL;
569 error:
570 memzero_explicit(&keys, sizeof(keys));
571 return -EFAULT;
572 }
573
574 static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
575 const u8 *key,
576 unsigned int keylen,
577 int mode)
578 {
579 int alg;
580
581 if (qat_alg_validate_key(keylen, &alg, mode))
582 return -EINVAL;
583
584 qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
585 qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
586 return 0;
587 }
588
589 static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
590 unsigned int keylen)
591 {
592 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
593
594 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
595 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
596 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
597 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
598
599 return qat_alg_aead_init_sessions(tfm, key, keylen,
600 ICP_QAT_HW_CIPHER_CBC_MODE);
601 }
602
603 static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
604 unsigned int keylen)
605 {
606 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
607 struct qat_crypto_instance *inst = NULL;
608 int node = numa_node_id();
609 struct device *dev;
610 int ret;
611
612 inst = qat_crypto_get_instance_node(node);
613 if (!inst)
614 return -EINVAL;
615 dev = &GET_DEV(inst->accel_dev);
616 ctx->inst = inst;
617 ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
618 &ctx->enc_cd_paddr,
619 GFP_ATOMIC);
620 if (!ctx->enc_cd) {
621 ret = -ENOMEM;
622 goto out_free_inst;
623 }
624 ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
625 &ctx->dec_cd_paddr,
626 GFP_ATOMIC);
627 if (!ctx->dec_cd) {
628 ret = -ENOMEM;
629 goto out_free_enc;
630 }
631
632 ret = qat_alg_aead_init_sessions(tfm, key, keylen,
633 ICP_QAT_HW_CIPHER_CBC_MODE);
634 if (ret)
635 goto out_free_all;
636
637 return 0;
638
639 out_free_all:
640 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
641 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
642 ctx->dec_cd, ctx->dec_cd_paddr);
643 ctx->dec_cd = NULL;
644 out_free_enc:
645 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
646 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
647 ctx->enc_cd, ctx->enc_cd_paddr);
648 ctx->enc_cd = NULL;
649 out_free_inst:
650 ctx->inst = NULL;
651 qat_crypto_put_instance(inst);
652 return ret;
653 }
654
655 static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
656 unsigned int keylen)
657 {
658 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
659
660 if (ctx->enc_cd)
661 return qat_alg_aead_rekey(tfm, key, keylen);
662 else
663 return qat_alg_aead_newkey(tfm, key, keylen);
664 }
665
666 static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
667 struct qat_crypto_request *qat_req)
668 {
669 struct device *dev = &GET_DEV(inst->accel_dev);
670 struct qat_alg_buf_list *bl = qat_req->buf.bl;
671 struct qat_alg_buf_list *blout = qat_req->buf.blout;
672 dma_addr_t blp = qat_req->buf.blp;
673 dma_addr_t blpout = qat_req->buf.bloutp;
674 size_t sz = qat_req->buf.sz;
675 size_t sz_out = qat_req->buf.sz_out;
676 int bl_dma_dir;
677 int i;
678
679 bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
680
681 for (i = 0; i < bl->num_bufs; i++)
682 dma_unmap_single(dev, bl->bufers[i].addr,
683 bl->bufers[i].len, bl_dma_dir);
684
685 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
686
687 if (!qat_req->buf.sgl_src_valid)
688 kfree(bl);
689
690 if (blp != blpout) {
691 /* For out-of-place operations, DMA unmap only the data buffers */
692 int bufless = blout->num_bufs - blout->num_mapped_bufs;
693
694 for (i = bufless; i < blout->num_bufs; i++) {
695 dma_unmap_single(dev, blout->bufers[i].addr,
696 blout->bufers[i].len,
697 DMA_FROM_DEVICE);
698 }
699 dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
700
701 if (!qat_req->buf.sgl_dst_valid)
702 kfree(blout);
703 }
704 }
705
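/*
 * Translate the source and destination scatterlists into the flat buffer
 * descriptor lists consumed by the firmware and DMA-map every entry. Small
 * lists reuse the space preallocated in the request; anything larger than
 * QAT_MAX_BUFF_DESC entries is allocated on the fly.
 */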
706 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
707 struct scatterlist *sgl,
708 struct scatterlist *sglout,
709 struct qat_crypto_request *qat_req,
710 gfp_t flags)
711 {
712 struct device *dev = &GET_DEV(inst->accel_dev);
713 int i, sg_nctr = 0;
714 int n = sg_nents(sgl);
715 struct qat_alg_buf_list *bufl;
716 struct qat_alg_buf_list *buflout = NULL;
717 dma_addr_t blp = DMA_MAPPING_ERROR;
718 dma_addr_t bloutp = DMA_MAPPING_ERROR;
719 struct scatterlist *sg;
720 size_t sz_out, sz = struct_size(bufl, bufers, n);
721 int node = dev_to_node(&GET_DEV(inst->accel_dev));
722 int bufl_dma_dir;
723
724 if (unlikely(!n))
725 return -EINVAL;
726
727 qat_req->buf.sgl_src_valid = false;
728 qat_req->buf.sgl_dst_valid = false;
729
730 if (n > QAT_MAX_BUFF_DESC) {
731 bufl = kzalloc_node(sz, flags, node);
732 if (unlikely(!bufl))
733 return -ENOMEM;
734 } else {
735 bufl = &qat_req->buf.sgl_src.sgl_hdr;
736 memset(bufl, 0, sizeof(struct qat_alg_buf_list));
737 qat_req->buf.sgl_src_valid = true;
738 }
739
740 bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
741
742 for_each_sg(sgl, sg, n, i)
743 bufl->bufers[i].addr = DMA_MAPPING_ERROR;
744
745 for_each_sg(sgl, sg, n, i) {
746 int y = sg_nctr;
747
748 if (!sg->length)
749 continue;
750
751 bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
752 sg->length,
753 bufl_dma_dir);
754 bufl->bufers[y].len = sg->length;
755 if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
756 goto err_in;
757 sg_nctr++;
758 }
759 bufl->num_bufs = sg_nctr;
760 blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
761 if (unlikely(dma_mapping_error(dev, blp)))
762 goto err_in;
763 qat_req->buf.bl = bufl;
764 qat_req->buf.blp = blp;
765 qat_req->buf.sz = sz;
766 /* Handle out of place operation */
767 if (sgl != sglout) {
768 struct qat_alg_buf *bufers;
769
770 n = sg_nents(sglout);
771 sz_out = struct_size(buflout, bufers, n);
772 sg_nctr = 0;
773
774 if (n > QAT_MAX_BUFF_DESC) {
775 buflout = kzalloc_node(sz_out, flags, node);
776 if (unlikely(!buflout))
777 goto err_in;
778 } else {
779 buflout = &qat_req->buf.sgl_dst.sgl_hdr;
780 memset(buflout, 0, sizeof(struct qat_alg_buf_list));
781 qat_req->buf.sgl_dst_valid = true;
782 }
783
784 bufers = buflout->bufers;
785 for_each_sg(sglout, sg, n, i)
786 bufers[i].addr = DMA_MAPPING_ERROR;
787
788 for_each_sg(sglout, sg, n, i) {
789 int y = sg_nctr;
790
791 if (!sg->length)
792 continue;
793
794 bufers[y].addr = dma_map_single(dev, sg_virt(sg),
795 sg->length,
796 DMA_FROM_DEVICE);
797 if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
798 goto err_out;
799 bufers[y].len = sg->length;
800 sg_nctr++;
801 }
802 buflout->num_bufs = sg_nctr;
803 buflout->num_mapped_bufs = sg_nctr;
804 bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
805 if (unlikely(dma_mapping_error(dev, bloutp)))
806 goto err_out;
807 qat_req->buf.blout = buflout;
808 qat_req->buf.bloutp = bloutp;
809 qat_req->buf.sz_out = sz_out;
810 } else {
811 /* Otherwise set the src and dst to the same address */
812 qat_req->buf.bloutp = qat_req->buf.blp;
813 qat_req->buf.sz_out = 0;
814 }
815 return 0;
816
817 err_out:
818 if (!dma_mapping_error(dev, bloutp))
819 dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
820
821 n = sg_nents(sglout);
822 for (i = 0; i < n; i++)
823 if (!dma_mapping_error(dev, buflout->bufers[i].addr))
824 dma_unmap_single(dev, buflout->bufers[i].addr,
825 buflout->bufers[i].len,
826 DMA_FROM_DEVICE);
827
828 if (!qat_req->buf.sgl_dst_valid)
829 kfree(buflout);
830
831 err_in:
832 if (!dma_mapping_error(dev, blp))
833 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
834
835 n = sg_nents(sgl);
836 for (i = 0; i < n; i++)
837 if (!dma_mapping_error(dev, bufl->bufers[i].addr))
838 dma_unmap_single(dev, bufl->bufers[i].addr,
839 bufl->bufers[i].len,
840 bufl_dma_dir);
841
842 if (!qat_req->buf.sgl_src_valid)
843 kfree(bufl);
844
845 dev_err(dev, "Failed to map buf for dma\n");
846 return -ENOMEM;
847 }
848
849 static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
850 struct qat_crypto_request *qat_req)
851 {
852 struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
853 struct qat_crypto_instance *inst = ctx->inst;
854 struct aead_request *areq = qat_req->aead_req;
855 u8 stat_field = qat_resp->comn_resp.comn_status;
856 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
857
858 qat_alg_free_bufl(inst, qat_req);
859 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
860 res = -EBADMSG;
861 areq->base.complete(&areq->base, res);
862 }
863
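/*
 * Advance the big-endian 128-bit counter by the number of AES blocks just
 * processed so the IV handed back to the caller matches what a software
 * CTR implementation would produce.
 */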
864 static void qat_alg_update_iv_ctr_mode(struct qat_crypto_request *qat_req)
865 {
866 struct skcipher_request *sreq = qat_req->skcipher_req;
867 u64 iv_lo_prev;
868 u64 iv_lo;
869 u64 iv_hi;
870
871 memcpy(qat_req->iv, sreq->iv, AES_BLOCK_SIZE);
872
873 iv_lo = be64_to_cpu(qat_req->iv_lo);
874 iv_hi = be64_to_cpu(qat_req->iv_hi);
875
876 iv_lo_prev = iv_lo;
877 iv_lo += DIV_ROUND_UP(sreq->cryptlen, AES_BLOCK_SIZE);
878 if (iv_lo < iv_lo_prev)
879 iv_hi++;
880
881 qat_req->iv_lo = cpu_to_be64(iv_lo);
882 qat_req->iv_hi = cpu_to_be64(iv_hi);
883 }
884
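/*
 * For CBC the chaining value after the request is the last ciphertext
 * block: copy it from the destination on encryption and from the source on
 * decryption (taken before the buffer may be overwritten in place).
 */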
885 static void qat_alg_update_iv_cbc_mode(struct qat_crypto_request *qat_req)
886 {
887 struct skcipher_request *sreq = qat_req->skcipher_req;
888 int offset = sreq->cryptlen - AES_BLOCK_SIZE;
889 struct scatterlist *sgl;
890
891 if (qat_req->encryption)
892 sgl = sreq->dst;
893 else
894 sgl = sreq->src;
895
896 scatterwalk_map_and_copy(qat_req->iv, sgl, offset, AES_BLOCK_SIZE, 0);
897 }
898
899 static void qat_alg_update_iv(struct qat_crypto_request *qat_req)
900 {
901 struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
902 struct device *dev = &GET_DEV(ctx->inst->accel_dev);
903
904 switch (ctx->mode) {
905 case ICP_QAT_HW_CIPHER_CTR_MODE:
906 qat_alg_update_iv_ctr_mode(qat_req);
907 break;
908 case ICP_QAT_HW_CIPHER_CBC_MODE:
909 qat_alg_update_iv_cbc_mode(qat_req);
910 break;
911 case ICP_QAT_HW_CIPHER_XTS_MODE:
912 break;
913 default:
914 dev_warn(dev, "Unsupported IV update for cipher mode %d\n",
915 ctx->mode);
916 }
917 }
918
919 static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
920 struct qat_crypto_request *qat_req)
921 {
922 struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
923 struct qat_crypto_instance *inst = ctx->inst;
924 struct skcipher_request *sreq = qat_req->skcipher_req;
925 u8 stat_field = qat_resp->comn_resp.comn_status;
926 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
927
928 qat_alg_free_bufl(inst, qat_req);
929 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
930 res = -EINVAL;
931
932 if (qat_req->encryption)
933 qat_alg_update_iv(qat_req);
934
935 memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
936
937 sreq->base.complete(&sreq->base, res);
938 }
939
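/*
 * Response ring handler: recover the originating request from the opaque
 * data, run its completion callback and then try to resubmit any requests
 * queued on the instance backlog.
 */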
940 void qat_alg_callback(void *resp)
941 {
942 struct icp_qat_fw_la_resp *qat_resp = resp;
943 struct qat_crypto_request *qat_req =
944 (void *)(__force long)qat_resp->opaque_data;
945 struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
946
947 qat_req->cb(qat_resp, qat_req);
948
949 qat_alg_send_backlog(backlog);
950 }
951
952 static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req,
953 struct qat_crypto_instance *inst,
954 struct crypto_async_request *base)
955 {
956 struct qat_alg_req *alg_req = &qat_req->alg_req;
957
958 alg_req->fw_req = (u32 *)&qat_req->req;
959 alg_req->tx_ring = inst->sym_tx;
960 alg_req->base = base;
961 alg_req->backlog = &inst->backlog;
962
963 return qat_alg_send_message(alg_req);
964 }
965
966 static int qat_alg_aead_dec(struct aead_request *areq)
967 {
968 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
969 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
970 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
971 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
972 struct icp_qat_fw_la_cipher_req_params *cipher_param;
973 struct icp_qat_fw_la_auth_req_params *auth_param;
974 struct icp_qat_fw_la_bulk_req *msg;
975 int digest_size = crypto_aead_authsize(aead_tfm);
976 gfp_t f = qat_algs_alloc_flags(&areq->base);
977 int ret;
978 u32 cipher_len;
979
980 cipher_len = areq->cryptlen - digest_size;
981 if (cipher_len % AES_BLOCK_SIZE != 0)
982 return -EINVAL;
983
984 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f);
985 if (unlikely(ret))
986 return ret;
987
988 msg = &qat_req->req;
989 *msg = ctx->dec_fw_req;
990 qat_req->aead_ctx = ctx;
991 qat_req->aead_req = areq;
992 qat_req->cb = qat_aead_alg_callback;
993 qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
994 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
995 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
996 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
997 cipher_param->cipher_length = cipher_len;
998 cipher_param->cipher_offset = areq->assoclen;
999 memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
1000 auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
1001 auth_param->auth_off = 0;
1002 auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
1003
1004 ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
1005 if (ret == -ENOSPC)
1006 qat_alg_free_bufl(ctx->inst, qat_req);
1007
1008 return ret;
1009 }
1010
1011 static int qat_alg_aead_enc(struct aead_request *areq)
1012 {
1013 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
1014 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
1015 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
1016 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
1017 struct icp_qat_fw_la_cipher_req_params *cipher_param;
1018 struct icp_qat_fw_la_auth_req_params *auth_param;
1019 gfp_t f = qat_algs_alloc_flags(&areq->base);
1020 struct icp_qat_fw_la_bulk_req *msg;
1021 u8 *iv = areq->iv;
1022 int ret;
1023
1024 if (areq->cryptlen % AES_BLOCK_SIZE != 0)
1025 return -EINVAL;
1026
1027 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f);
1028 if (unlikely(ret))
1029 return ret;
1030
1031 msg = &qat_req->req;
1032 *msg = ctx->enc_fw_req;
1033 qat_req->aead_ctx = ctx;
1034 qat_req->aead_req = areq;
1035 qat_req->cb = qat_aead_alg_callback;
1036 qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
1037 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1038 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1039 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1040 auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
1041
1042 memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
1043 cipher_param->cipher_length = areq->cryptlen;
1044 cipher_param->cipher_offset = areq->assoclen;
1045
1046 auth_param->auth_off = 0;
1047 auth_param->auth_len = areq->assoclen + areq->cryptlen;
1048
1049 ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
1050 if (ret == -ENOSPC)
1051 qat_alg_free_bufl(ctx->inst, qat_req);
1052
1053 return ret;
1054 }
1055
1056 static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
1057 const u8 *key, unsigned int keylen,
1058 int mode)
1059 {
1060 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
1061 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
1062 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
1063 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
1064
1065 return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
1066 }
1067
1068 static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
1069 const u8 *key, unsigned int keylen,
1070 int mode)
1071 {
1072 struct qat_crypto_instance *inst = NULL;
1073 struct device *dev;
1074 int node = numa_node_id();
1075 int ret;
1076
1077 inst = qat_crypto_get_instance_node(node);
1078 if (!inst)
1079 return -EINVAL;
1080 dev = &GET_DEV(inst->accel_dev);
1081 ctx->inst = inst;
1082 ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
1083 &ctx->enc_cd_paddr,
1084 GFP_ATOMIC);
1085 if (!ctx->enc_cd) {
1086 ret = -ENOMEM;
1087 goto out_free_instance;
1088 }
1089 ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
1090 &ctx->dec_cd_paddr,
1091 GFP_ATOMIC);
1092 if (!ctx->dec_cd) {
1093 ret = -ENOMEM;
1094 goto out_free_enc;
1095 }
1096
1097 ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
1098 if (ret)
1099 goto out_free_all;
1100
1101 return 0;
1102
1103 out_free_all:
1104 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
1105 dma_free_coherent(dev, sizeof(*ctx->dec_cd),
1106 ctx->dec_cd, ctx->dec_cd_paddr);
1107 ctx->dec_cd = NULL;
1108 out_free_enc:
1109 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
1110 dma_free_coherent(dev, sizeof(*ctx->enc_cd),
1111 ctx->enc_cd, ctx->enc_cd_paddr);
1112 ctx->enc_cd = NULL;
1113 out_free_instance:
1114 ctx->inst = NULL;
1115 qat_crypto_put_instance(inst);
1116 return ret;
1117 }
1118
1119 static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
1120 const u8 *key, unsigned int keylen,
1121 int mode)
1122 {
1123 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1124
1125 ctx->mode = mode;
1126
1127 if (ctx->enc_cd)
1128 return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
1129 else
1130 return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
1131 }
1132
1133 static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
1134 const u8 *key, unsigned int keylen)
1135 {
1136 return qat_alg_skcipher_setkey(tfm, key, keylen,
1137 ICP_QAT_HW_CIPHER_CBC_MODE);
1138 }
1139
1140 static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
1141 const u8 *key, unsigned int keylen)
1142 {
1143 return qat_alg_skcipher_setkey(tfm, key, keylen,
1144 ICP_QAT_HW_CIPHER_CTR_MODE);
1145 }
1146
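/*
 * XTS setkey: AES-192 is not supported by the hardware in XTS mode, so
 * such keys are routed to the fallback xts(aes) tfm. On AES-v2 capable
 * devices the second half of the key also programs the tweak cipher used
 * to pre-encrypt the IV.
 */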
1147 static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
1148 const u8 *key, unsigned int keylen)
1149 {
1150 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1151 int ret;
1152
1153 ret = xts_verify_key(tfm, key, keylen);
1154 if (ret)
1155 return ret;
1156
1157 if (keylen >> 1 == AES_KEYSIZE_192) {
1158 ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
1159 if (ret)
1160 return ret;
1161
1162 ctx->fallback = true;
1163
1164 return 0;
1165 }
1166
1167 ctx->fallback = false;
1168
1169 ret = qat_alg_skcipher_setkey(tfm, key, keylen,
1170 ICP_QAT_HW_CIPHER_XTS_MODE);
1171 if (ret)
1172 return ret;
1173
1174 if (HW_CAP_AES_V2(ctx->inst->accel_dev))
1175 ret = crypto_cipher_setkey(ctx->tweak, key + (keylen / 2),
1176 keylen / 2);
1177
1178 return ret;
1179 }
1180
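/*
 * Load the request IV into the firmware message. For XTS on AES-v2 capable
 * devices the engine expects the tweak already encrypted, so the IV is run
 * through the tweak cipher first; all other modes take it verbatim.
 */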
1181 static void qat_alg_set_req_iv(struct qat_crypto_request *qat_req)
1182 {
1183 struct icp_qat_fw_la_cipher_req_params *cipher_param;
1184 struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
1185 bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
1186 u8 *iv = qat_req->skcipher_req->iv;
1187
1188 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1189
1190 if (aes_v2_capable && ctx->mode == ICP_QAT_HW_CIPHER_XTS_MODE)
1191 crypto_cipher_encrypt_one(ctx->tweak,
1192 (u8 *)cipher_param->u.cipher_IV_array,
1193 iv);
1194 else
1195 memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
1196 }
1197
1198 static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
1199 {
1200 struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1201 struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
1202 struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1203 struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
1204 struct icp_qat_fw_la_cipher_req_params *cipher_param;
1205 gfp_t f = qat_algs_alloc_flags(&req->base);
1206 struct icp_qat_fw_la_bulk_req *msg;
1207 int ret;
1208
1209 if (req->cryptlen == 0)
1210 return 0;
1211
1212 ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f);
1213 if (unlikely(ret))
1214 return ret;
1215
1216 msg = &qat_req->req;
1217 *msg = ctx->enc_fw_req;
1218 qat_req->skcipher_ctx = ctx;
1219 qat_req->skcipher_req = req;
1220 qat_req->cb = qat_skcipher_alg_callback;
1221 qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
1222 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1223 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1224 qat_req->encryption = true;
1225 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1226 cipher_param->cipher_length = req->cryptlen;
1227 cipher_param->cipher_offset = 0;
1228
1229 qat_alg_set_req_iv(qat_req);
1230
1231 ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
1232 if (ret == -ENOSPC)
1233 qat_alg_free_bufl(ctx->inst, qat_req);
1234
1235 return ret;
1236 }
1237
1238 static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
1239 {
1240 if (req->cryptlen % AES_BLOCK_SIZE != 0)
1241 return -EINVAL;
1242
1243 return qat_alg_skcipher_encrypt(req);
1244 }
1245
1246 static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
1247 {
1248 struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1249 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
1250 struct skcipher_request *nreq = skcipher_request_ctx(req);
1251
1252 if (req->cryptlen < XTS_BLOCK_SIZE)
1253 return -EINVAL;
1254
1255 if (ctx->fallback) {
1256 memcpy(nreq, req, sizeof(*req));
1257 skcipher_request_set_tfm(nreq, ctx->ftfm);
1258 return crypto_skcipher_encrypt(nreq);
1259 }
1260
1261 return qat_alg_skcipher_encrypt(req);
1262 }
1263
1264 static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
1265 {
1266 struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1267 struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
1268 struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1269 struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
1270 struct icp_qat_fw_la_cipher_req_params *cipher_param;
1271 gfp_t f = qat_algs_alloc_flags(&req->base);
1272 struct icp_qat_fw_la_bulk_req *msg;
1273 int ret;
1274
1275 if (req->cryptlen == 0)
1276 return 0;
1277
1278 ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f);
1279 if (unlikely(ret))
1280 return ret;
1281
1282 msg = &qat_req->req;
1283 *msg = ctx->dec_fw_req;
1284 qat_req->skcipher_ctx = ctx;
1285 qat_req->skcipher_req = req;
1286 qat_req->cb = qat_skcipher_alg_callback;
1287 qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
1288 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1289 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1290 qat_req->encryption = false;
1291 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1292 cipher_param->cipher_length = req->cryptlen;
1293 cipher_param->cipher_offset = 0;
1294
1295 qat_alg_set_req_iv(qat_req);
1296 qat_alg_update_iv(qat_req);
1297
1298 ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
1299 if (ret == -ENOSPC)
1300 qat_alg_free_bufl(ctx->inst, qat_req);
1301
1302 return ret;
1303 }
1304
1305 static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
1306 {
1307 if (req->cryptlen % AES_BLOCK_SIZE != 0)
1308 return -EINVAL;
1309
1310 return qat_alg_skcipher_decrypt(req);
1311 }
1312
1313 static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
1314 {
1315 struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1316 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
1317 struct skcipher_request *nreq = skcipher_request_ctx(req);
1318
1319 if (req->cryptlen < XTS_BLOCK_SIZE)
1320 return -EINVAL;
1321
1322 if (ctx->fallback) {
1323 memcpy(nreq, req, sizeof(*req));
1324 skcipher_request_set_tfm(nreq, ctx->ftfm);
1325 return crypto_skcipher_decrypt(nreq);
1326 }
1327
1328 return qat_alg_skcipher_decrypt(req);
1329 }
1330
1331 static int qat_alg_aead_init(struct crypto_aead *tfm,
1332 enum icp_qat_hw_auth_algo hash,
1333 const char *hash_name)
1334 {
1335 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1336
1337 ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1338 if (IS_ERR(ctx->hash_tfm))
1339 return PTR_ERR(ctx->hash_tfm);
1340 ctx->qat_hash_alg = hash;
1341 crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1342 return 0;
1343 }
1344
1345 static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
1346 {
1347 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
1348 }
1349
1350 static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
1351 {
1352 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
1353 }
1354
1355 static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
1356 {
1357 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
1358 }
1359
1360 static void qat_alg_aead_exit(struct crypto_aead *tfm)
1361 {
1362 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1363 struct qat_crypto_instance *inst = ctx->inst;
1364 struct device *dev;
1365
1366 crypto_free_shash(ctx->hash_tfm);
1367
1368 if (!inst)
1369 return;
1370
1371 dev = &GET_DEV(inst->accel_dev);
1372 if (ctx->enc_cd) {
1373 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
1374 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1375 ctx->enc_cd, ctx->enc_cd_paddr);
1376 }
1377 if (ctx->dec_cd) {
1378 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
1379 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1380 ctx->dec_cd, ctx->dec_cd_paddr);
1381 }
1382 qat_crypto_put_instance(inst);
1383 }
1384
1385 static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
1386 {
1387 crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1388 return 0;
1389 }
1390
1391 static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
1392 {
1393 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1394 int reqsize;
1395
1396 ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
1397 CRYPTO_ALG_NEED_FALLBACK);
1398 if (IS_ERR(ctx->ftfm))
1399 return PTR_ERR(ctx->ftfm);
1400
1401 ctx->tweak = crypto_alloc_cipher("aes", 0, 0);
1402 if (IS_ERR(ctx->tweak)) {
1403 crypto_free_skcipher(ctx->ftfm);
1404 return PTR_ERR(ctx->tweak);
1405 }
1406
1407 reqsize = max(sizeof(struct qat_crypto_request),
1408 sizeof(struct skcipher_request) +
1409 crypto_skcipher_reqsize(ctx->ftfm));
1410 crypto_skcipher_set_reqsize(tfm, reqsize);
1411
1412 return 0;
1413 }
1414
1415 static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
1416 {
1417 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1418 struct qat_crypto_instance *inst = ctx->inst;
1419 struct device *dev;
1420
1421 if (!inst)
1422 return;
1423
1424 dev = &GET_DEV(inst->accel_dev);
1425 if (ctx->enc_cd) {
1426 memset(ctx->enc_cd, 0,
1427 sizeof(struct icp_qat_hw_cipher_algo_blk));
1428 dma_free_coherent(dev,
1429 sizeof(struct icp_qat_hw_cipher_algo_blk),
1430 ctx->enc_cd, ctx->enc_cd_paddr);
1431 }
1432 if (ctx->dec_cd) {
1433 memset(ctx->dec_cd, 0,
1434 sizeof(struct icp_qat_hw_cipher_algo_blk));
1435 dma_free_coherent(dev,
1436 sizeof(struct icp_qat_hw_cipher_algo_blk),
1437 ctx->dec_cd, ctx->dec_cd_paddr);
1438 }
1439 qat_crypto_put_instance(inst);
1440 }
1441
1442 static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
1443 {
1444 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1445
1446 if (ctx->ftfm)
1447 crypto_free_skcipher(ctx->ftfm);
1448
1449 if (ctx->tweak)
1450 crypto_free_cipher(ctx->tweak);
1451
1452 qat_alg_skcipher_exit_tfm(tfm);
1453 }
1454
1455 static struct aead_alg qat_aeads[] = { {
1456 .base = {
1457 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1458 .cra_driver_name = "qat_aes_cbc_hmac_sha1",
1459 .cra_priority = 4001,
1460 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1461 .cra_blocksize = AES_BLOCK_SIZE,
1462 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1463 .cra_module = THIS_MODULE,
1464 },
1465 .init = qat_alg_aead_sha1_init,
1466 .exit = qat_alg_aead_exit,
1467 .setkey = qat_alg_aead_setkey,
1468 .decrypt = qat_alg_aead_dec,
1469 .encrypt = qat_alg_aead_enc,
1470 .ivsize = AES_BLOCK_SIZE,
1471 .maxauthsize = SHA1_DIGEST_SIZE,
1472 }, {
1473 .base = {
1474 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1475 .cra_driver_name = "qat_aes_cbc_hmac_sha256",
1476 .cra_priority = 4001,
1477 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1478 .cra_blocksize = AES_BLOCK_SIZE,
1479 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1480 .cra_module = THIS_MODULE,
1481 },
1482 .init = qat_alg_aead_sha256_init,
1483 .exit = qat_alg_aead_exit,
1484 .setkey = qat_alg_aead_setkey,
1485 .decrypt = qat_alg_aead_dec,
1486 .encrypt = qat_alg_aead_enc,
1487 .ivsize = AES_BLOCK_SIZE,
1488 .maxauthsize = SHA256_DIGEST_SIZE,
1489 }, {
1490 .base = {
1491 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1492 .cra_driver_name = "qat_aes_cbc_hmac_sha512",
1493 .cra_priority = 4001,
1494 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1495 .cra_blocksize = AES_BLOCK_SIZE,
1496 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1497 .cra_module = THIS_MODULE,
1498 },
1499 .init = qat_alg_aead_sha512_init,
1500 .exit = qat_alg_aead_exit,
1501 .setkey = qat_alg_aead_setkey,
1502 .decrypt = qat_alg_aead_dec,
1503 .encrypt = qat_alg_aead_enc,
1504 .ivsize = AES_BLOCK_SIZE,
1505 .maxauthsize = SHA512_DIGEST_SIZE,
1506 } };
1507
1508 static struct skcipher_alg qat_skciphers[] = { {
1509 .base.cra_name = "cbc(aes)",
1510 .base.cra_driver_name = "qat_aes_cbc",
1511 .base.cra_priority = 4001,
1512 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1513 .base.cra_blocksize = AES_BLOCK_SIZE,
1514 .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1515 .base.cra_alignmask = 0,
1516 .base.cra_module = THIS_MODULE,
1517
1518 .init = qat_alg_skcipher_init_tfm,
1519 .exit = qat_alg_skcipher_exit_tfm,
1520 .setkey = qat_alg_skcipher_cbc_setkey,
1521 .decrypt = qat_alg_skcipher_blk_decrypt,
1522 .encrypt = qat_alg_skcipher_blk_encrypt,
1523 .min_keysize = AES_MIN_KEY_SIZE,
1524 .max_keysize = AES_MAX_KEY_SIZE,
1525 .ivsize = AES_BLOCK_SIZE,
1526 }, {
1527 .base.cra_name = "ctr(aes)",
1528 .base.cra_driver_name = "qat_aes_ctr",
1529 .base.cra_priority = 4001,
1530 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1531 .base.cra_blocksize = 1,
1532 .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1533 .base.cra_alignmask = 0,
1534 .base.cra_module = THIS_MODULE,
1535
1536 .init = qat_alg_skcipher_init_tfm,
1537 .exit = qat_alg_skcipher_exit_tfm,
1538 .setkey = qat_alg_skcipher_ctr_setkey,
1539 .decrypt = qat_alg_skcipher_decrypt,
1540 .encrypt = qat_alg_skcipher_encrypt,
1541 .min_keysize = AES_MIN_KEY_SIZE,
1542 .max_keysize = AES_MAX_KEY_SIZE,
1543 .ivsize = AES_BLOCK_SIZE,
1544 }, {
1545 .base.cra_name = "xts(aes)",
1546 .base.cra_driver_name = "qat_aes_xts",
1547 .base.cra_priority = 4001,
1548 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
1549 CRYPTO_ALG_ALLOCATES_MEMORY,
1550 .base.cra_blocksize = AES_BLOCK_SIZE,
1551 .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1552 .base.cra_alignmask = 0,
1553 .base.cra_module = THIS_MODULE,
1554
1555 .init = qat_alg_skcipher_init_xts_tfm,
1556 .exit = qat_alg_skcipher_exit_xts_tfm,
1557 .setkey = qat_alg_skcipher_xts_setkey,
1558 .decrypt = qat_alg_skcipher_xts_decrypt,
1559 .encrypt = qat_alg_skcipher_xts_encrypt,
1560 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1561 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1562 .ivsize = AES_BLOCK_SIZE,
1563 } };
1564
1565 int qat_algs_register(void)
1566 {
1567 int ret = 0;
1568
1569 mutex_lock(&algs_lock);
1570 if (++active_devs != 1)
1571 goto unlock;
1572
1573 ret = crypto_register_skciphers(qat_skciphers,
1574 ARRAY_SIZE(qat_skciphers));
1575 if (ret)
1576 goto unlock;
1577
1578 ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1579 if (ret)
1580 goto unreg_algs;
1581
1582 unlock:
1583 mutex_unlock(&algs_lock);
1584 return ret;
1585
1586 unreg_algs:
1587 crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
1588 goto unlock;
1589 }
1590
1591 void qat_algs_unregister(void)
1592 {
1593 mutex_lock(&algs_lock);
1594 if (--active_devs != 0)
1595 goto unlock;
1596
1597 crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1598 crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
1599
1600 unlock:
1601 mutex_unlock(&algs_lock);
1602 }
1603