/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

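/*
 * Flat buffer descriptor and buffer list in the layout the QAT firmware
 * expects: a 64-byte aligned list header followed by an array of
 * {length, reserved, DMA address} entries.
 */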
struct qat_alg_buf {
	uint32_t len;
	uint32_t resrvd;
	uint64_t addr;
} __packed;

struct qat_alg_buf_list {
	uint64_t resrvd;
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);

/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);

struct qat_alg_aead_ctx {
	struct qat_alg_cd *enc_cd;
	struct qat_alg_cd *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	struct qat_crypto_instance *inst;
	union {
		struct sha1_state sha1;
		struct sha256_state sha256;
		struct sha512_state sha512;
	};
	char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
	char opad[SHA512_BLOCK_SIZE];
};

struct qat_alg_ablkcipher_ctx {
	struct icp_qat_hw_cipher_algo_blk *enc_cd;
	struct icp_qat_hw_cipher_algo_blk *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
};

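/* Return the hardware inner state1 size for the given hash algorithm */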
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	}
}

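/*
 * Precompute the inner (ipad) and outer (opad) HMAC partial hashes and
 * copy the exported hash state, byte-swapped to big endian, into the
 * content descriptor's state buffers at the offsets the firmware expects.
 */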
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_aead_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
{
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memset(ctx->ipad, 0, block_size);
	memset(ctx->opad, 0, block_size);
	shash->tfm = ctx->hash_tfm;

	if (auth_keylen > block_size) {
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, ctx->ipad);
		if (ret)
			return ret;

		memcpy(ctx->opad, ctx->ipad, digest_size);
	} else {
		memcpy(ctx->ipad, auth_key, auth_keylen);
		memcpy(ctx->opad, auth_key, auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ctx->ipad + i;
		char *opad_ptr = ctx->opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ctx->ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &ctx->sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &ctx->sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &ctx->sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ctx->opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	if (offset < 0)
		return -EFAULT;

	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &ctx->sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &ctx->sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &ctx->sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ctx->ipad, block_size);
	memzero_explicit(ctx->opad, block_size);
	return 0;
}

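/*
 * Select between a 64-bit IV pointer with firmware-side IV state updates
 * and a 16-byte in-request IV with no state update.
 */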
static void qat_alg_init_hdr_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
{
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_64BIT_PTR);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_UPDATE_STATE);
}

static void qat_alg_init_hdr_no_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
{
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

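/* Fill the header fields common to all lookaside (LA) request templates */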
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
				    int aead)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	if (aead)
		qat_alg_init_hdr_no_iv_updt(header);
	else
		qat_alg_init_hdr_iv_updt(header);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
}

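/*
 * Build the encrypt-direction content descriptor (cipher config and key
 * followed by the auth setup) and the CIPHER_HASH request template.
 */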
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header, 1);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}

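/*
 * Build the decrypt-direction content descriptor (auth setup followed by
 * cipher config and key) and the HASH_CIPHER request template; the
 * firmware compares the auth result itself (ICP_QAT_FW_LA_CMP_AUTH_RES).
 */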
static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header, 1);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}

static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
					struct icp_qat_fw_la_bulk_req *req,
					struct icp_qat_hw_cipher_algo_blk *cd,
					const uint8_t *key, unsigned int keylen)
{
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

	memcpy(cd->aes.key, key, keylen);
	qat_alg_init_common_hdr(header, 0);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}

static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}

static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;

	if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_DEC(alg, mode);
	else
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_ENC(alg, mode);
}

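/*
 * Map an AES key length to the hardware algorithm ID. XTS keys carry two
 * AES keys, so the valid XTS lengths are double the plain AES key sizes.
 */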
static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
	if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
		switch (key_len) {
		case AES_KEYSIZE_128:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_192:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
			break;
		case AES_KEYSIZE_256:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (key_len) {
		case AES_KEYSIZE_128 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_256 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
				      unsigned int keylen,  int mode)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
		goto bad_key;

	if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
		goto error;

	if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
		goto error;

	memzero_explicit(&keys, sizeof(keys));
	return 0;
bad_key:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
error:
	memzero_explicit(&keys, sizeof(keys));
	return -EFAULT;
}

static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
					    const uint8_t *key,
					    unsigned int keylen,
					    int mode)
{
	int alg;

	if (qat_alg_validate_key(keylen, &alg, mode))
		goto bad_key;

	qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
	qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
	return 0;
bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key,
			      unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

	return qat_alg_aead_init_sessions(tfm, key, keylen,
					  ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_aead_newkey(struct crypto_aead *tfm, const uint8_t *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = NULL;
	int node = get_current_node();
	struct device *dev;
	int ret;

	inst = qat_crypto_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	dev = &GET_DEV(inst->accel_dev);
	ctx->inst = inst;
	ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
					 &ctx->enc_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->enc_cd) {
		ret = -ENOMEM;
		goto out_free_inst;
	}
	ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
					 &ctx->dec_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->dec_cd) {
		ret = -ENOMEM;
		goto out_free_enc;
	}

	ret = qat_alg_aead_init_sessions(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CBC_MODE);
	if (ret)
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
out_free_inst:
	ctx->inst = NULL;
	qat_crypto_put_instance(inst);
	return ret;
}

static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	if (ctx->enc_cd)
		return qat_alg_aead_rekey(tfm, key, keylen);
	else
		return qat_alg_aead_newkey(tfm, key, keylen);
}

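/* Unmap and free the buffer lists built by qat_alg_sgl_to_bufl() */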
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	size_t sz_out = qat_req->buf.sz_out;
	int i;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bl);
	if (blp != blpout) {
		/* For an out-of-place operation, unmap only the data buffers */
		int bufless = blout->num_bufs - blout->num_mapped_bufs;

		for (i = bufless; i < blout->num_bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
		kfree(blout);
	}
}

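/*
 * DMA-map the source (and, for out-of-place requests, destination)
 * scatterlists and build the firmware buffer lists describing them.
 * Zero-length scatterlist entries are skipped.
 */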
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, sg_nctr = 0;
	int n = sg_nents(sgl);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz_out, sz = struct_size(bufl, bufers, n + 1);

	if (unlikely(!n))
		return -EINVAL;

	bufl = kzalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err_in;

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err_in;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;

		n = sg_nents(sglout);
		sz_out = struct_size(buflout, bufers, n + 1);
		sg_nctr = 0;
		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
				       dev_to_node(&GET_DEV(inst->accel_dev)));
		if (unlikely(!buflout))
			goto err_in;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err_out;
		bufers = buflout->bufers;
		for_each_sg(sglout, sg, n, i) {
			int y = sg_nctr;

			if (!sg->length)
				continue;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err_out;
			bufers[y].len = sg->length;
			sg_nctr++;
		}
		buflout->num_bufs = sg_nctr;
		buflout->num_mapped_bufs = sg_nctr;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
		qat_req->buf.sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
		qat_req->buf.sz_out = 0;
	}
	return 0;

err_out:
	n = sg_nents(sglout);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
			dma_unmap_single(dev, buflout->bufers[i].addr,
					 buflout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
	if (!dma_mapping_error(dev, bloutp))
		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
	kfree(buflout);

err_in:
	n = sg_nents(sgl);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 DMA_BIDIRECTIONAL);

	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bufl);

	dev_err(dev, "Failed to map buf for dma\n");
	return -ENOMEM;
}

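/*
 * AEAD completion handler: release the DMA mappings and complete the
 * crypto API request, reporting -EBADMSG on an authentication failure.
 */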
static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				  struct qat_crypto_request *qat_req)
{
	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->aead_req;
	uint8_t stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	areq->base.complete(&areq->base, res);
}

static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
					struct qat_crypto_request *qat_req)
{
	struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct ablkcipher_request *areq = qat_req->ablkcipher_req;
	uint8_t stat_field = qat_resp->comn_resp.comn_status;
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EINVAL;

	memcpy(areq->info, qat_req->iv, AES_BLOCK_SIZE);
	dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
			  qat_req->iv_paddr);

	areq->base.complete(&areq->base, res);
}

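/*
 * Common response callback: recover the originating request from the
 * opaque data carried in the firmware response and invoke its completion
 * handler.
 */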
void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
				(void *)(__force long)qat_resp->opaque_data;

	qat_req->cb(qat_resp, qat_req);
}

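/*
 * Submit an AEAD decrypt: cipher_length excludes the digest, which the
 * firmware verifies because ICP_QAT_FW_LA_CMP_AUTH_RES is set in the
 * decrypt session template.
 */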
static int qat_alg_aead_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digest_size = crypto_aead_authsize(aead_tfm);
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = areq->cryptlen - digest_size;
	cipher_param->cipher_offset = areq->assoclen;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_aead_enc(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	uint8_t *iv = areq->iv;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
	cipher_param->cipher_length = areq->cryptlen;
	cipher_param->cipher_offset = areq->assoclen;

	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen;

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_ablkcipher_rekey(struct qat_alg_ablkcipher_ctx *ctx,
				    const u8 *key, unsigned int keylen,
				    int mode)
{
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

	return qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode);
}

static int qat_alg_ablkcipher_newkey(struct qat_alg_ablkcipher_ctx *ctx,
				     const u8 *key, unsigned int keylen,
				     int mode)
{
	struct qat_crypto_instance *inst = NULL;
	struct device *dev;
	int node = get_current_node();
	int ret;

	inst = qat_crypto_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	dev = &GET_DEV(inst->accel_dev);
	ctx->inst = inst;
	ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
					 &ctx->enc_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->enc_cd) {
		ret = -ENOMEM;
		goto out_free_instance;
	}
	ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
					 &ctx->dec_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->dec_cd) {
		ret = -ENOMEM;
		goto out_free_enc;
	}

	ret = qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode);
	if (ret)
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
out_free_instance:
	ctx->inst = NULL;
	qat_crypto_put_instance(inst);
	return ret;
}

static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
				     const u8 *key, unsigned int keylen,
				     int mode)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (ctx->enc_cd)
		return qat_alg_ablkcipher_rekey(ctx, key, keylen, mode);
	else
		return qat_alg_ablkcipher_newkey(ctx, key, keylen, mode);
}

static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CTR_MODE);
}

static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_XTS_MODE);
}

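/*
 * Submit an ablkcipher encrypt. The IV is copied into a DMA-coherent
 * buffer so the firmware can read and update it; the updated IV is
 * copied back to the request in the completion handler.
 */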
static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	int ret, ctr = 0;

	if (req->nbytes == 0)
		return 0;

	qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
					 &qat_req->iv_paddr, GFP_ATOMIC);
	if (!qat_req->iv)
		return -ENOMEM;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret)) {
		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
				  qat_req->iv_paddr);
		return ret;
	}

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
	memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
				  qat_req->iv_paddr);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_ablkcipher_blk_encrypt(struct ablkcipher_request *req)
{
	if (req->nbytes % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	return qat_alg_ablkcipher_encrypt(req);
}

static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	int ret, ctr = 0;

	if (req->nbytes == 0)
		return 0;

	qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
					 &qat_req->iv_paddr, GFP_ATOMIC);
	if (!qat_req->iv)
		return -ENOMEM;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret)) {
		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
				  qat_req->iv_paddr);
		return ret;
	}

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
	memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
				  qat_req->iv_paddr);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_ablkcipher_blk_decrypt(struct ablkcipher_request *req)
{
	if (req->nbytes % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	return qat_alg_ablkcipher_decrypt(req);
}

static int qat_alg_aead_init(struct crypto_aead *tfm,
			     enum icp_qat_hw_auth_algo hash,
			     const char *hash_name)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return PTR_ERR(ctx->hash_tfm);
	ctx->qat_hash_alg = hash;
	crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
	return 0;
}

static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
	ctx->tfm = tfm;
	return 0;
}

static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static struct aead_alg qat_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha1",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha1_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha256",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha256_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha512",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha512_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
} };

static struct crypto_alg qat_algs[] = { {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "qat_aes_cbc",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_cbc_setkey,
			.decrypt = qat_alg_ablkcipher_blk_decrypt,
			.encrypt = qat_alg_ablkcipher_blk_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
}, {
	.cra_name = "ctr(aes)",
	.cra_driver_name = "qat_aes_ctr",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_ctr_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
}, {
	.cra_name = "xts(aes)",
	.cra_driver_name = "qat_aes_xts",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_xts_setkey,
			.decrypt = qat_alg_ablkcipher_blk_decrypt,
			.encrypt = qat_alg_ablkcipher_blk_encrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
} };

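/*
 * Register the algorithms with the crypto API when the first QAT device
 * comes up; later devices only bump the reference count.
 */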
int qat_algs_register(void)
{
	int ret = 0, i;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
		qat_algs[i].cra_flags =
			CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;

	ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
	if (ret)
		goto unlock;

	for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
		qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;

	ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	if (ret)
		goto unreg_algs;

unlock:
	mutex_unlock(&algs_lock);
	return ret;

unreg_algs:
	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
	goto unlock;
}

void qat_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;

	crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));

unlock:
	mutex_unlock(&algs_lock);
}