/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

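/*
 * Flat buffer descriptors handed to the firmware.  qat_alg_buf_list is the
 * scatter-gather list format the device DMAs from: a small header followed
 * by a variable number of {len, addr} entries.  The layout is part of the
 * firmware interface, hence __packed and the 64-byte alignment.
 */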
struct qat_alg_buf {
	uint32_t len;
	uint32_t resrvd;
	uint64_t addr;
} __packed;

struct qat_alg_buf_list {
	uint64_t resrvd;
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);

/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);

struct qat_alg_aead_ctx {
	struct qat_alg_cd *enc_cd;
	struct qat_alg_cd *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	struct qat_crypto_instance *inst;
};

struct qat_alg_ablkcipher_ctx {
	struct icp_qat_hw_cipher_algo_blk *enc_cd;
	struct icp_qat_hw_cipher_algo_blk *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
	spinlock_t lock;	/* protects qat_alg_ablkcipher_ctx struct */
};

static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	}
}

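/*
 * Precompute the inner and outer HMAC states for the content descriptor.
 * The auth key (first hashed down if longer than the block size) is XORed
 * with the ipad/opad constants, one block of each is pushed through the
 * software shash, and the partial states are exported big-endian into
 * hash->sha.state1: inner state first, outer state at the next 8-byte
 * aligned offset.  The hardware presumably resumes from these states, so
 * only the message itself has to be hashed per request.
 */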
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_aead_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
{
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	struct sha1_state sha1;
	struct sha256_state sha256;
	struct sha512_state sha512;
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	char ipad[block_size];
	char opad[block_size];
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	shash->tfm = ctx->hash_tfm;
	shash->flags = 0x0;

	if (auth_keylen > block_size) {
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, ipad);
		if (ret)
			return ret;

		memcpy(opad, ipad, digest_size);
	} else {
		memcpy(ipad, auth_key, auth_keylen);
		memcpy(opad, auth_key, auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ipad + i;
		char *opad_ptr = opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ipad, block_size);
	memzero_explicit(opad, block_size);
	return 0;
}

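/*
 * Fill in the lookaside (LA) request header fields shared by all session
 * templates built below: 64-bit content descriptor pointers, SGL source and
 * destination buffers, a 16-byte IV field, and no partials, protocol or
 * state updates.
 */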
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

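/*
 * Build the encrypt session: a cipher-then-hash content descriptor plus the
 * firmware request template.  In the encrypt CD the AES config and key come
 * first; the hash pointer is computed so the auth block lands right after
 * the cipher configuration and key, and the same offset (in 8-byte words)
 * is written into hash_cfg_offset below.
 */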
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}

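/*
 * Build the decrypt session.  The content descriptor layout is reversed:
 * the auth block leads and the cipher block follows it (hash-then-cipher),
 * with cipher_cfg_offset pointing past the auth setup and the two rounded
 * up hash states.  The firmware is asked to compare the digest itself
 * (CMP_AUTH_RES) instead of returning it, so a tag mismatch is expected to
 * surface as an error status in the response.
 */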
static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}

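/*
 * Template setup shared by the plain AES (ablkcipher) sessions: copy the key
 * into the content descriptor and build a cipher-only request header.  The
 * per-direction helpers below differ only in which CD/DMA address they use
 * and in the cipher_config value; note that for CTR mode the "decrypt"
 * descriptor is programmed with the encrypt config, since CTR decryption is
 * the same keystream operation as encryption.
 */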
static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
					struct icp_qat_fw_la_bulk_req *req,
					struct icp_qat_hw_cipher_algo_blk *cd,
					const uint8_t *key, unsigned int keylen)
{
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

	memcpy(cd->aes.key, key, keylen);
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}

static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}

static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;

	if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_DEC(alg, mode);
	else
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_ENC(alg, mode);
}

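/*
 * Map the crypto API key length onto a QAT AES algorithm id.  XTS keys are
 * double length (two AES keys back to back), so 32- and 64-byte keys select
 * AES-128 and AES-256 respectively in that mode.
 */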
static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
	if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
		switch (key_len) {
		case AES_KEYSIZE_128:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_192:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
			break;
		case AES_KEYSIZE_256:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (key_len) {
		case AES_KEYSIZE_128 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_256 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
		goto bad_key;

	if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
		goto error;

	if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
		goto error;

	memzero_explicit(&keys, sizeof(keys));
	return 0;
bad_key:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
error:
	memzero_explicit(&keys, sizeof(keys));
	return -EFAULT;
}

static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
					    const uint8_t *key,
					    unsigned int keylen,
					    int mode)
{
	int alg;

	if (qat_alg_validate_key(keylen, &alg, mode))
		goto bad_key;

	qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
	qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
	return 0;
bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

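/*
 * AEAD setkey.  On the first key the tfm is bound to a crypto instance on
 * the current node and the encrypt/decrypt content descriptors are
 * allocated from DMA-coherent memory; on rekey the existing descriptors and
 * request templates are simply cleared and rebuilt.  Only AES-CBC with HMAC
 * is offered by this driver, hence the hard-coded CBC mode.
 */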
static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev;

	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst)
			return -EINVAL;

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd)
			return -ENOMEM;
		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd)
			goto out_free_enc;
	}
	if (qat_alg_aead_init_sessions(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_CBC_MODE))
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}

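/*
 * Undo qat_alg_sgl_to_bufl(): unmap every data buffer and the buffer
 * list(s) themselves, then free the lists.  For out-of-place requests the
 * output list is handled separately, skipping any leading entries that were
 * never DMA mapped (num_bufs - num_mapped_bufs of them).
 */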
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	size_t sz_out = qat_req->buf.sz_out;
	int i;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bl);
	if (blp != blpout) {
		/* For an out-of-place operation unmap only the mapped data */
		int bufless = blout->num_bufs - blout->num_mapped_bufs;

		for (i = bufless; i < blout->num_bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
		kfree(blout);
	}
}

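/*
 * Translate the source (and, for out-of-place requests, destination)
 * scatterlists into the flat qat_alg_buf_list format the firmware consumes.
 * Each non-empty sg entry is DMA mapped individually and the list itself is
 * mapped as well, since the device reads it directly.  Any mapping failure
 * rolls back everything mapped so far.  GFP_ATOMIC is used because this may
 * run in atomic context on the request path.
 */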
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, sg_nctr = 0;
	int n = sg_nents(sgl);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
			((1 + n) * sizeof(struct qat_alg_buf));

	if (unlikely(!n))
		return -EINVAL;

	bufl = kzalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err_in;

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err_in;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;

		n = sg_nents(sglout);
		sz_out = sizeof(struct qat_alg_buf_list) +
			((1 + n) * sizeof(struct qat_alg_buf));
		sg_nctr = 0;
		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
				       dev_to_node(&GET_DEV(inst->accel_dev)));
		if (unlikely(!buflout))
			goto err_in;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err_out;
		bufers = buflout->bufers;
		for_each_sg(sglout, sg, n, i) {
			int y = sg_nctr;

			if (!sg->length)
				continue;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err_out;
			bufers[y].len = sg->length;
			sg_nctr++;
		}
		buflout->num_bufs = sg_nctr;
		buflout->num_mapped_bufs = sg_nctr;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
		qat_req->buf.sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
		qat_req->buf.sz_out = 0;
	}
	return 0;

err_out:
	n = sg_nents(sglout);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
			dma_unmap_single(dev, buflout->bufers[i].addr,
					 buflout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
	if (!dma_mapping_error(dev, bloutp))
		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
	kfree(buflout);

err_in:
	n = sg_nents(sgl);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 DMA_BIDIRECTIONAL);

	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bufl);

	dev_err(dev, "Failed to map buf for dma\n");
	return -ENOMEM;
}

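/*
 * Completion callbacks, dispatched from qat_alg_callback() once the
 * firmware response arrives; the originating request is recovered from the
 * opaque_data field echoed back in the response.  They release the DMA
 * buffer lists and complete the crypto request; a device error status is
 * reported as -EBADMSG for AEAD (failed authentication) and -EINVAL for
 * plain ciphers.
 */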
static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				  struct qat_crypto_request *qat_req)
{
	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->aead_req;
	uint8_t stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	areq->base.complete(&areq->base, res);
}

static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
					struct qat_crypto_request *qat_req)
{
	struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct ablkcipher_request *areq = qat_req->ablkcipher_req;
	uint8_t stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EINVAL;
	areq->base.complete(&areq->base, res);
}

void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
				(void *)(__force long)qat_resp->opaque_data;

	qat_req->cb(qat_resp, qat_req);
}

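/*
 * Request submission.  Each handler builds a firmware message from the
 * session template, points it at the mapped buffer lists, fills in the
 * per-request cipher/auth parameters and pushes it onto the instance's
 * symmetric TX ring.  If the ring is still full after ten retries the
 * request is unwound and rejected with -EBUSY (there is no backlog
 * queueing); otherwise -EINPROGRESS is returned and the callback completes
 * the request.
 */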
static int qat_alg_aead_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digest_size = crypto_aead_authsize(aead_tfm);
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = areq->cryptlen - digest_size;
	cipher_param->cipher_offset = areq->assoclen;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_aead_enc(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	uint8_t *iv = areq->iv;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
	cipher_param->cipher_length = areq->cryptlen;
	cipher_param->cipher_offset = areq->assoclen;

	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen;

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

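/*
 * The ablkcipher setkey mirrors the AEAD version above; ctx->lock is taken
 * around the rekey-vs-first-allocation check, presumably to keep concurrent
 * setkey calls from both trying to bind an instance and allocate the
 * content descriptors.
 */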
static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
				     const u8 *key, unsigned int keylen,
				     int mode)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct device *dev;

	spin_lock(&ctx->lock);
	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst) {
			spin_unlock(&ctx->lock);
			return -EINVAL;
		}

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd) {
			spin_unlock(&ctx->lock);
			return -ENOMEM;
		}
		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd) {
			spin_unlock(&ctx->lock);
			goto out_free_enc;
		}
	}
	spin_unlock(&ctx->lock);
	if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}

static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CTR_MODE);
}

static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_XTS_MODE);
}

static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_aead_init(struct crypto_aead *tfm,
			     enum icp_qat_hw_auth_algo hash,
			     const char *hash_name)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return PTR_ERR(ctx->hash_tfm);
	ctx->qat_hash_alg = hash;
	crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
	return 0;
}

static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->lock);
	tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
	ctx->tfm = tfm;
	return 0;
}

static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

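/*
 * Algorithm templates registered with the crypto API.  A cra_priority of
 * 4001 is meant to rank these implementations above the generic software
 * ones, so they are preferred whenever a QAT device is available.
 */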
static struct aead_alg qat_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha1",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha1_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha256",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha256_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha512",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha512_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
} };

static struct crypto_alg qat_algs[] = { {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "qat_aes_cbc",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_cbc_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
}, {
	.cra_name = "ctr(aes)",
	.cra_driver_name = "qat_aes_ctr",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_ctr_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
}, {
	.cra_name = "xts(aes)",
	.cra_driver_name = "qat_aes_xts",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_xts_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
} };

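/*
 * Registration is reference counted across accelerator devices: the
 * algorithms are registered when the first device comes up and removed only
 * when the last one goes away.
 */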
int qat_algs_register(void)
{
	int ret = 0, i;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
		qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					CRYPTO_ALG_ASYNC;

	ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
	if (ret)
		goto unlock;

	for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
		qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;

	ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	if (ret)
		goto unreg_algs;

unlock:
	mutex_unlock(&algs_lock);
	return ret;

unreg_algs:
	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
	goto unlock;
}

void qat_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;

	crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));

unlock:
	mutex_unlock(&algs_lock);
}