1 /*
2  * Copyright (c) 2016 Intel Corporation.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 /**
8  * @file
9  * @brief Crypto Cipher APIs
10  *
11  * This file contains the Crypto Abstraction layer APIs.
12  *
13  * [Experimental] Users should note that the APIs can change
14  * as a part of ongoing development.
15  */
16 
17 #ifndef ZEPHYR_INCLUDE_CRYPTO_H_
18 #define ZEPHYR_INCLUDE_CRYPTO_H_
19 
20 #include <zephyr/device.h>
21 #include <errno.h>
22 #include <zephyr/sys/util.h>
23 #include <zephyr/sys/__assert.h>
24 #include <zephyr/crypto/hash.h>
25 #include "cipher.h"
26 
27 /**
28  * @brief Crypto APIs
29  * @defgroup crypto Crypto
30  * @ingroup os_services
31  * @{
32  */
33 
34 
35 /* ctx.flags values. Not all drivers support all flags.
36  * A user app can query the supported hw / driver
37  * capabilities via provided API (crypto_query_hwcaps()), and choose a
38  * supported config during the session setup.
39  */
40 #define CAP_OPAQUE_KEY_HNDL		BIT(0)
41 #define CAP_RAW_KEY			BIT(1)
42 
43 /* TBD to define */
44 #define CAP_KEY_LOADING_API		BIT(2)
45 
46 /** Whether the output is placed in separate buffer or not */
47 #define CAP_INPLACE_OPS			BIT(3)
48 #define CAP_SEPARATE_IO_BUFS		BIT(4)
49 
50 /**
 * These denote whether the output (completion of a cipher_xxx_op) is conveyed
 * by the op function returning, or by an async notification
53  */
54 #define CAP_SYNC_OPS			BIT(5)
55 #define CAP_ASYNC_OPS			BIT(6)
56 
57 /** Whether the hardware/driver supports autononce feature */
58 #define CAP_AUTONONCE			BIT(7)
59 
60 /** Don't prefix IV to cipher blocks */
61 #define CAP_NO_IV_PREFIX		BIT(8)
62 
63 /* More flags to be added as necessary */
64 
65 /** @brief Crypto driver API definition. */
__subsystem struct crypto_driver_api {
	/* Report the driver's supported capabilities as a CAP_* bitmask */
	int (*query_hw_caps)(const struct device *dev);

	/* Setup a crypto session */
	int (*cipher_begin_session)(const struct device *dev, struct cipher_ctx *ctx,
			     enum cipher_algo algo, enum cipher_mode mode,
			     enum cipher_op op_type);

	/* Tear down an established session */
	int (*cipher_free_session)(const struct device *dev, struct cipher_ctx *ctx);

	/* Register async crypto op completion callback with the driver */
	int (*cipher_async_callback_set)(const struct device *dev,
					 cipher_completion_cb cb);

	/* Setup a hash session */
	int (*hash_begin_session)(const struct device *dev, struct hash_ctx *ctx,
				  enum hash_algo algo);
	/* Tear down an established hash session */
	int (*hash_free_session)(const struct device *dev, struct hash_ctx *ctx);
	/* Register async hash op completion callback with the driver */
	int (*hash_async_callback_set)(const struct device *dev,
					 hash_completion_cb cb);
};
90 
91 /* Following are the public API a user app may call.
92  * The first two relate to crypto "session" setup / teardown. Further we
93  * have four cipher mode specific (CTR, CCM, CBC ...) calls to perform the
94  * actual crypto operation in the context of a session. Also we have an
95  * API to provide the callback for async operations.
96  */
97 
98 /**
99  * @brief Query the crypto hardware capabilities
100  *
101  * This API is used by the app to query the capabilities supported by the
102  * crypto device. Based on this the app can specify a subset of the supported
103  * options to be honored for a session during cipher_begin_session().
104  *
105  * @param dev Pointer to the device structure for the driver instance.
106  *
107  * @return bitmask of supported options.
108  */
crypto_query_hwcaps(const struct device * dev)109 static inline int crypto_query_hwcaps(const struct device *dev)
110 {
111 	struct crypto_driver_api *api;
112 	int tmp;
113 
114 	api = (struct crypto_driver_api *) dev->api;
115 
116 	tmp = api->query_hw_caps(dev);
117 
118 	__ASSERT((tmp & (CAP_OPAQUE_KEY_HNDL | CAP_RAW_KEY)) != 0,
119 		 "Driver should support at least one key type: RAW/Opaque");
120 
121 	__ASSERT((tmp & (CAP_INPLACE_OPS | CAP_SEPARATE_IO_BUFS)) != 0,
122 	     "Driver should support at least one IO buf type: Inplace/separate");
123 
124 	__ASSERT((tmp & (CAP_SYNC_OPS | CAP_ASYNC_OPS)) != 0,
125 		"Driver should support at least one op-type: sync/async");
126 	return tmp;
127 
128 }
129 
130 /**
131  * @}
132  */
133 
134 /**
135  * @brief Crypto Cipher APIs
136  * @defgroup crypto_cipher Cipher
137  * @ingroup crypto
138  * @{
139  */
140 
141 /**
142  * @brief Setup a crypto session
143  *
144  * Initializes one time parameters, like the session key, algorithm and cipher
145  * mode which may remain constant for all operations in the session. The state
146  * may be cached in hardware and/or driver data state variables.
147  *
148  * @param  dev      Pointer to the device structure for the driver instance.
149  * @param  ctx      Pointer to the context structure. Various one time
150  *			parameters like key, keylength, etc. are supplied via
151  *			this structure. The structure documentation specifies
152  *			which fields are to be populated by the app before
153  *			making this call.
154  * @param  algo     The crypto algorithm to be used in this session. e.g AES
155  * @param  mode     The cipher mode to be used in this session. e.g CBC, CTR
156  * @param  optype   Whether we should encrypt or decrypt in this session
157  *
158  * @return 0 on success, negative errno code on fail.
159  */
cipher_begin_session(const struct device * dev,struct cipher_ctx * ctx,enum cipher_algo algo,enum cipher_mode mode,enum cipher_op optype)160 static inline int cipher_begin_session(const struct device *dev,
161 				       struct cipher_ctx *ctx,
162 				       enum cipher_algo algo,
163 				       enum cipher_mode  mode,
164 				       enum cipher_op optype)
165 {
166 	struct crypto_driver_api *api;
167 	uint32_t flags;
168 
169 	api = (struct crypto_driver_api *) dev->api;
170 	ctx->device = dev;
171 	ctx->ops.cipher_mode = mode;
172 
173 	flags = (ctx->flags & (CAP_OPAQUE_KEY_HNDL | CAP_RAW_KEY));
174 	__ASSERT(flags != 0U, "Keytype missing: RAW Key or OPAQUE handle");
175 	__ASSERT(flags != (CAP_OPAQUE_KEY_HNDL | CAP_RAW_KEY),
176 			 "conflicting options for keytype");
177 
178 	flags = (ctx->flags & (CAP_INPLACE_OPS | CAP_SEPARATE_IO_BUFS));
179 	__ASSERT(flags != 0U, "IO buffer type missing");
180 	__ASSERT(flags != (CAP_INPLACE_OPS | CAP_SEPARATE_IO_BUFS),
181 			"conflicting options for IO buffer type");
182 
183 	flags = (ctx->flags & (CAP_SYNC_OPS | CAP_ASYNC_OPS));
184 	__ASSERT(flags != 0U, "sync/async type missing");
185 	__ASSERT(flags != (CAP_SYNC_OPS |  CAP_ASYNC_OPS),
186 			"conflicting options for sync/async");
187 
188 	return api->cipher_begin_session(dev, ctx, algo, mode, optype);
189 }
190 
191 /**
192  * @brief Cleanup a crypto session
193  *
194  * Clears the hardware and/or driver state of a previous session.
195  *
196  * @param  dev      Pointer to the device structure for the driver instance.
197  * @param  ctx      Pointer to the crypto context structure of the session
198  *			to be freed.
199  *
200  * @return 0 on success, negative errno code on fail.
201  */
cipher_free_session(const struct device * dev,struct cipher_ctx * ctx)202 static inline int cipher_free_session(const struct device *dev,
203 				      struct cipher_ctx *ctx)
204 {
205 	struct crypto_driver_api *api;
206 
207 	api = (struct crypto_driver_api *) dev->api;
208 
209 	return api->cipher_free_session(dev, ctx);
210 }
211 
212 /**
213  * @brief Registers an async crypto op completion callback with the driver
214  *
215  * The application can register an async crypto op completion callback handler
216  * to be invoked by the driver, on completion of a prior request submitted via
217  * cipher_do_op(). Based on crypto device hardware semantics, this is likely to
218  * be invoked from an ISR context.
219  *
220  * @param  dev   Pointer to the device structure for the driver instance.
221  * @param  cb    Pointer to application callback to be called by the driver.
222  *
223  * @return 0 on success, -ENOTSUP if the driver does not support async op,
224  *			  negative errno code on other error.
225  */
static inline int cipher_callback_set(const struct device *dev,
				      cipher_completion_cb cb)
{
	const struct crypto_driver_api *api =
		(struct crypto_driver_api *)dev->api;

	/* Async completion support is optional for drivers. */
	if (api->cipher_async_callback_set == NULL) {
		return -ENOTSUP;
	}

	return api->cipher_async_callback_set(dev, cb);
}
240 
241 /**
242  * @brief Perform single-block crypto operation (ECB cipher mode). This
243  * should not be overloaded to operate on multiple blocks for security reasons.
244  *
245  * @param  ctx       Pointer to the crypto context of this op.
246  * @param  pkt   Structure holding the input/output buffer pointers.
247  *
248  * @return 0 on success, negative errno code on fail.
249  */
cipher_block_op(struct cipher_ctx * ctx,struct cipher_pkt * pkt)250 static inline int cipher_block_op(struct cipher_ctx *ctx,
251 				     struct cipher_pkt *pkt)
252 {
253 	__ASSERT(ctx->ops.cipher_mode == CRYPTO_CIPHER_MODE_ECB, "ECB mode "
254 		 "session invoking a different mode handler");
255 
256 	pkt->ctx = ctx;
257 	return ctx->ops.block_crypt_hndlr(ctx, pkt);
258 }
259 
260 /**
261  * @brief Perform Cipher Block Chaining (CBC) crypto operation.
262  *
263  * @param  ctx       Pointer to the crypto context of this op.
264  * @param  pkt   Structure holding the input/output buffer pointers.
265  * @param  iv        Initialization Vector (IV) for the operation. Same
266  *			 IV value should not be reused across multiple
267  *			 operations (within a session context) for security.
268  *
269  * @return 0 on success, negative errno code on fail.
270  */
static inline int cipher_cbc_op(struct cipher_ctx *ctx,
				struct cipher_pkt *pkt, uint8_t *iv)
{
	/* This entry point is only valid for a session set up in CBC mode. */
	__ASSERT(ctx->ops.cipher_mode == CRYPTO_CIPHER_MODE_CBC,
		 "CBC mode session invoking a different mode handler");

	/* Back-link the packet so the driver can reach the session state. */
	pkt->ctx = ctx;

	return ctx->ops.cbc_crypt_hndlr(ctx, pkt, iv);
}
280 
281 /**
282  * @brief Perform Counter (CTR) mode crypto operation.
283  *
284  * @param  ctx       Pointer to the crypto context of this op.
285  * @param  pkt   Structure holding the input/output buffer pointers.
286  * @param  iv        Initialization Vector (IV) for the operation. We use a
287  *			 split counter formed by appending IV and ctr.
288  *			 Consequently  ivlen = keylen - ctrlen. 'ctrlen' is
289  *			 specified during session setup through the
290  *			 'ctx.mode_params.ctr_params.ctr_len' parameter. IV
291  *			 should not be reused across multiple operations
292  *			 (within a session context) for security. The non-IV
293  *			 part of the split counter is transparent to the caller
294  *			 and is fully managed by the crypto provider.
295  *
296  * @return 0 on success, negative errno code on fail.
297  */
static inline int cipher_ctr_op(struct cipher_ctx *ctx,
				struct cipher_pkt *pkt, uint8_t *iv)
{
	/* This entry point is only valid for a session set up in CTR mode. */
	__ASSERT(ctx->ops.cipher_mode == CRYPTO_CIPHER_MODE_CTR,
		 "CTR mode session invoking a different mode handler");

	/* Back-link the packet so the driver can reach the session state. */
	pkt->ctx = ctx;

	return ctx->ops.ctr_crypt_hndlr(ctx, pkt, iv);
}
307 
308 /**
309  * @brief Perform Counter with CBC-MAC (CCM) mode crypto operation
310  *
311  * @param  ctx       Pointer to the crypto context of this op.
312  * @param  pkt   Structure holding the input/output, Associated
313  *			 Data (AD) and auth tag buffer pointers.
314  * @param  nonce     Nonce for the operation. Same nonce value should not
315  *			 be reused across multiple operations (within a
316  *			 session context) for security.
317  *
318  * @return 0 on success, negative errno code on fail.
319  */
static inline int cipher_ccm_op(struct cipher_ctx *ctx,
				struct cipher_aead_pkt *pkt, uint8_t *nonce)
{
	/* This entry point is only valid for a session set up in CCM mode. */
	__ASSERT(ctx->ops.cipher_mode == CRYPTO_CIPHER_MODE_CCM,
		 "CCM mode session invoking a different mode handler");

	/* AEAD packets wrap a plain cipher packet; link that to the session. */
	pkt->pkt->ctx = ctx;

	return ctx->ops.ccm_crypt_hndlr(ctx, pkt, nonce);
}
329 
330 /**
331  * @brief Perform Galois/Counter Mode (GCM) crypto operation
332  *
333  * @param  ctx       Pointer to the crypto context of this op.
334  * @param  pkt   Structure holding the input/output, Associated
335  *			 Data (AD) and auth tag buffer pointers.
336  * @param  nonce     Nonce for the operation. Same nonce value should not
337  *			 be reused across multiple operations (within a
338  *			 session context) for security.
339  *
340  * @return 0 on success, negative errno code on fail.
341  */
static inline int cipher_gcm_op(struct cipher_ctx *ctx,
				struct cipher_aead_pkt *pkt, uint8_t *nonce)
{
	/* This entry point is only valid for a session set up in GCM mode. */
	__ASSERT(ctx->ops.cipher_mode == CRYPTO_CIPHER_MODE_GCM,
		 "GCM mode session invoking a different mode handler");

	/* AEAD packets wrap a plain cipher packet; link that to the session. */
	pkt->pkt->ctx = ctx;

	return ctx->ops.gcm_crypt_hndlr(ctx, pkt, nonce);
}
351 
352 
353 /**
354  * @}
355  */
356 
357 /**
358  * @brief Crypto Hash APIs
359  * @defgroup crypto_hash Hash
360  * @ingroup crypto
361  * @{
362  */
363 
364 
365 /**
366  * @brief Setup a hash session
367  *
368  * Initializes one time parameters, like the algorithm which may
369  * remain constant for all operations in the session. The state may be
370  * cached in hardware and/or driver data state variables.
371  *
372  * @param  dev      Pointer to the device structure for the driver instance.
373  * @param  ctx      Pointer to the context structure. Various one time
374  *			parameters like session capabilities and algorithm are
375  *			supplied via this structure. The structure documentation
376  *			specifies which fields are to be populated by the app
377  *			before making this call.
378  * @param  algo     The hash algorithm to be used in this session. e.g sha256
379  *
380  * @return 0 on success, negative errno code on fail.
381  */
hash_begin_session(const struct device * dev,struct hash_ctx * ctx,enum hash_algo algo)382 static inline int hash_begin_session(const struct device *dev,
383 				     struct hash_ctx *ctx,
384 				     enum hash_algo algo)
385 {
386 	uint32_t flags;
387 	struct crypto_driver_api *api;
388 
389 	api = (struct crypto_driver_api *) dev->api;
390 	ctx->device = dev;
391 
392 	flags = (ctx->flags & (CAP_INPLACE_OPS | CAP_SEPARATE_IO_BUFS));
393 	__ASSERT(flags != 0U, "IO buffer type missing");
394 	__ASSERT(flags != (CAP_INPLACE_OPS | CAP_SEPARATE_IO_BUFS),
395 			"conflicting options for IO buffer type");
396 
397 	flags = (ctx->flags & (CAP_SYNC_OPS | CAP_ASYNC_OPS));
398 	__ASSERT(flags != 0U, "sync/async type missing");
399 	__ASSERT(flags != (CAP_SYNC_OPS |  CAP_ASYNC_OPS),
400 			"conflicting options for sync/async");
401 
402 
403 	return api->hash_begin_session(dev, ctx, algo);
404 }
405 
406 /**
407  * @brief Cleanup a hash session
408  *
409  * Clears the hardware and/or driver state of a session. @see hash_begin_session
410  *
411  * @param  dev      Pointer to the device structure for the driver instance.
412  * @param  ctx      Pointer to the crypto hash  context structure of the session
413  *		    to be freed.
414  *
415  * @return 0 on success, negative errno code on fail.
416  */
hash_free_session(const struct device * dev,struct hash_ctx * ctx)417 static inline int hash_free_session(const struct device *dev,
418 				    struct hash_ctx *ctx)
419 {
420 	struct crypto_driver_api *api;
421 
422 	api = (struct crypto_driver_api *) dev->api;
423 
424 	return api->hash_free_session(dev, ctx);
425 }
426 
427 /**
428  * @brief Registers an async hash completion callback with the driver
429  *
430  * The application can register an async hash completion callback handler
431  * to be invoked by the driver, on completion of a prior request submitted via
432  * hash_compute(). Based on crypto device hardware semantics, this is likely to
433  * be invoked from an ISR context.
434  *
435  * @param  dev   Pointer to the device structure for the driver instance.
436  * @param  cb    Pointer to application callback to be called by the driver.
437  *
438  * @return 0 on success, -ENOTSUP if the driver does not support async op,
439  *			  negative errno code on other error.
440  */
static inline int hash_callback_set(const struct device *dev,
				    hash_completion_cb cb)
{
	const struct crypto_driver_api *api =
		(struct crypto_driver_api *)dev->api;

	/* Async completion support is optional for drivers. */
	if (api->hash_async_callback_set == NULL) {
		return -ENOTSUP;
	}

	return api->hash_async_callback_set(dev, cb);
}
455 
456 /**
457  * @brief Perform  a cryptographic hash function.
458  *
459  * @param  ctx       Pointer to the hash context of this op.
460  * @param  pkt       Structure holding the input/output.
 *
462  * @return 0 on success, negative errno code on fail.
463  */
hash_compute(struct hash_ctx * ctx,struct hash_pkt * pkt)464 static inline int hash_compute(struct hash_ctx *ctx, struct hash_pkt *pkt)
465 {
466 	pkt->ctx = ctx;
467 
468 	return ctx->hash_hndlr(ctx, pkt, true);
469 }
470 
471 /**
472  * @brief Perform  a cryptographic multipart hash operation.
473  *
 * This function can be called zero or more times, passing a slice of
 * the data. The hash is calculated using all the given pieces.
476  * To calculate the hash call @c hash_compute().
477  *
478  * @param  ctx       Pointer to the hash context of this op.
479  * @param  pkt       Structure holding the input.
 *
481  * @return 0 on success, negative errno code on fail.
482  */
hash_update(struct hash_ctx * ctx,struct hash_pkt * pkt)483 static inline int hash_update(struct hash_ctx *ctx, struct hash_pkt *pkt)
484 {
485 	pkt->ctx = ctx;
486 
487 	return ctx->hash_hndlr(ctx, pkt, false);
488 }
489 
490 /**
491  * @}
492  */
493 
494 #endif /* ZEPHYR_INCLUDE_CRYPTO_H_ */
495