/*
 * Copyright (c) 2024 GARDENA GmbH
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Design decisions:
 * - As there is only one AES controller, this implementation is not using a device configuration.
 *
 * Notes:
 * - If not noted otherwise, chapter numbers refer to the SiM3U1XX/SiM3C1XX reference manual
 *   (SiM3U1xx-SiM3C1xx-RM.pdf, revision 1.0)
 * - Each DMA channel has one word of unused data (=> 3 x 4 = 12 bytes of unused RAM)
 */
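
/*
 * Consumer-side usage sketch (illustrative only, not part of this driver): sessions are obtained
 * through the generic Zephyr crypto API. Names like `key`, `plaintext` and `ciphertext` are
 * placeholders.
 *
 *   const struct device *dev = DEVICE_DT_GET_ONE(silabs_si32_aes);
 *   struct cipher_ctx ctx = {
 *           .keylen = 16,
 *           .key.bit_stream = key,
 *           .flags = CAP_RAW_KEY | CAP_SYNC_OPS | CAP_SEPARATE_IO_BUFS,
 *   };
 *   struct cipher_pkt pkt = {
 *           .in_buf = plaintext,
 *           .in_len = 16,
 *           .out_buf = ciphertext,
 *           .out_buf_max = 16,
 *   };
 *
 *   cipher_begin_session(dev, &ctx, CRYPTO_CIPHER_ALGO_AES, CRYPTO_CIPHER_MODE_ECB,
 *                        CRYPTO_CIPHER_OP_ENCRYPT);
 *   cipher_block_op(&ctx, &pkt);
 *   cipher_free_session(dev, &ctx);
 */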

#define DT_DRV_COMPAT silabs_si32_aes

#define LOG_LEVEL CONFIG_CRYPTO_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(aes_silabs_si32);

#include <zephyr/crypto/crypto.h>
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/byteorder.h>

#include <SI32_AES_A_Type.h>
#include <SI32_CLKCTRL_A_Type.h>
#include <SI32_DMACTRL_A_Type.h>
#include <SI32_DMAXBAR_A_Type.h>
#include <si32_device.h>

#include <errno.h>
#include <inttypes.h>
#include <string.h>

#define AES_KEY_SIZE   16
#define AES_BLOCK_SIZE 16

#define DMA_CHANNEL_COUNT DT_PROP(DT_INST(0, silabs_si32_dma), dma_channels)

#define DMA_CHANNEL_ID_RX  DT_INST_DMAS_CELL_BY_NAME(0, rx, channel)
#define DMA_CHANNEL_ID_TX  DT_INST_DMAS_CELL_BY_NAME(0, tx, channel)
#define DMA_CHANNEL_ID_XOR DT_INST_DMAS_CELL_BY_NAME(0, xor, channel)

BUILD_ASSERT(DMA_CHANNEL_ID_RX < DMA_CHANNEL_COUNT, "Too few DMA channels");
BUILD_ASSERT(DMA_CHANNEL_ID_TX < DMA_CHANNEL_COUNT, "Too few DMA channels");
BUILD_ASSERT(DMA_CHANNEL_ID_XOR < DMA_CHANNEL_COUNT, "Too few DMA channels");

struct crypto_session {
        /* Decryption key needed only by ECB and CBC, and counter only by CTR. */
        union {
                uint8_t decryption_key[32]; /* only used for decryption sessions */
                uint32_t current_ctr;       /* only used for AES-CTR sessions */
        };

        bool in_use;
};

struct crypto_data {
        struct crypto_session sessions[CONFIG_CRYPTO_SI32_MAX_SESSION];
};

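/*
 * crypto_si32_in_use serializes all access to the single AES0 block and its DMA channels;
 * crypto_si32_work_done is given from the RX DMA completion callback to signal the end of an AES
 * operation.
 */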
K_MUTEX_DEFINE(crypto_si32_in_use);
K_SEM_DEFINE(crypto_si32_work_done, 0, 1);

static struct crypto_data crypto_si32_data;

static void crypto_si32_dma_completed(const struct device *dev, void *user_data, uint32_t channel,
                                      int status)
{
        ARG_UNUSED(dev);
        ARG_UNUSED(user_data);

        const char *const result = status == DMA_STATUS_COMPLETE ? "succeeded" : "failed";

        switch (channel) {
        case DMA_CHANNEL_ID_RX:
                LOG_DBG("AES0 RX DMA channel %s", result);
                k_sem_give(&crypto_si32_work_done);
                break;
        case DMA_CHANNEL_ID_TX:
                LOG_DBG("AES0 TX DMA channel %s", result);
                break;
        case DMA_CHANNEL_ID_XOR:
                LOG_DBG("AES0 XOR DMA channel %s", result);
                break;
        default:
                LOG_ERR("Unknown DMA channel number: %d", channel);
                break;
        }
}

static int crypto_si32_query_hw_caps(const struct device *dev)
{
        ARG_UNUSED(dev);

        return (CAP_RAW_KEY | CAP_INPLACE_OPS | CAP_SEPARATE_IO_BUFS | CAP_SYNC_OPS |
                CAP_NO_IV_PREFIX);
}

static void crypto_si32_irq_error_handler(const struct device *dev)
{
        ARG_UNUSED(dev);

        /* 12.3 Interrupts: An AES0 error interrupt can be generated whenever an input/output data
         * FIFO overrun (DORF = 1) or underrun (DURF = 1) error occurs, or when an XOR data FIFO
         * overrun (XORF = 1) occurs.
         */
        if (SI32_AES_0->STATUS.ERRI) {
                LOG_ERR("AES0 FIFO overrun (%u), underrun (%u), XOR FIFO overrun (%u)",
                        SI32_AES_0->STATUS.DORF, SI32_AES_0->STATUS.DURF, SI32_AES_0->STATUS.XORF);
                SI32_AES_A_clear_error_interrupt(SI32_AES_0);
        }
}

/* For simplicity, the AES HW does not get turned off when not in use. */
static int crypto_si32_init(const struct device *dev)
{
        ARG_UNUSED(dev);

        /* Enable clock for AES HW */
        SI32_CLKCTRL_A_enable_apb_to_modules_0(SI32_CLKCTRL_0, SI32_CLKCTRL_A_APBCLKG0_AES0);

        /* To use the AES0 module, firmware must first clear the RESET bit before initializing the
         * registers.
         */
        SI32_AES_A_reset_module(SI32_AES_0);

        __ASSERT(SI32_AES_0->CONTROL.RESET == 0, "Reset done");

        /* 12.3. Interrupts: The completion interrupt should only be used in conjunction
         * with software mode (SWMDEN bit is set to 1) and not with DMA operations, where the DMA
         * completion interrupt should be used.
         */
        SI32_AES_A_disable_operation_complete_interrupt(SI32_AES_0); /* default */

        /* 12.3. Interrupts: The error interrupt should always be enabled (ERRIEN = 1), even when
         * using the DMA with the AES module.
         */
        SI32_AES_A_enable_error_interrupt(SI32_AES_0);

        /* Install error handler */
        IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), crypto_si32_irq_error_handler,
                    DEVICE_DT_INST_GET(0), 0);
        irq_enable(DT_INST_IRQN(0));

        /* Halt AES0 module on debug breakpoint */
        SI32_AES_A_enable_stall_in_debug_mode(SI32_AES_0);

        /* For peripheral transfers, firmware should configure the peripheral for the DMA transfer
         * and set the device's DMA crossbar (DMAXBAR) to map a DMA channel to the peripheral.
         */
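        /* Note: the crossbar selections below hardcode channels 5 (TX), 6 (RX) and 7 (XOR). The
         * devicetree dmas entries are expected to name the same channels; the CHxSEL assertions in
         * the DMA setup helpers double-check this.
         */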
        SI32_DMAXBAR_A_select_channel_peripheral(SI32_DMAXBAR_0, SI32_DMAXBAR_CHAN5_AES0_TX);
        SI32_DMAXBAR_A_select_channel_peripheral(SI32_DMAXBAR_0, SI32_DMAXBAR_CHAN6_AES0_RX);
        SI32_DMAXBAR_A_select_channel_peripheral(SI32_DMAXBAR_0, SI32_DMAXBAR_CHAN7_AES0_XOR);

        return 0;
}

static int crypto_si32_aes_set_key(const uint8_t *key, uint8_t key_len)
{
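        /* Assumption: the key buffer is 32-bit aligned, since the HWKEYx registers are written
         * word-wise through this pointer.
         */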
        const uint32_t *key_as_word = (const uint32_t *)key;

        switch (key_len) {
        case 32:
                SI32_AES_0->HWKEY7.U32 = key_as_word[7];
                SI32_AES_0->HWKEY6.U32 = key_as_word[6];
                __fallthrough;
        case 24:
                SI32_AES_0->HWKEY5.U32 = key_as_word[5];
                SI32_AES_0->HWKEY4.U32 = key_as_word[4];
                __fallthrough;
        case 16:
                SI32_AES_0->HWKEY3.U32 = key_as_word[3];
                SI32_AES_0->HWKEY2.U32 = key_as_word[2];
                SI32_AES_0->HWKEY1.U32 = key_as_word[1];
                SI32_AES_0->HWKEY0.U32 = key_as_word[0];
                break;
        default:
                LOG_ERR("Invalid key len: %" PRIu16, key_len);
                return -EINVAL;
        }

        return 0;
}

static int crypto_si32_aes_calc_decryption_key(const struct cipher_ctx *ctx,
                                               uint8_t *decryption_key)
{
        uint32_t *decryption_key_word = (uint32_t *)decryption_key;
        int ret;

        ret = crypto_si32_aes_set_key(ctx->key.bit_stream, ctx->keylen);
        if (ret) {
                return ret;
        }

        LOG_INF("Generating decryption key");
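        /* Encrypt one all-zero block with key capture enabled: at the end of the operation the
         * hardware leaves the inverse (decryption) key in the HWKEYx registers, from where it is
         * read back below.
         */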
        /* TODO: How much of this can be removed? */
        SI32_AES_A_write_xfrsize(SI32_AES_0, 0);
        SI32_AES_A_enable_error_interrupt(SI32_AES_0);
        SI32_AES_A_exit_cipher_block_chaining_mode(SI32_AES_0);
        SI32_AES_A_exit_counter_mode(SI32_AES_0);
        SI32_AES_A_exit_bypass_hardware_mode(SI32_AES_0);
        SI32_AES_A_select_xor_path_none(SI32_AES_0);
        SI32_AES_A_select_software_mode(SI32_AES_0);
        SI32_AES_A_select_encryption_mode(SI32_AES_0);
        SI32_AES_A_enable_key_capture(SI32_AES_0);

        for (unsigned int i = 0; i < 4; i++) {
                SI32_AES_A_write_datafifo(SI32_AES_0, 0x00000000);
        }

        SI32_AES_A_clear_operation_complete_interrupt(SI32_AES_0);
        SI32_AES_A_start_operation(SI32_AES_0);
        while (!SI32_AES_A_is_operation_complete_interrupt_pending(SI32_AES_0)) {
                /* This should not take long */
        }

        for (unsigned int i = 0; i < 4; i++) {
                SI32_AES_A_read_datafifo(SI32_AES_0);
        }

        switch (ctx->keylen) {
        case 32:
                decryption_key_word[7] = SI32_AES_0->HWKEY7.U32;
                decryption_key_word[6] = SI32_AES_0->HWKEY6.U32;
                __fallthrough;
        case 24:
                decryption_key_word[5] = SI32_AES_0->HWKEY5.U32;
                decryption_key_word[4] = SI32_AES_0->HWKEY4.U32;
                __fallthrough;
        case 16:
                decryption_key_word[3] = SI32_AES_0->HWKEY3.U32;
                decryption_key_word[2] = SI32_AES_0->HWKEY2.U32;
                decryption_key_word[1] = SI32_AES_0->HWKEY1.U32;
                decryption_key_word[0] = SI32_AES_0->HWKEY0.U32;
                break;
        default:
                LOG_ERR("Invalid key len: %" PRIu16, ctx->keylen);
                return -EINVAL;
        }

        return 0;
}

static int crypto_si32_aes_set_key_size(const struct cipher_ctx *ctx)
{
        switch (ctx->keylen) {
        case 32:
                SI32_AES_A_select_key_size_256(SI32_AES_0);
                break;
        case 24:
                SI32_AES_A_select_key_size_192(SI32_AES_0);
                break;
        case 16:
                SI32_AES_A_select_key_size_128(SI32_AES_0);
                break;
        default:
                LOG_ERR("Invalid key len: %" PRIu16, ctx->keylen);
                return -EINVAL;
        }

        return 0;
}

static void assert_dma_settings_common(struct SI32_DMADESC_A_Struct *channel_descriptor)
{
        ARG_UNUSED(channel_descriptor);
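        /* The descriptor is only read by the __ASSERT()s below, which compile away when
         * CONFIG_ASSERT=n; hence the ARG_UNUSED() above. The same applies to the other assert
         * helpers in this file.
         */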

        __ASSERT(channel_descriptor->CONFIG.SRCSIZE == 2,
                 "Source size (SRCSIZE) and destination size (DSTSIZE) are 2 for a word transfer.");
        __ASSERT(channel_descriptor->CONFIG.DSTSIZE == 2,
                 "Source size (SRCSIZE) and destination size (DSTSIZE) are 2 for a word transfer.");
        __ASSERT(channel_descriptor->CONFIG.RPOWER == 2,
                 "RPOWER = 2 (4 data transfers per transaction).");
}

static void assert_dma_settings_channel_rx(struct SI32_DMADESC_A_Struct *channel_descriptor)
{
        ARG_UNUSED(channel_descriptor);

        assert_dma_settings_common(channel_descriptor);

        __ASSERT(channel_descriptor->SRCEND.U32 == (uintptr_t)&SI32_AES_0->DATAFIFO,
                 "Source end pointer set to the DATAFIFO register.");
        __ASSERT(channel_descriptor->CONFIG.DSTAIMD == 0b10,
                 "The DSTAIMD field should be set to 010b for word increments.");
        __ASSERT(channel_descriptor->CONFIG.SRCAIMD == 0b11,
                 "The SRCAIMD field should be set to 011b for no increment.");
}

static void assert_dma_settings_channel_tx(struct SI32_DMADESC_A_Struct *channel_descriptor)
{
        ARG_UNUSED(channel_descriptor);

        assert_dma_settings_common(channel_descriptor);

        __ASSERT(channel_descriptor->DSTEND.U32 == (uintptr_t)&SI32_AES_0->DATAFIFO,
                 "Destination end pointer set to the DATAFIFO register.");
        __ASSERT(channel_descriptor->CONFIG.DSTAIMD == 0b11,
                 "The DSTAIMD field should be set to 011b for no increment.");
        __ASSERT(channel_descriptor->CONFIG.SRCAIMD == 0b10,
                 "The SRCAIMD field should be set to 010b for word increments.");
}

static void assert_dma_settings_channel_xor(struct SI32_DMADESC_A_Struct *channel_descriptor)
{
        ARG_UNUSED(channel_descriptor);

        assert_dma_settings_common(channel_descriptor);

        __ASSERT(channel_descriptor->DSTEND.U32 == (uintptr_t)&SI32_AES_0->XORFIFO,
                 "Destination end pointer set to the XORFIFO register.");
        __ASSERT(channel_descriptor->CONFIG.DSTAIMD == 0b11,
                 "The DSTAIMD field should be set to 011b for no increment.");
        __ASSERT(channel_descriptor->CONFIG.SRCAIMD == 0b10,
                 "The SRCAIMD field should be set to 010b for word increments.");
}

/* Set up and start input (TX) DMA channel */
static int crypto_si32_dma_setup_tx(struct cipher_pkt *pkt, unsigned int in_buf_offset)
{
        struct dma_block_config dma_block_cfg = {};
        const struct device *dma = DEVICE_DT_GET(DT_NODELABEL(dma));
        struct dma_config dma_cfg;
        int ret;

        if (!pkt->in_len) {
                LOG_WRN("Zero-sized data");
                return 0;
        }

        if (pkt->in_len % 16) {
                LOG_ERR("Data size must be 4-word aligned");
                return -EINVAL;
        }

        dma_block_cfg.block_size = pkt->in_len - in_buf_offset;
        dma_block_cfg.source_address = (uintptr_t)pkt->in_buf + in_buf_offset;
        dma_block_cfg.source_addr_adj = 0b00; /* increment */
        dma_block_cfg.dest_address = (uintptr_t)&SI32_AES_0->DATAFIFO;
        dma_block_cfg.dest_addr_adj = 0b10; /* no change (no increment) */
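        /* The literal values above are the address-adjustment encodings documented for struct
         * dma_block_config in <zephyr/drivers/dma.h>: 0b00 = increment, 0b10 = no change.
         */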

        dma_cfg = (struct dma_config){
                .channel_direction = MEMORY_TO_PERIPHERAL,
                .source_data_size = 4, /* SiM3x1xx limitation: must match dest_data_size */
                .dest_data_size = 4,   /* DATAFIFO must be written to in word chunks (4 bytes) */
                .source_burst_length = AES_BLOCK_SIZE,
                .dest_burst_length = AES_BLOCK_SIZE,
                .block_count = 1,
                .head_block = &dma_block_cfg,
                .dma_callback = crypto_si32_dma_completed,
        };

        /* Stop channel to ensure we are not messing with an ongoing DMA operation */
        ret = dma_stop(dma, DMA_CHANNEL_ID_TX);
        if (ret) {
                LOG_ERR("TX DMA channel stop failed: %d", ret);
                return ret;
        }

        ret = dma_config(dma, DMA_CHANNEL_ID_TX, &dma_cfg);
        if (ret) {
                LOG_ERR("TX DMA channel setup failed: %d", ret);
                return ret;
        }

        ret = dma_start(dma, DMA_CHANNEL_ID_TX);
        if (ret) {
                LOG_ERR("TX DMA channel start failed: %d", ret);
                return ret;
        }

        /* Some assertions, helpful during development */
        {
                struct SI32_DMADESC_A_Struct *d =
                        (struct SI32_DMADESC_A_Struct *)SI32_DMACTRL_0->BASEPTR.U32;

                /* Verify 12.5.2. General DMA Transfer Setup */
                assert_dma_settings_channel_tx(d + DMA_CHANNEL_ID_TX);

                /* Other checks */
                __ASSERT(SI32_DMACTRL_A_is_channel_enabled(SI32_DMACTRL_0, DMA_CHANNEL_ID_TX),
                         "The channel request mask (CHREQMCLR) must be cleared for the channel to "
                         "use peripheral transfers.");

                __ASSERT(SI32_DMAXBAR_0->DMAXBAR0.CH5SEL == 0b0001,
                         "0001: Service AES0 TX data requests.");
        }

        return 0;
}

/* Set up and start output (RX) DMA channel */
static int crypto_si32_dma_setup_rx(struct cipher_pkt *pkt, unsigned int in_buf_offset,
                                    unsigned int out_buf_offset)
{
        struct dma_block_config dma_block_cfg = {};
        const struct device *dma = DEVICE_DT_GET(DT_NODELABEL(dma));
        struct dma_config dma_cfg;
        int ret;
        uint32_t dest_address;

        if (!pkt->in_len) {
                LOG_WRN("Zero-sized data");
                return 0;
        }

        if (pkt->in_len % 16) {
                LOG_ERR("Data size must be 4-word aligned");
                return -EINVAL;
        }

        /* A NULL out_buf indicates an in-place operation. */
        if (pkt->out_buf == NULL) {
                dest_address = (uintptr_t)pkt->in_buf;
        } else {
                if ((pkt->out_buf_max - out_buf_offset) < (pkt->in_len - in_buf_offset)) {
                        LOG_ERR("Output buf too small");
                        return -ENOMEM;
                }

                dest_address = (uintptr_t)(pkt->out_buf + out_buf_offset);
        }

        /* Set up output (RX) DMA channel */
        dma_block_cfg.block_size = pkt->in_len - in_buf_offset;
        dma_block_cfg.source_address = (uintptr_t)&SI32_AES_0->DATAFIFO;
        dma_block_cfg.source_addr_adj = 0b10; /* no change */
        dma_block_cfg.dest_address = dest_address;
        dma_block_cfg.dest_addr_adj = 0b00; /* increment */

        dma_cfg = (struct dma_config){
                .channel_direction = PERIPHERAL_TO_MEMORY,
                .source_data_size = 4, /* DATAFIFO must be read from in word chunks (4 bytes) */
                .dest_data_size = 4,   /* SiM3x1xx limitation: must match source_data_size */
                .source_burst_length = AES_BLOCK_SIZE,
                .dest_burst_length = AES_BLOCK_SIZE,
                .block_count = 1,
                .head_block = &dma_block_cfg,
                .dma_callback = crypto_si32_dma_completed,
        };

        /* Stop channel to ensure we are not messing with an ongoing DMA operation */
        ret = dma_stop(dma, DMA_CHANNEL_ID_RX);
        if (ret) {
                LOG_ERR("RX DMA channel stop failed: %d", ret);
                return ret;
        }

        ret = dma_config(dma, DMA_CHANNEL_ID_RX, &dma_cfg);
        if (ret) {
                LOG_ERR("RX DMA channel setup failed: %d", ret);
                return ret;
        }

        ret = dma_start(dma, DMA_CHANNEL_ID_RX);
        if (ret) {
                LOG_ERR("RX DMA channel start failed: %d", ret);
                return ret;
        }

        /* Some assertions, helpful during development */
        {
                struct SI32_DMADESC_A_Struct *d =
                        (struct SI32_DMADESC_A_Struct *)SI32_DMACTRL_0->BASEPTR.U32;

                /* As per 12.5.2. General DMA Transfer Setup, check input and output channel
                 * programming
                 */
                assert_dma_settings_channel_rx(d + DMA_CHANNEL_ID_RX);

                /* Other checks */
                __ASSERT(SI32_DMACTRL_A_is_channel_enabled(SI32_DMACTRL_0, DMA_CHANNEL_ID_RX),
                         "The channel request mask (CHREQMCLR) must be cleared for the channel to "
                         "use peripheral transfers.");

                __ASSERT(SI32_DMAXBAR_0->DMAXBAR0.CH6SEL == 0b0001,
                         "0001: Service AES0 RX data requests.");
        }

        return 0;
}

/* Set up and start XOR DMA channel */
static int crypto_si32_dma_setup_xor(struct cipher_pkt *pkt)
{
        struct dma_block_config dma_block_cfg = {};
        const struct device *dma = DEVICE_DT_GET(DT_NODELABEL(dma));
        struct dma_config dma_cfg;
        int ret;

        if (!pkt->in_len) {
                LOG_WRN("Zero-sized data");
                return 0;
        }

        if (pkt->in_len % 16) {
                LOG_ERR("Data size must be 4-word aligned");
                return -EINVAL;
        }

        dma_block_cfg.block_size = pkt->in_len;
        dma_block_cfg.source_address = (uintptr_t)pkt->in_buf;
        dma_block_cfg.source_addr_adj = 0b00; /* increment */
        dma_block_cfg.dest_address = (uintptr_t)&SI32_AES_0->XORFIFO;
        dma_block_cfg.dest_addr_adj = 0b10; /* no change (no increment) */

        dma_cfg = (struct dma_config){
                .channel_direction = MEMORY_TO_PERIPHERAL,
                .source_data_size = 4, /* SiM3x1xx limitation: must match dest_data_size */
                .dest_data_size = 4,   /* XORFIFO must be written to in word chunks (4 bytes) */
                .source_burst_length = AES_BLOCK_SIZE,
                .dest_burst_length = AES_BLOCK_SIZE,
                .block_count = 1,
                .head_block = &dma_block_cfg,
                .dma_callback = crypto_si32_dma_completed,
        };

        /* Stop channel to ensure we are not messing with an ongoing DMA operation */
        ret = dma_stop(dma, DMA_CHANNEL_ID_XOR);
        if (ret) {
                LOG_ERR("XOR DMA channel stop failed: %d", ret);
                return ret;
        }

        ret = dma_config(dma, DMA_CHANNEL_ID_XOR, &dma_cfg);
        if (ret) {
                LOG_ERR("XOR DMA channel setup failed: %d", ret);
                return ret;
        }

        ret = dma_start(dma, DMA_CHANNEL_ID_XOR);
        if (ret) {
                LOG_ERR("XOR DMA channel start failed: %d", ret);
                return ret;
        }

        /* Some assertions, helpful during development */
        {
                struct SI32_DMADESC_A_Struct *d =
                        (struct SI32_DMADESC_A_Struct *)SI32_DMACTRL_0->BASEPTR.U32;

                /* As per 12.5.2. General DMA Transfer Setup, check input and output channel
                 * programming
                 */
                assert_dma_settings_channel_xor(d + DMA_CHANNEL_ID_XOR);

                /* Other checks */
                __ASSERT(SI32_DMACTRL_A_is_channel_enabled(SI32_DMACTRL_0, DMA_CHANNEL_ID_XOR),
                         "The channel request mask (CHREQMCLR) must be cleared for the channel to "
                         "use peripheral transfers.");

                __ASSERT(SI32_DMAXBAR_0->DMAXBAR0.CH7SEL == 0b0001,
                         "0001: Service AES0 XOR data requests.");
        }

        return 0;
}

static int crypto_si32_aes_ecb_op(struct cipher_ctx *ctx, struct cipher_pkt *pkt,
                                  const enum cipher_op op)
{
        struct crypto_session *session;
        int ret;

        if (!ctx) {
                LOG_WRN("Missing context");
                return -EINVAL;
        }

        session = (struct crypto_session *)ctx->drv_sessn_state;

        if (!pkt) {
                LOG_WRN("Missing packet");
                return -EINVAL;
        }

        if (pkt->in_len % 16) {
                LOG_ERR("Can't work on partial blocks");
                return -EINVAL;
        }

        if (pkt->in_len > 16) {
                LOG_ERR("Refusing to work on multiple ECB blocks");
                return -EINVAL;
        }

        if (pkt->in_len == 0) {
                LOG_DBG("Zero-sized packet");
                return 0;
        }

        if ((ctx->flags & CAP_INPLACE_OPS) && (pkt->out_buf != NULL)) {
                LOG_ERR("In-place must not have an out_buf");
                return -EINVAL;
        }

        /* As per 12.6.1./12.6.2. Configuring the DMA for ECB Encryption/Decryption */

        /* DMA Input Channel */
        ret = crypto_si32_dma_setup_tx(pkt, 0);
        if (ret) {
                return ret;
        }

        /* DMA Output Channel */
        ret = crypto_si32_dma_setup_rx(pkt, 0, 0);
        if (ret) {
                return ret;
        }

        /* AES Module */

        /* 1. The XFRSIZE register should be set to N-1, where N is the number of 4-word blocks. */
        SI32_AES_A_write_xfrsize(SI32_AES_0, pkt->in_len / AES_BLOCK_SIZE - 1);
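        /* Since the checks above limit in_len to a single 16-byte block, this always writes 0. */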

        switch (op) {
        case CRYPTO_CIPHER_OP_ENCRYPT:
                /* 2. The HWKEYx registers should be written with the desired key in little endian
                 * format.
                 */
                ret = crypto_si32_aes_set_key(ctx->key.bit_stream, ctx->keylen);
                if (ret) {
                        return ret;
                }
                break;
        case CRYPTO_CIPHER_OP_DECRYPT:
                /* 2. The HWKEYx registers should be written with decryption key value
                 * (automatically generated in the HWKEYx registers after the encryption process).
                 */
                ret = crypto_si32_aes_set_key(session->decryption_key, ctx->keylen);
                if (ret) {
                        return ret;
                }
                break;
        default:
                LOG_ERR("Unsupported cipher_op: %d", op);
                return -ENOSYS;
        }

        /* 3. The CONTROL register should be set as follows: */
        {
                __ASSERT(SI32_AES_0->CONTROL.ERRIEN == 1, "a. ERRIEN set to 1.");

                /* b. KEYSIZE set to the appropriate number of bits for the key. */
                ret = crypto_si32_aes_set_key_size(ctx);
                if (ret) {
                        return ret;
                }

                switch (op) {
                /* c. EDMD set to 1 for encryption. */
                case CRYPTO_CIPHER_OP_ENCRYPT:
                        SI32_AES_A_select_encryption_mode(SI32_AES_0);
                        break;
                /* c. EDMD set to 1 for DEcryption. (documentation is wrong here) */
                case CRYPTO_CIPHER_OP_DECRYPT:
                        SI32_AES_A_select_decryption_mode(SI32_AES_0);
                        break;
                default:
                        LOG_ERR("Unsupported cipher_op: %d", op);
                        return -ENOSYS;
                }

                /* d. KEYCPEN set to 1 to enable key capture at the end of the transaction. */
                SI32_AES_A_enable_key_capture(SI32_AES_0);

                /* e. The HCBCEN, HCTREN, XOREN, BEN, SWMDEN bits should all be cleared to 0. */
                SI32_AES_A_exit_cipher_block_chaining_mode(SI32_AES_0); /* Clear HCBCEN */
                SI32_AES_A_exit_counter_mode(SI32_AES_0);               /* Clear HCTREN */
                SI32_AES_A_select_xor_path_none(SI32_AES_0);            /* Clear XOREN */
                SI32_AES_A_exit_bypass_hardware_mode(SI32_AES_0);       /* Clear BEN */
                SI32_AES_A_select_dma_mode(SI32_AES_0);                 /* Clear SWMDEN */
        }

        k_sem_reset(&crypto_si32_work_done);

        /* Once the DMA and AES settings have been set, the transfer should be started by writing 1
         * to the XFRSTA bit.
         */
        SI32_AES_A_start_operation(SI32_AES_0);

        ret = k_sem_take(&crypto_si32_work_done, K_MSEC(50)); /* TODO: Verify 50 ms */
        if (ret) {
                LOG_ERR("AES operation timed out: %d", ret);
                return -EIO;
        }

        pkt->out_len = pkt->in_len;

        return 0;
}

static int crypto_si32_aes_cbc_op(struct cipher_ctx *ctx, struct cipher_pkt *pkt,
                                  const enum cipher_op op, uint8_t iv[16])
{
        struct crypto_session *session;
        int ret;
        unsigned int in_buf_offset = 0;
        unsigned int out_buf_offset = 0;

        if (!ctx) {
                LOG_WRN("Missing context");
                return -EINVAL;
        }

        session = (struct crypto_session *)ctx->drv_sessn_state;

        if (!pkt) {
                LOG_WRN("Missing packet");
                return -EINVAL;
        }

        if (pkt->in_len % 16) {
                LOG_ERR("Can't work on partial blocks");
                return -EINVAL;
        }

        if (pkt->in_len == 0) {
                LOG_WRN("Zero-sized packet");
                return 0;
        }

        /* Prefix IV to/remove from ciphertext unless CAP_NO_IV_PREFIX is set. */
        if ((ctx->flags & CAP_NO_IV_PREFIX) == 0U) {
                switch (op) {
                case CRYPTO_CIPHER_OP_ENCRYPT:
                        if (pkt->out_buf_max < 16) {
                                LOG_ERR("Output buf too small");
                                return -ENOMEM;
                        }
                        if (!pkt->out_buf) {
                                LOG_ERR("Missing output buf");
                                return -EINVAL;
                        }
                        memcpy(pkt->out_buf, iv, 16);
                        out_buf_offset = 16;
                        break;
                case CRYPTO_CIPHER_OP_DECRYPT:
                        in_buf_offset = 16;
                        break;
                default:
                        LOG_ERR("Unsupported cipher_op: %d", op);
                        return -ENOSYS;
                }
        }

        /* As per 12.7.1.1./12.7.1.2. Configuring the DMA for Hardware CBC Encryption/Decryption */

        /* DMA Input Channel */
        ret = crypto_si32_dma_setup_tx(pkt, in_buf_offset);
        if (ret) {
                return ret;
        }

        /* DMA Output Channel */
        ret = crypto_si32_dma_setup_rx(pkt, in_buf_offset, out_buf_offset);
        if (ret) {
                return ret;
        }

        /* Initialization Vector */

        /* The initialization vector should be initialized to the HWCTRx registers. */
        SI32_AES_0->HWCTR0.U32 = *((uint32_t *)iv);
        SI32_AES_0->HWCTR1.U32 = *((uint32_t *)iv + 1);
        SI32_AES_0->HWCTR2.U32 = *((uint32_t *)iv + 2);
        SI32_AES_0->HWCTR3.U32 = *((uint32_t *)iv + 3);

        /* AES Module */

        /* 1. The XFRSIZE register should be set to N-1, where N is the number of 4-word blocks. */
        SI32_AES_A_write_xfrsize(SI32_AES_0, (pkt->in_len - in_buf_offset) / AES_BLOCK_SIZE - 1);

        switch (op) {
        case CRYPTO_CIPHER_OP_ENCRYPT:
                /* 2. The HWKEYx registers should be written with the desired key in little endian
                 * format.
                 */
                ret = crypto_si32_aes_set_key(ctx->key.bit_stream, ctx->keylen);
                if (ret) {
                        return ret;
                }
                break;
        case CRYPTO_CIPHER_OP_DECRYPT:
                /* 2. The HWKEYx registers should be written with decryption key value
                 * (automatically generated in the HWKEYx registers after the encryption process).
                 */
                ret = crypto_si32_aes_set_key(session->decryption_key, ctx->keylen);
                if (ret) {
                        return ret;
                }
                break;
        default:
                LOG_ERR("Unsupported cipher_op: %d", op);
                return -ENOSYS;
        }

        /* 3. The CONTROL register should be set as follows: */
        {
                __ASSERT(SI32_AES_0->CONTROL.ERRIEN == 1, "a. ERRIEN set to 1.");

                /* b. KEYSIZE set to the appropriate number of bits for the key. */
                ret = crypto_si32_aes_set_key_size(ctx);
                if (ret) {
                        return ret;
                }

                switch (op) {
                case CRYPTO_CIPHER_OP_ENCRYPT:
                        /* c. XOREN bits set to 01b to enable the XOR input path. */
                        SI32_AES_A_select_xor_path_input(SI32_AES_0);

                        /* d. EDMD set to 1 for encryption. */
                        SI32_AES_A_select_encryption_mode(SI32_AES_0);

                        /* e. KEYCPEN set to 1 to enable key capture at the end of the transaction.
                         */
                        SI32_AES_A_enable_key_capture(SI32_AES_0);
                        break;
                case CRYPTO_CIPHER_OP_DECRYPT:
                        /* c. XOREN set to 10b to enable the XOR output path. */
                        SI32_AES_A_select_xor_path_output(SI32_AES_0);

                        /* d. EDMD set to 0 for decryption. */
                        SI32_AES_A_select_decryption_mode(SI32_AES_0);

                        /* e. KEYCPEN set to 0 to disable key capture at the end of the transaction.
                         */
                        SI32_AES_A_disable_key_capture(SI32_AES_0);
                        break;
                default:
                        LOG_ERR("Unsupported cipher_op: %d", op);
                        return -ENOSYS;
                }

                /* f. HCBCEN set to 1 to enable Hardware Cipher Block Chaining mode. */
                SI32_AES_A_enter_cipher_block_chaining_mode(SI32_AES_0);

                /* g. The HCTREN, BEN, SWMDEN bits should all be cleared to 0. */
                SI32_AES_A_exit_counter_mode(SI32_AES_0);         /* Clear HCTREN */
                SI32_AES_A_exit_bypass_hardware_mode(SI32_AES_0); /* Clear BEN */
                SI32_AES_A_select_dma_mode(SI32_AES_0);           /* Clear SWMDEN */
        }

        k_sem_reset(&crypto_si32_work_done);

        /* Once the DMA and AES settings have been set, the transfer should be started by writing 1
         * to the XFRSTA bit.
         */
        SI32_AES_A_start_operation(SI32_AES_0);

        ret = k_sem_take(&crypto_si32_work_done, K_MSEC(50)); /* TODO: Verify 50 ms */
        if (ret) {
                LOG_ERR("AES operation timed out: %d", ret);
                return -EIO;
        }

        /* Update passed IV buffer with new version */
        *((uint32_t *)iv) = SI32_AES_0->HWCTR0.U32;
        *((uint32_t *)iv + 1) = SI32_AES_0->HWCTR1.U32;
        *((uint32_t *)iv + 2) = SI32_AES_0->HWCTR2.U32;
        *((uint32_t *)iv + 3) = SI32_AES_0->HWCTR3.U32;

        pkt->out_len = pkt->in_len - in_buf_offset + out_buf_offset;

        return 0;
}

static int crypto_si32_aes_ctr_op(struct cipher_ctx *ctx, struct cipher_pkt *pkt, uint8_t iv[12])
{
        struct crypto_session *session;
        int ret;

        if (!ctx) {
                LOG_WRN("Missing context");
                return -EINVAL;
        }

        session = (struct crypto_session *)ctx->drv_sessn_state;

        if (!pkt) {
                LOG_WRN("Missing packet");
                return -EINVAL;
        }

        if (pkt->in_len % 16) {
                LOG_ERR("Can't work on partial blocks");
                return -EINVAL;
        }

        if (pkt->in_len == 0) {
                LOG_WRN("Zero-sized packet");
                return 0;
        }

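        /* Unlike the ECB/CBC paths, this function is registered directly as the ctr_crypt_hndlr,
         * so it has to take the device lock itself.
         */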
        k_mutex_lock(&crypto_si32_in_use, K_FOREVER);

        /* 12.8.1./12.8.2. Configuring the DMA for CTR Encryption/Decryption */

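        /* Note: no TX (input) channel is needed here. In counter mode the AES core encrypts the
         * HWCTRx counter value internally; the plain-/ciphertext only passes through the XOR FIFO
         * set up below.
         */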
        /* DMA Output Channel */
        ret = crypto_si32_dma_setup_rx(pkt, 0, 0);
        if (ret) {
                goto out_unlock;
        }

        /* DMA XOR Channel */
        ret = crypto_si32_dma_setup_xor(pkt);
        if (ret) {
                goto out_unlock;
        }

        /* Initialization Vector */

        /* The initialization vector should be initialized to the HWCTRx registers. */
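        /* For ctr_len == 32, HWCTR0..HWCTR2 take the 96-bit nonce from iv[] and HWCTR3 takes the
         * 32-bit big-endian block counter kept in the session.
         */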
        switch (ctx->mode_params.ctr_info.ctr_len) {
        case 32:
                SI32_AES_0->HWCTR3.U32 = sys_cpu_to_be32(session->current_ctr);
                SI32_AES_0->HWCTR2.U32 = *((uint32_t *)iv + 2);
                SI32_AES_0->HWCTR1.U32 = *((uint32_t *)iv + 1);
                SI32_AES_0->HWCTR0.U32 = *((uint32_t *)iv);
                break;
        default:
                LOG_ERR("Unsupported counter length: %" PRIu16, ctx->mode_params.ctr_info.ctr_len);
                ret = -ENOSYS;
                goto out_unlock;
        }

        /* AES Module */

        /* 1. The XFRSIZE register should be set to N-1, where N is the number of 4-word blocks. */
        SI32_AES_A_write_xfrsize(SI32_AES_0, pkt->in_len / AES_BLOCK_SIZE - 1);

        /* 2. The HWKEYx registers should be written with the desired key in little endian format.
         */
        ret = crypto_si32_aes_set_key(ctx->key.bit_stream, ctx->keylen);
        if (ret) {
                goto out_unlock;
        }

        /* 3. The CONTROL register should be set as follows: */
        {
                __ASSERT(SI32_AES_0->CONTROL.ERRIEN == 1, "a. ERRIEN set to 1.");

                /* b. KEYSIZE set to the appropriate number of bits for the key. */
                ret = crypto_si32_aes_set_key_size(ctx);
                if (ret) {
                        goto out_unlock;
                }

                /* c. EDMD set to 1 for encryption. */
                SI32_AES_A_select_encryption_mode(SI32_AES_0);

                /* d. KEYCPEN set to 0 to disable key capture at the end of the transaction. */
                SI32_AES_A_disable_key_capture(SI32_AES_0);

                /* e. HCTREN set to 1 to enable Hardware Counter mode. */
                SI32_AES_A_enter_counter_mode(SI32_AES_0);

                /* f. XOREN set to 10b to enable the XOR output path. */
                SI32_AES_A_select_xor_path_output(SI32_AES_0);

                /* g. The HCBCEN, BEN, SWMDEN bits should all be cleared to 0. */
                SI32_AES_A_exit_cipher_block_chaining_mode(SI32_AES_0); /* Clear HCBCEN */
                SI32_AES_A_exit_bypass_hardware_mode(SI32_AES_0);       /* Clear BEN */
                SI32_AES_A_select_dma_mode(SI32_AES_0);                 /* Clear SWMDEN */
        }

        k_sem_reset(&crypto_si32_work_done);

        /* Once the DMA and AES settings have been set, the transfer should be started by writing 1
         * to the XFRSTA bit.
         */
        SI32_AES_A_start_operation(SI32_AES_0);

        ret = k_sem_take(&crypto_si32_work_done, K_MSEC(50)); /* TODO: Verify 50 ms */
        if (ret) {
                LOG_ERR("AES operation timed out: %d", ret);
                ret = -EIO;
                goto out_unlock;
        }

        /* Update session with new counter value */
        switch (ctx->mode_params.ctr_info.ctr_len) {
        case 32:
                session->current_ctr = sys_be32_to_cpu(SI32_AES_0->HWCTR3.U32);
                break;
        default:
                LOG_ERR("Unsupported counter length: %" PRIu16, ctx->mode_params.ctr_info.ctr_len);
                ret = -ENOSYS;
                goto out_unlock;
        }

        pkt->out_len = pkt->in_len;

out_unlock:
        k_mutex_unlock(&crypto_si32_in_use);

        return ret;
}

static int crypto_si32_aes_ecb_encrypt(struct cipher_ctx *ctx, struct cipher_pkt *pkt)
{
        int ret;

        k_mutex_lock(&crypto_si32_in_use, K_FOREVER);
        ret = crypto_si32_aes_ecb_op(ctx, pkt, CRYPTO_CIPHER_OP_ENCRYPT);
        k_mutex_unlock(&crypto_si32_in_use);

        return ret;
}

static int crypto_si32_aes_ecb_decrypt(struct cipher_ctx *ctx, struct cipher_pkt *pkt)
{
        int ret;

        k_mutex_lock(&crypto_si32_in_use, K_FOREVER);
        ret = crypto_si32_aes_ecb_op(ctx, pkt, CRYPTO_CIPHER_OP_DECRYPT);
        k_mutex_unlock(&crypto_si32_in_use);

        return ret;
}

static int crypto_si32_aes_cbc_encrypt(struct cipher_ctx *ctx, struct cipher_pkt *pkt, uint8_t *iv)
{
        int ret;

        k_mutex_lock(&crypto_si32_in_use, K_FOREVER);
        ret = crypto_si32_aes_cbc_op(ctx, pkt, CRYPTO_CIPHER_OP_ENCRYPT, iv);
        k_mutex_unlock(&crypto_si32_in_use);

        return ret;
}

static int crypto_si32_aes_cbc_decrypt(struct cipher_ctx *ctx, struct cipher_pkt *pkt, uint8_t *iv)
{
        int ret;

        k_mutex_lock(&crypto_si32_in_use, K_FOREVER);
        ret = crypto_si32_aes_cbc_op(ctx, pkt, CRYPTO_CIPHER_OP_DECRYPT, iv);
        k_mutex_unlock(&crypto_si32_in_use);

        return ret;
}

static int crypto_si32_begin_session(const struct device *dev, struct cipher_ctx *ctx,
                                     const enum cipher_algo algo, const enum cipher_mode mode,
                                     const enum cipher_op op)
{
        int ret = 0;
        struct crypto_session *session = NULL;

        if (algo != CRYPTO_CIPHER_ALGO_AES) {
                LOG_ERR("This driver supports only AES");
                return -ENOTSUP;
        }

        if (!(ctx->flags & CAP_SYNC_OPS)) {
                LOG_ERR("This driver supports only synchronous mode");
                return -ENOTSUP;
        }

        if (ctx->key.bit_stream == NULL) {
                LOG_ERR("No key provided");
                return -EINVAL;
        }

        if (ctx->keylen != 16) {
                LOG_ERR("Only AES-128 implemented");
                return -ENOSYS;
        }

        switch (mode) {
        case CRYPTO_CIPHER_MODE_CBC:
                if ((ctx->flags & CAP_INPLACE_OPS) && (ctx->flags & CAP_NO_IV_PREFIX) == 0) {
                        LOG_ERR("In-place requires no IV prefix");
                        return -EINVAL;
                }
                break;
        case CRYPTO_CIPHER_MODE_CTR:
                if (ctx->mode_params.ctr_info.ctr_len != 32U) {
                        LOG_ERR("Only 32 bit counter implemented");
                        return -ENOSYS;
                }
                break;
        case CRYPTO_CIPHER_MODE_ECB:
        case CRYPTO_CIPHER_MODE_CCM:
        case CRYPTO_CIPHER_MODE_GCM:
        default:
                break;
        }

        k_mutex_lock(&crypto_si32_in_use, K_FOREVER);

        for (unsigned int i = 0; i < ARRAY_SIZE(crypto_si32_data.sessions); i++) {
                if (crypto_si32_data.sessions[i].in_use) {
                        continue;
                }

                LOG_INF("Claiming session %u", i);
                session = &crypto_si32_data.sessions[i];
                break;
        }

        if (!session) {
                LOG_INF("All %d session(s) in use", CONFIG_CRYPTO_SI32_MAX_SESSION);
                ret = -ENOSPC;
                goto out;
        }

        switch (op) {
        case CRYPTO_CIPHER_OP_ENCRYPT:
                switch (mode) {
                case CRYPTO_CIPHER_MODE_ECB:
                        ctx->ops.block_crypt_hndlr = crypto_si32_aes_ecb_encrypt;
                        break;
                case CRYPTO_CIPHER_MODE_CBC:
                        ctx->ops.cbc_crypt_hndlr = crypto_si32_aes_cbc_encrypt;
                        break;
                case CRYPTO_CIPHER_MODE_CTR:
                        ctx->ops.ctr_crypt_hndlr = crypto_si32_aes_ctr_op;
                        session->current_ctr = 0;
                        break;
                case CRYPTO_CIPHER_MODE_CCM:
                case CRYPTO_CIPHER_MODE_GCM:
                default:
                        LOG_ERR("Unsupported encryption mode: %d", mode);
                        ret = -ENOSYS;
                        goto out;
                }
                break;
        case CRYPTO_CIPHER_OP_DECRYPT:
                switch (mode) {
                case CRYPTO_CIPHER_MODE_ECB:
                        ctx->ops.block_crypt_hndlr = crypto_si32_aes_ecb_decrypt;
                        ret = crypto_si32_aes_calc_decryption_key(ctx, session->decryption_key);
                        if (ret) {
                                goto out;
                        }
                        break;
                case CRYPTO_CIPHER_MODE_CBC:
                        ctx->ops.cbc_crypt_hndlr = crypto_si32_aes_cbc_decrypt;
                        ret = crypto_si32_aes_calc_decryption_key(ctx, session->decryption_key);
                        if (ret) {
                                goto out;
                        }
                        break;
                case CRYPTO_CIPHER_MODE_CTR:
                        ctx->ops.ctr_crypt_hndlr = crypto_si32_aes_ctr_op;
                        session->current_ctr = 0;
                        break;
                case CRYPTO_CIPHER_MODE_CCM:
                case CRYPTO_CIPHER_MODE_GCM:
                default:
                        LOG_ERR("Unsupported decryption mode: %d", mode);
                        ret = -ENOSYS;
                        goto out;
                }
                break;
        default:
                LOG_ERR("Unsupported cipher_op: %d", op);
                ret = -ENOSYS;
                goto out;
        }

        session->in_use = true;
        ctx->drv_sessn_state = session;

out:
        k_mutex_unlock(&crypto_si32_in_use);

        return ret;
}

static int crypto_si32_free_session(const struct device *dev, struct cipher_ctx *ctx)
{
        ARG_UNUSED(dev);

        if (!ctx) {
                LOG_WRN("Missing context");
                return -EINVAL;
        }

        struct crypto_session *session = (struct crypto_session *)ctx->drv_sessn_state;

        k_mutex_lock(&crypto_si32_in_use, K_FOREVER);
        session->in_use = false;
        k_mutex_unlock(&crypto_si32_in_use);

        return 0;
}

/* AES only, no support for hashing */
static DEVICE_API(crypto, crypto_si32_api) = {
        .query_hw_caps = crypto_si32_query_hw_caps,
        .cipher_begin_session = crypto_si32_begin_session,
        .cipher_free_session = crypto_si32_free_session,
};

DEVICE_DT_INST_DEFINE(0, crypto_si32_init, NULL, NULL, NULL, POST_KERNEL,
                      CONFIG_CRYPTO_INIT_PRIORITY, &crypto_si32_api);
1205