/*
 * Copyright (c) 2022-2023 Arm Limited. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "sic_drv.h"

#include "cmsis_compiler.h"
#include <stddef.h>

/**
 * \brief SIC register map structure
 */
struct _sic_reg_map_t {
    volatile uint32_t cbcr;
                /*!< Offset: 0x000 (R/ ) SIC Cache Build Configuration Register */
    volatile uint32_t ccr;
                /*!< Offset: 0x004 (R/W) SIC Cache Control Register */
    volatile uint32_t aecr;
                /*!< Offset: 0x008 (R/W) SIC Authentication Engine Control Register */
    volatile uint32_t arba;
                /*!< Offset: 0x00C (R/W) SIC Authentication Region Base Address Register */
    volatile uint32_t area;
                /*!< Offset: 0x010 (R/W) SIC Authentication Region End Address Register */
    volatile uint32_t aescr;
                /*!< Offset: 0x014 (R/W) SIC AES Configuration Register */
    volatile uint32_t aesrbgr;
                /*!< Offset: 0x018 (R/W) SIC AES Random Bit Generator seed Register */
    volatile uint32_t aesrbgcr;
                /*!< Offset: 0x01C (R/ ) SIC AES Random Bit Generator Counter Register */
    volatile uint32_t reserved_0[4];   /*!< Offset: 0x020-0x02F Reserved */
    volatile uint32_t istar;
                /*!< Offset: 0x030 (R/ ) SIC Interrupt Status Register */
    volatile uint32_t isclrr;
                /*!< Offset: 0x034 (R/W) SIC Interrupt Clear Register */
    volatile uint32_t einr;
                /*!< Offset: 0x038 (R/W) SIC Interrupt Enable Register */
    volatile uint32_t reserved_1[9]; /*!< Offset: 0x3C-0x5F Reserved */
    volatile uint32_t pmcr;
                /*!< Offset: 0x060 (R/W) SIC Performance Monitoring Control Register */
    volatile uint32_t pmphc;
                /*!< Offset: 0x064 (R/ ) SIC Performance Monitoring Page Hit Register */
    volatile uint32_t pmpmc;
                /*!< Offset: 0x068 (R/ ) SIC Performance Monitoring Page Miss Register */
    volatile uint32_t pmbc;
                /*!< Offset: 0x06C (R/ ) SIC Performance Monitoring Bypass Register */
    volatile uint32_t reserved_2[36]; /*!< Offset: 0x70-0xFF Reserved */
    __PACKED_STRUCT {
        volatile uint32_t drr;
                /*!< Offset: 0xX00 (R/W) SIC Decryption Region Register */
        volatile uint32_t reserved_0[3]; /*!< Offset: 0xX04-0xX0F Reserved */
        volatile uint32_t div;
                /*!< Offset: 0xX10 (R/W) SIC Decryption IV Register */
        volatile uint32_t dnw[2];
                /*!< Offset: 0xX14 (R/W) SIC Decryption Nonce Word Register */
        volatile uint32_t reserved_1[1]; /*!< Offset: 0xX1C-0xX1F Reserved */
        volatile uint32_t dkw[8];
                /*!< Offset: 0xX20 ( /W) SIC Decryption Key Word Register */
        volatile uint32_t phc;
                /*!< Offset: 0xX40 (R/ ) SIC Page Hit Counter Register */
        volatile uint32_t pmc;
                /*!< Offset: 0xX44 (R/ ) SIC Page Miss Counter Register */
        volatile uint32_t pbc;
                /*!< Offset: 0xX48 (R/ ) SIC Page Bypass Counter Register */
        volatile uint32_t reserved_2[45]; /*!< Offset: 0xX4C-0xXFF Reserved */
    } dr[4];
    volatile uint32_t reserved_3[640]; /*!< Offset: 0x500-0xEFF Reserved */
    volatile uint32_t di[3];
                /*!< Offset: 0xF00 (R/ ) SIC Debug Information Register */
    volatile uint32_t reserved_4[49]; /*!< Offset: 0xF0C-0xFCF Reserved */
    volatile uint32_t pidr4;
                /*!< Offset: 0xFD0 (R/ ) Peripheral ID 4 */
    volatile uint32_t reserved_5[3];   /*!< Offset: 0xFD4-0xFDC Reserved */
    volatile uint32_t pidr0;
                /*!< Offset: 0xFE0 (R/ ) Peripheral ID 0 */
    volatile uint32_t pidr1;
                /*!< Offset: 0xFE4 (R/ ) Peripheral ID 1 */
    volatile uint32_t pidr2;
                /*!< Offset: 0xFE8 (R/ ) Peripheral ID 2 */
    volatile uint32_t pidr3;
                /*!< Offset: 0xFEC (R/ ) Peripheral ID 3 */
    volatile uint32_t cidr0;
                /*!< Offset: 0xFF0 (R/ ) Component ID 0 */
    volatile uint32_t cidr1;
                /*!< Offset: 0xFF4 (R/ ) Component ID 1 */
    volatile uint32_t cidr2;
                /*!< Offset: 0xFF8 (R/ ) Component ID 2 */
    volatile uint32_t cidr3;
                /*!< Offset: 0xFFC (R/ ) Component ID 3 */
    volatile uint32_t htr[];
                /*!< Offset: 0x1000 (R/W) Hash Tag RAM */
};
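
/*
 * Layout sanity-check sketch (illustrative, not compiled here): the offsets
 * documented in the comments above can be verified at build time with
 * compile-time asserts, for example:
 *
 *     _Static_assert(offsetof(struct _sic_reg_map_t, pmcr)  == 0x060u, "");
 *     _Static_assert(offsetof(struct _sic_reg_map_t, dr)    == 0x100u, "");
 *     _Static_assert(offsetof(struct _sic_reg_map_t, pidr4) == 0xFD0u, "");
 *     _Static_assert(offsetof(struct _sic_reg_map_t, cidr3) == 0xFFCu, "");
 */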

static inline uint8_t get_dr_am(struct sic_dev_t *dev)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;

    return 1 << (p_sic->cbcr & 0xF);
}

static inline bool have_pmon(struct sic_dev_t *dev)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;

    return (p_sic->cbcr >> 4) & 0x1;
}

static inline uint8_t is_sic_enabled(struct sic_dev_t *dev)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;

    /* Return the SIC_ENABLE field */
    return p_sic->ccr & 0x1u;
}


size_t sic_page_size_get(struct sic_dev_t *dev)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;

    return 1 << (((p_sic->cbcr >> 8) & 0xF) + 7);
}

uint32_t sic_page_count_get(struct sic_dev_t *dev)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;

    return ((p_sic->cbcr >> 12) & 0x3FF) + 1;
}
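
/*
 * Worked example of the CBCR decoding used above, for a hypothetical CBCR
 * value of 0x001FF412:
 *   - bits [3:0]   = 0x2 -> 1 << 2       = 4 decryption regions (get_dr_am)
 *   - bit  [4]     = 1   -> performance monitor present (have_pmon)
 *   - bits [11:8]  = 0x4 -> 1 << (4 + 7) = 2048-byte pages (sic_page_size_get)
 *   - bits [21:12] = 511 -> 511 + 1      = 512 pages (sic_page_count_get)
 */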

enum sic_error_t sic_enable(struct sic_dev_t *dev)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;

    /* Set the SIC_ENABLE field */
    p_sic->ccr |= 0x1u;

    return SIC_ERROR_NONE;
}

enum sic_error_t sic_disable(struct sic_dev_t *dev)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;

    /* TODO FIXME re-enable this once the FVP sets the SIC_IDLE field */
    /* Wait for the SIC to be idle */
    /* while(!(p_sic->istar & (0x1u << 8))) { */
        /* Unset the SIC_ENABLE field */
        p_sic->ccr &= ~(0x1u);
    /* } */

    return SIC_ERROR_NONE;
}

enum sic_error_t sic_auth_init(struct sic_dev_t *dev,
                               enum sic_digest_size_t digest_size,
                               enum sic_digest_config_t digest_config,
                               uintptr_t base, size_t size)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;
    uintptr_t end;

    /* The CCR register cannot be written while the SIC is enabled */
    if (is_sic_enabled(dev)) {
        return SIC_ERROR_INVALID_OP_WHILE_ENABLED;
    }

    if (base & 0xFFFu) {
        return SIC_ERROR_INVALID_ALIGNMENT;
    }

    if (size & 0xFFFu) {
        return SIC_ERROR_INVALID_ALIGNMENT;
    }

    /* Auth region size must be non-zero and smaller than 8MiB */
    if (size == 0x0u || size >= 0x800000u) {
        return SIC_ERROR_INVALID_SIZE;
    }

    end = base + size;
    /* Prevent overflow */
    if (base > end) {
        return SIC_ERROR_INVALID_SIZE;
    }

    p_sic->arba = base;
    p_sic->area = end;

    /* Set the DIGEST_SIZE field */
    p_sic->ccr &= ~(0x3u << 5);
    p_sic->ccr |= (digest_size & 0x3u) << 5;

    if (digest_size == SIC_DIGEST_SIZE_128) {
        /* Set the DIGEST_COMPARISON_MODE field */
        p_sic->ccr &= ~(0x1u << 4);
        p_sic->ccr |= (digest_config & 0x1u) << 4;
    }

    return SIC_ERROR_NONE;
}
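
/*
 * Illustrative usage sketch (not part of the driver). The device handle,
 * base address, size and digest comparison mode below are hypothetical
 * placeholders; the call follows the constraints checked above: SIC
 * disabled, base and size 4KiB-aligned, size non-zero and below 8MiB.
 *
 *     enum sic_digest_config_t digest_cfg = ...; // platform-chosen mode
 *     enum sic_error_t err;
 *
 *     sic_disable(&SIC_DEV_S);
 *     err = sic_auth_init(&SIC_DEV_S, SIC_DIGEST_SIZE_128, digest_cfg,
 *                         0x70000000u, 0x100000u);
 *     if (err == SIC_ERROR_NONE) {
 *         err = sic_auth_enable(&SIC_DEV_S);
 *     }
 */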

enum sic_error_t sic_auth_enable(struct sic_dev_t *dev)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;

    /* The CCR register cannot be written while the SIC is enabled */
    if (is_sic_enabled(dev)) {
        return SIC_ERROR_INVALID_OP_WHILE_ENABLED;
    }

    /* Set the AUTHENTICATION_ENABLE field */
    p_sic->ccr |= (0x1u << 1);

    /* Set the AENG_CLOCK_GATING_ENABLE field */
    p_sic->ccr |= (0x1u << 8);

    /* Unset the PAGE_INVALIDATE field */
    p_sic->aecr &= ~(0x1u);

    return SIC_ERROR_NONE;
}

enum sic_error_t sic_auth_disable(struct sic_dev_t *dev)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;
    enum sic_error_t err;

    /* The CCR register cannot be written while the SIC is enabled */
    if (is_sic_enabled(dev)) {
        return SIC_ERROR_INVALID_OP_WHILE_ENABLED;
    }

    /* Unset the AUTHENTICATION_ENABLE field */
    p_sic->ccr &= ~(0x1u << 1);

    /* Unset the AENG_CLOCK_GATING_ENABLE field */
    p_sic->ccr &= ~(0x1u << 8);

    err = sic_auth_invalidate_pages(dev);

    return err;
}

enum sic_error_t sic_auth_invalidate_pages(struct sic_dev_t *dev)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;

    /* The AECR register cannot be written while the SIC is enabled */
    if (is_sic_enabled(dev)) {
        return SIC_ERROR_INVALID_OP_WHILE_ENABLED;
    }

    /* Set the PAGE_INVALIDATE field */
    p_sic->aecr |= 0x1u;

    return SIC_ERROR_NONE;
}

enum sic_error_t sic_auth_axim_cfg_set(struct sic_dev_t *dev,
                                       const struct sic_auth_axim_config_t *cfg)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;

    /* The AECR register cannot be written while the SIC is enabled */
    if (is_sic_enabled(dev)) {
        return SIC_ERROR_INVALID_OP_WHILE_ENABLED;
    }

    /* Set the AXIM_ARPROT0_CONTROL field */
    p_sic->aecr &= ~(0x3u << 18);
    p_sic->aecr |= (cfg->priv & 0x3u) << 18;

    /* Set the AXIM_ARPROT1_CONTROL field */
    p_sic->aecr &= ~(0x3u << 20);
    p_sic->aecr |= (cfg->secure & 0x3u) << 20;

    /* Set the AXIM_ARPROT2_CONTROL field */
    p_sic->aecr &= ~(0x3u << 22);
    p_sic->aecr |= (cfg->dinst & 0x3u) << 22;

    /* Set the AXIM_ARCACHE0_CONTROL field */
    p_sic->aecr &= ~(0x3u << 24);
    p_sic->aecr |= (cfg->bufferable & 0x3u) << 24;

    /* Set the AXIM_ARCACHE1_CONTROL field */
    p_sic->aecr &= ~(0x3u << 26);
    p_sic->aecr |= (cfg->modifiable & 0x3u) << 26;

    /* Set the AXIM_ARCACHE2_CONTROL field */
    p_sic->aecr &= ~(0x3u << 28);
    p_sic->aecr |= (cfg->allocate & 0x3u) << 28;

    /* Set the AXIM_ARCACHE3_CONTROL field */
    p_sic->aecr &= ~(0x3u << 30);
    p_sic->aecr |= (cfg->other_allocate & 0x3u) << 30;

    return SIC_ERROR_NONE;
}

enum sic_error_t sic_auth_axim_cfg_get(struct sic_dev_t *dev,
                                       struct sic_auth_axim_config_t *cfg)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;

    /* Get the AXIM_ARPROT0_CONTROL field */
    cfg->priv = (p_sic->aecr >> 18) & 0x3u;

    /* Get the AXIM_ARPROT1_CONTROL field */
    cfg->secure = (p_sic->aecr >> 20) & 0x3u;

    /* Get the AXIM_ARPROT2_CONTROL field */
    cfg->dinst = (p_sic->aecr >> 22) & 0x3u;

    /* Get the AXIM_ARCACHE0_CONTROL field */
    cfg->bufferable = (p_sic->aecr >> 24) & 0x3u;

    /* Get the AXIM_ARCACHE1_CONTROL field */
    cfg->modifiable = (p_sic->aecr >> 26) & 0x3u;

    /* Get the AXIM_ARCACHE2_CONTROL field */
    cfg->allocate = (p_sic->aecr >> 28) & 0x3u;

    /* Get the AXIM_ARCACHE3_CONTROL field */
    cfg->other_allocate = (p_sic->aecr >> 30) & 0x3u;

    return SIC_ERROR_NONE;
}

enum sic_error_t sic_auth_table_set(struct sic_dev_t *dev, uint32_t *data,
                                    size_t data_len_bytes, size_t table_offset)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;
    size_t idx;

    /* The tables cannot be written while the SIC is enabled */
    if (is_sic_enabled(dev)) {
        return SIC_ERROR_INVALID_OP_WHILE_ENABLED;
    }

    if (table_offset + data_len_bytes >= (sic_page_count_get(dev) * 32)) {
        return SIC_ERROR_INVALID_ADDRESS;
    }

    for (idx = 0; idx < data_len_bytes / 4; idx++) {
        p_sic->htr[(table_offset / 4) + idx] = data[idx];
    }

    return SIC_ERROR_NONE;
}
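
/*
 * Worked example of the bound check above, assuming a hypothetical device
 * with 512 authenticated pages: the Hash Tag RAM then holds 512 * 32 =
 * 16384 bytes of tag data, so a write is accepted only while
 * table_offset + data_len_bytes stays strictly below 16384.
 */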

enum sic_error_t sic_decrypt_init(struct sic_dev_t *dev,
                                  enum sic_decrypt_keysize_t decrypt_keysize,
                                  bool decrypt_padding_enable)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;

    /* The CCR register cannot be written while the SIC is enabled */
    if (is_sic_enabled(dev)) {
        return SIC_ERROR_INVALID_OP_WHILE_ENABLED;
    }

    /* Set the DECRYPTION_KEY_SIZE field */
    p_sic->ccr &= ~(0x1u << 3);
    p_sic->ccr |= (decrypt_keysize & 0x1u) << 3;

    /* Set the AENG_PADDING_ENABLE field */
    p_sic->ccr &= ~(0x1u << 12);
    p_sic->ccr |= (decrypt_padding_enable & 0x1u) << 12;

    /* Set the Decryption Enable bit */
    p_sic->ccr |= 0x1u << 2;

    return SIC_ERROR_NONE;
}

static enum sic_error_t check_region_overlap(struct sic_dev_t *dev,
                                             uint8_t region_idx, uintptr_t base,
                                             size_t size)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;
    uintptr_t region_base;
    uintptr_t region_size;
    size_t idx;

    for (idx = 0; idx < get_dr_am(dev); idx++) {
        /* Skip the current region, in case we're reenabling a region that was
         * previously enabled */
        if (idx == region_idx) {
            continue;
        }

        region_base = (p_sic->dr[idx].drr & 0xFFFFFu) << 12;
        region_size = ((p_sic->dr[idx].drr >> 20) & 0x7FF) << 12;

        /* Skip regions that have not been configured */
        if (region_size == 0) {
            continue;
        }

        /* Two regions overlap when each one starts before the other ends */
        if ((base >= region_base && base < region_base + region_size)
         || (region_base >= base && region_base < base + size)) {
            return SIC_ERROR_INVALID_ADDRESS;
        }
    }

    return SIC_ERROR_NONE;
}

enum sic_error_t sic_decrypt_region_enable(struct sic_dev_t *dev,
                                           uint8_t region_idx,
                                           uintptr_t base, size_t size,
                                           uint32_t fw_revision,
                                           uint32_t *nonce, uint32_t *key)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;
    enum sic_error_t err;
    uint32_t idx;
    size_t key_len;

    /* Size must be 4KiB aligned */
    if (size & 0xFFFu) {
        return SIC_ERROR_INVALID_ALIGNMENT;
    }

    /* Decrypt region size must be non-zero and smaller than 8MiB - 4KiB */
    if (size == 0x0u || size >= (0x800000u - 4096u)) {
        return SIC_ERROR_INVALID_SIZE;
    }

    if (region_idx >= get_dr_am(dev)) {
        return SIC_ERROR_INVALID_REGION;
    }

    /* It's not valid to have a region overlap another region */
    err = check_region_overlap(dev, region_idx, base, size);
    if (err != SIC_ERROR_NONE) {
        return err;
    }

    /* Address is right-shifted by 12 */
    p_sic->dr[region_idx].drr &= ~0xFFFFFu;
    p_sic->dr[region_idx].drr |= (base >> 12);

    /* Size is in the uppermost 12 bits. It's measured in 4KiB pages, so
     * right-shift the size by 12 to get the page amount. Bit 31 _must not_ be
     * set.
     */
    p_sic->dr[region_idx].drr &= ~0xFFF00000u;
    p_sic->dr[region_idx].drr |= ((size >> 12) & 0x7FF) << 20;

    /* The FW revision is used as the dr-specific portion of the IV */
    p_sic->dr[region_idx].div = fw_revision;

    /* The Nonce is 2 words */
    for (idx = 0; idx < 2; idx++) {
        p_sic->dr[region_idx].dnw[idx] = nonce[idx];
    }

    /* The key is 4 words, plus another 4 if the 256-bit key bit is set in the
     * CCR. If the key is NULL, assume it is set in another way (e.g. by KMU
     * export).
     */
    if (key != NULL) {
        key_len = 4 + (((p_sic->ccr >> 3) & 0x1) * 4);
        for (idx = 0; idx < key_len; idx++) {
            p_sic->dr[region_idx].dkw[idx] = key[idx];
        }
    }

    return SIC_ERROR_NONE;
}
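
/*
 * Illustrative usage sketch (not part of the driver). The device handle,
 * key size, region base/size, firmware revision, nonce and key below are
 * hypothetical placeholders.
 *
 *     enum sic_decrypt_keysize_t keysize = ...; // 128- or 256-bit key
 *     uint32_t nonce[2] = { ... };              // per-image nonce
 *     uint32_t key[8]   = { ... };              // or pass NULL if the key is
 *                                               // loaded e.g. by KMU export
 *     enum sic_error_t err;
 *
 *     err = sic_decrypt_init(&SIC_DEV_S, keysize, false);
 *     if (err == SIC_ERROR_NONE) {
 *         err = sic_decrypt_region_enable(&SIC_DEV_S, 0, 0x70100000u,
 *                                         0x80000u, fw_revision, nonce, key);
 *     }
 */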

enum sic_error_t sic_decrypt_region_disable(struct sic_dev_t *dev,
                                            uint8_t region_idx)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;

    if (region_idx >= get_dr_am(dev)) {
        return SIC_ERROR_INVALID_REGION;
    }

    /* Clearing the DRR disables the region (base and size fields zeroed) */
    p_sic->dr[region_idx].drr = 0x0u;

    return SIC_ERROR_NONE;
}

enum sic_error_t sic_decrypt_mitigations_set(struct sic_dev_t *dev,
                                             const struct sic_decrypt_mitigations_config_t *cfg)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;

    /* The AESCR register cannot be written while the SIC is enabled */
    if (is_sic_enabled(dev)) {
        return SIC_ERROR_INVALID_OP_WHILE_ENABLED;
    }

    /* Set the AES_DFA_EN field */
    p_sic->aescr &= ~(0x1u << 0);
    p_sic->aescr |= (cfg->aes_dfa_enable & 0x1u) << 0;

    /* Set the AES_DUMMY_EN field */
    p_sic->aescr &= ~(0x1u << 1);
    p_sic->aescr |= (cfg->aes_dummy_enable & 0x1u) << 1;

    /* Set the AES_DUMMY_VALID_PHASE_EN field */
    p_sic->aescr &= ~(0x1u << 2);
    p_sic->aescr |= (cfg->aes_dummy_valid_phase_enable & 0x1u) << 2;

    /* Set the AES_DUMMY_VALID_KEY_EN field */
    p_sic->aescr &= ~(0x1u << 3);
    p_sic->aescr |= (cfg->aes_dummy_valid_key_enable & 0x1u) << 3;

    /* Set the AES_DR_PRE_MAX_ROUNDS field */
    p_sic->aescr &= ~(0x7u << 4);
    p_sic->aescr |= (cfg->aes_dr_pre_rounds_max & 0x7u) << 4;

    /* Set the AES_DR_POST_MAX_ROUNDS field */
    p_sic->aescr &= ~(0x7u << 7);
    p_sic->aescr |= (cfg->aes_dr_post_rounds_max & 0x7u) << 7;

    /* Set the AES_DR_VALID_MAX_ROUNDS field */
    p_sic->aescr &= ~(0x3u << 10);
    p_sic->aescr |= (cfg->aes_dr_valid_rounds_max & 0x3u) << 10;

    return SIC_ERROR_NONE;
}

enum sic_error_t sic_decrypt_mitigations_get(struct sic_dev_t *dev,
                                             struct sic_decrypt_mitigations_config_t *cfg)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;

    /* Get the AES_DFA_EN field */
    cfg->aes_dfa_enable = (p_sic->aescr >> 0) & 0x1u;

    /* Get the AES_DUMMY_EN field */
    cfg->aes_dummy_enable = (p_sic->aescr >> 1) & 0x1u;

    /* Get the AES_DUMMY_VALID_PHASE_EN field */
    cfg->aes_dummy_valid_phase_enable = (p_sic->aescr >> 2) & 0x1u;

    /* Get the AES_DUMMY_VALID_KEY_EN field */
    cfg->aes_dummy_valid_key_enable = (p_sic->aescr >> 3) & 0x1u;

    /* Get the AES_DR_PRE_MAX_ROUNDS field */
    cfg->aes_dr_pre_rounds_max = (p_sic->aescr >> 4) & 0x7u;

    /* Get the AES_DR_POST_MAX_ROUNDS field */
    cfg->aes_dr_post_rounds_max = (p_sic->aescr >> 7) & 0x7u;

    /* Get the AES_DR_VALID_MAX_ROUNDS field */
    cfg->aes_dr_valid_rounds_max = (p_sic->aescr >> 10) & 0x3u;

    return SIC_ERROR_NONE;
}

enum sic_error_t sic_decrypt_rbg_seed_set(struct sic_dev_t *dev,
                                          const uint8_t *seed,
                                          size_t seed_len)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;
    uint32_t seed_start;
    uint32_t idx;

    /* The AESRBGR register cannot be written while the SIC is enabled */
    if (is_sic_enabled(dev)) {
        return SIC_ERROR_INVALID_OP_WHILE_ENABLED;
    }

    if (p_sic->aesrbgcr == 16) {
        return SIC_ERROR_RBG_SEED_ALREADY_SET;
    }

    seed_start = p_sic->aesrbgcr;

    for (idx = seed_start; idx < 16 && idx < seed_start + seed_len; idx++) {
        p_sic->aesrbgr = seed[idx];

        while (p_sic->aesrbgcr == idx) {}
    }

    return SIC_ERROR_NONE;
}
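
/*
 * Illustrative usage sketch (not part of the driver): feeding the 16-byte
 * RBG seed from an entropy source before the SIC is enabled. get_entropy()
 * and SIC_DEV_S are hypothetical placeholders.
 *
 *     uint8_t seed[16];
 *     enum sic_error_t err;
 *
 *     if (get_entropy(seed, sizeof(seed)) == 0) {
 *         err = sic_decrypt_rbg_seed_set(&SIC_DEV_S, seed, sizeof(seed));
 *     }
 */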

enum sic_error_t sic_pmon_enable(struct sic_dev_t *dev,
                                 enum sic_pmon_counting_mode_t counting_mode,
                                 bool timer_enable, uint32_t timer_val)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;

    if (!have_pmon(dev)) {
        return SIC_ERROR_NO_HW_SUPPORT;
    }

    /* Set the PMON_TIMER_ENABLE field */
    p_sic->pmcr &= ~(0x1u << 1);
    p_sic->pmcr |= (timer_enable & 0x1u) << 1;

    /* Set the PMON_COUNTING_MODE field */
    p_sic->pmcr &= ~(0x1u << 2);
    p_sic->pmcr |= (counting_mode & 0x1u) << 2;

    /* Set the PMON_TIMER field */
    p_sic->pmcr &= ~(0xFFFFFFu << 8);
    if (timer_enable) {
        p_sic->pmcr |= (timer_val & 0xFFFFFFu) << 8;
    }

    /* Set the PMON_ENABLE field */
    p_sic->pmcr |= 0x1u;

    return SIC_ERROR_NONE;
}

enum sic_error_t sic_pmon_disable(struct sic_dev_t *dev)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;

    if (!have_pmon(dev)) {
        return SIC_ERROR_NO_HW_SUPPORT;
    }

    /* Unset the PMON_ENABLE field */
    p_sic->pmcr &= ~(0x1u);

    return SIC_ERROR_NONE;
}

enum sic_error_t sic_pmon_get_stats(struct sic_dev_t *dev,
                                    struct sic_pmon_counters_t *counters)
{
    struct _sic_reg_map_t* p_sic = (struct _sic_reg_map_t*)dev->cfg->base;

    if (!have_pmon(dev)) {
        return SIC_ERROR_NO_HW_SUPPORT;
    }

    counters->page_hit_counter  = p_sic->pmphc;
    counters->page_miss_counter = p_sic->pmpmc;
    counters->bypass_counter    = p_sic->pmbc;

    return SIC_ERROR_NONE;
}
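
/*
 * Illustrative usage sketch (not part of the driver): sampling the
 * performance counters over a measurement window. SIC_DEV_S and the
 * counting mode value are hypothetical placeholders.
 *
 *     enum sic_pmon_counting_mode_t mode = ...; // platform-chosen mode
 *     struct sic_pmon_counters_t counters;
 *     enum sic_error_t err;
 *
 *     err = sic_pmon_enable(&SIC_DEV_S, mode, false, 0);
 *     // ... run the workload to be measured ...
 *     if (err == SIC_ERROR_NONE) {
 *         err = sic_pmon_get_stats(&SIC_DEV_S, &counters);
 *         (void)sic_pmon_disable(&SIC_DEV_S);
 *     }
 */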