/*
 * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * \file kmu_drv.c
 * \brief Driver for Arm KMU.
 */

#include "kmu_drv.h"

#include "fatal_error.h"
#ifdef KMU_CONFIG_EXTERNAL_DPA_HARDENED_WORD_COPY
#include "dpa_hardened_word_copy.h"
#endif /* KMU_CONFIG_EXTERNAL_DPA_HARDENED_WORD_COPY */

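/*
 * Memory map of the KMU register frame. Offsets in the comments below are
 * relative to the device base address; the frame is assumed to be the usual
 * 4KB (0x1000-byte) block ending with the CoreSight Peripheral/Component ID
 * registers at 0xFD0-0xFFC.
 */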
struct _kmu_reg_map_t {
    volatile uint32_t kmubc;
                /*!< Offset: 0x000 (R/ ) KMU Build Configuration Register */
    volatile uint32_t kmuis;
                /*!< Offset: 0x004 (R/ ) KMU Interrupt Status Register */
    volatile uint32_t kmuie;
                /*!< Offset: 0x008 (R/W) KMU Interrupt Enable Register */
    volatile uint32_t kmuic;
                /*!< Offset: 0x00C (R/W) KMU Interrupt Clear Register */
    volatile uint32_t kmuprbgsi;
                /*!< Offset: 0x010 (R/W) PRBG Seed Input Register */
    volatile uint32_t reserved_0[7];
                /*!< Offset: 0x014-0x02C Reserved */
    volatile uint32_t kmuksc[32];
                /*!< Offset: 0x030 (R/W) KMU Key Slot Configuration Register */
    volatile uint32_t kmudkpa[32];
                /*!< Offset: 0x0B0 (R/W) KMU Destination Key Port Address
                 *                       Register */
    volatile uint32_t kmuksk[32][8];
                /*!< Offset: 0x130 (R/W) KMU Key Slot Register */
    volatile uint32_t kmurd_8;
                /*!< Offset: 0x530 (R/ ) KMU 8-cycle-limit random delay Register */
    volatile uint32_t kmurd_16;
                /*!< Offset: 0x534 (R/ ) KMU 16-cycle-limit random delay Register */
    volatile uint32_t kmurd_32;
                /*!< Offset: 0x538 (R/ ) KMU 32-cycle-limit random delay Register */
    volatile uint32_t reserved_1[677];
                /*!< Offset: 0x53C-0xFCC Reserved */
    volatile uint32_t pidr4;
                /*!< Offset: 0xFD0 (R/ ) Peripheral ID 4 */
    volatile uint32_t reserved_2[3];
                /*!< Offset: 0xFD4-0xFDC Reserved */
    volatile uint32_t pidr0;
                /*!< Offset: 0xFE0 (R/ ) Peripheral ID 0 */
    volatile uint32_t pidr1;
                /*!< Offset: 0xFE4 (R/ ) Peripheral ID 1 */
    volatile uint32_t pidr2;
                /*!< Offset: 0xFE8 (R/ ) Peripheral ID 2 */
    volatile uint32_t pidr3;
                /*!< Offset: 0xFEC (R/ ) Peripheral ID 3 */
    volatile uint32_t cidr0;
                /*!< Offset: 0xFF0 (R/ ) Component ID 0 */
    volatile uint32_t cidr1;
                /*!< Offset: 0xFF4 (R/ ) Component ID 1 */
    volatile uint32_t cidr2;
                /*!< Offset: 0xFF8 (R/ ) Component ID 2 */
    volatile uint32_t cidr3;
                /*!< Offset: 0xFFC (R/ ) Component ID 3 */
};

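/**
 * \brief Initialise the KMU: feed the PRBG seed into the seed input register
 *        one 32-bit word at a time, lock the seed register and enable all
 *        KMU interrupt sources.
 *
 * \note  \p prbg_seed must be 4-byte aligned and at least KMU_PRBG_SEED_LEN
 *        bytes long.
 */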
enum kmu_error_t kmu_init(struct kmu_dev_t *dev, uint8_t *prbg_seed)
{
    uint32_t *p_prbg_seed_word = (uint32_t *)prbg_seed;
    struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;
    uint32_t idx;

    if ((uintptr_t)p_prbg_seed_word & (sizeof(uint32_t) - 1)) {
        FATAL_ERR(KMU_ERROR_INVALID_ALIGNMENT);
        return KMU_ERROR_INVALID_ALIGNMENT;
    }

    for (idx = 0; idx < KMU_PRBG_SEED_LEN / sizeof(uint32_t); idx++) {
        p_kmu->kmuprbgsi = p_prbg_seed_word[idx];
    }

    /* The lock can be done on any of the kmuksc registers, so we choose #0 */
    p_kmu->kmuksc[0] |= KMU_KMUKSC_L_KMUPRBGSI_MASK;

    /* TODO: enable interrupts more selectively */
    p_kmu->kmuie = 0xFFFFFFFFu;

    return KMU_ERROR_NONE;
}

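/**
 * \brief Read back the export configuration of a key slot from its KMUDKPA
 *        and KMUKSC registers into \p config.
 */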
enum kmu_error_t kmu_get_key_export_config(struct kmu_dev_t *dev, uint32_t slot,
                                           struct kmu_key_export_config_t *config)
{
    struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;

    if (slot >= KMU_GET_NKS(p_kmu)) {
        FATAL_ERR(KMU_ERROR_INVALID_SLOT);
        return KMU_ERROR_INVALID_SLOT;
    }

    config->export_address = p_kmu->kmudkpa[slot];

    config->destination_port_write_delay =
        (p_kmu->kmuksc[slot] & KMU_KMUKSC_DPWD_MASK) >> KMU_KMUKSC_DPWD_OFF;

    config->destination_port_address_increment =
        (p_kmu->kmuksc[slot] & KMU_KMUKSC_DPAI_MASK) >> KMU_KMUKSC_DPAI_OFF;

    config->destination_port_data_width_code =
        (p_kmu->kmuksc[slot] & KMU_KMUKSC_DPDW_MASK) >> KMU_KMUKSC_DPDW_OFF;

    config->destination_port_data_writes_code =
        (p_kmu->kmuksc[slot] & KMU_KMUKSC_NDPW_MASK) >> KMU_KMUKSC_NDPW_OFF;

    config->new_mask_for_next_key_writes =
        (p_kmu->kmuksc[slot] & KMU_KMUKSC_NMNKW_MASK) >> KMU_KMUKSC_NMNKW_OFF;

    config->write_mask_disable =
        (p_kmu->kmuksc[slot] & KMU_KMUKSC_WMD_MASK) >> KMU_KMUKSC_WMD_OFF;

    return KMU_ERROR_NONE;
}

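/**
 * \brief Program the export configuration of a key slot (destination port
 *        address, write delay, address increment, data width, number of
 *        writes and write-mask behaviour). Fails if the slot's export
 *        configuration has already been locked.
 */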
enum kmu_error_t kmu_set_key_export_config(struct kmu_dev_t *dev, uint32_t slot,
                                           const struct kmu_key_export_config_t *config)
{
    struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;
    enum kmu_error_t err;

    if (slot >= KMU_GET_NKS(p_kmu)) {
        FATAL_ERR(KMU_ERROR_INVALID_SLOT);
        return KMU_ERROR_INVALID_SLOT;
    }

    err = kmu_get_key_export_config_locked(dev, slot);
    if (err != KMU_ERROR_NONE) {
        return err;
    }

    p_kmu->kmudkpa[slot] = config->export_address;

    p_kmu->kmuksc[slot] &= ~KMU_KMUKSC_DPWD_MASK;
    p_kmu->kmuksc[slot] |=
        ((config->destination_port_write_delay << KMU_KMUKSC_DPWD_OFF)
         & KMU_KMUKSC_DPWD_MASK);

    p_kmu->kmuksc[slot] &= ~KMU_KMUKSC_DPAI_MASK;
    p_kmu->kmuksc[slot] |=
        ((config->destination_port_address_increment << KMU_KMUKSC_DPAI_OFF)
         & KMU_KMUKSC_DPAI_MASK);

    p_kmu->kmuksc[slot] &= ~KMU_KMUKSC_DPDW_MASK;
    p_kmu->kmuksc[slot] |=
        ((config->destination_port_data_width_code << KMU_KMUKSC_DPDW_OFF)
         & KMU_KMUKSC_DPDW_MASK);

    p_kmu->kmuksc[slot] &= ~KMU_KMUKSC_NDPW_MASK;
    p_kmu->kmuksc[slot] |=
        ((config->destination_port_data_writes_code << KMU_KMUKSC_NDPW_OFF)
         & KMU_KMUKSC_NDPW_MASK);

    p_kmu->kmuksc[slot] &= ~KMU_KMUKSC_NMNKW_MASK;
    p_kmu->kmuksc[slot] |=
        ((config->new_mask_for_next_key_writes << KMU_KMUKSC_NMNKW_OFF)
         & KMU_KMUKSC_NMNKW_MASK);

    p_kmu->kmuksc[slot] &= ~KMU_KMUKSC_WMD_MASK;
    p_kmu->kmuksc[slot] |=
        ((config->write_mask_disable << KMU_KMUKSC_WMD_OFF)
         & KMU_KMUKSC_WMD_MASK);

    return KMU_ERROR_NONE;
}

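/**
 * \brief Lock the key registers of a slot by setting the LKSKR bit. Per the
 *        register naming this is a hardware lock, so it is expected to
 *        persist until the KMU is reset.
 */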
enum kmu_error_t kmu_set_key_locked(struct kmu_dev_t *dev, uint32_t slot)
{
    struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;

    if (slot >= KMU_GET_NKS(p_kmu)) {
        FATAL_ERR(KMU_ERROR_INVALID_SLOT);
        return KMU_ERROR_INVALID_SLOT;
    }

    p_kmu->kmuksc[slot] |= KMU_KMUKSC_LKSKR_MASK;

    return KMU_ERROR_NONE;
}

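/**
 * \brief Check whether the key registers of a slot are locked.
 *
 * \return KMU_ERROR_NONE if unlocked, KMU_ERROR_SLOT_LOCKED if locked.
 */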
enum kmu_error_t kmu_get_key_locked(struct kmu_dev_t *dev, uint32_t slot)
{
    struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;

    if (slot >= KMU_GET_NKS(p_kmu)) {
        FATAL_ERR(KMU_ERROR_INVALID_SLOT);
        return KMU_ERROR_INVALID_SLOT;
    }

    if (p_kmu->kmuksc[slot] & KMU_KMUKSC_LKSKR_MASK) {
        NONFATAL_ERR(KMU_ERROR_SLOT_LOCKED);
        return KMU_ERROR_SLOT_LOCKED;
    } else {
        return KMU_ERROR_NONE;
    }
}

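/**
 * \brief Lock the export configuration of a slot by setting the LKS bit,
 *        preventing further changes via kmu_set_key_export_config().
 */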
enum kmu_error_t kmu_set_key_export_config_locked(struct kmu_dev_t *dev,
                                                  uint32_t slot)
{
    struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;

    if (slot >= KMU_GET_NKS(p_kmu)) {
        FATAL_ERR(KMU_ERROR_INVALID_SLOT);
        return KMU_ERROR_INVALID_SLOT;
    }

    p_kmu->kmuksc[slot] |= KMU_KMUKSC_LKS_MASK;

    return KMU_ERROR_NONE;
}

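/**
 * \brief Check whether the export configuration of a slot is locked.
 *
 * \return KMU_ERROR_NONE if unlocked, KMU_ERROR_SLOT_LOCKED if locked.
 */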
enum kmu_error_t kmu_get_key_export_config_locked(struct kmu_dev_t *dev,
                                                  uint32_t slot)
{
    struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;

    if (slot >= KMU_GET_NKS(p_kmu)) {
        FATAL_ERR(KMU_ERROR_INVALID_SLOT);
        return KMU_ERROR_INVALID_SLOT;
    }

    if (p_kmu->kmuksc[slot] & KMU_KMUKSC_LKS_MASK) {
        NONFATAL_ERR(KMU_ERROR_SLOT_LOCKED);
        return KMU_ERROR_SLOT_LOCKED;
    } else {
        return KMU_ERROR_NONE;
    }
}

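/**
 * \brief Invalidate a key slot by setting the IKS bit. If the AIKSWE error
 *        bit is raised in the interrupt status register the invalidation is
 *        reported as an internal error; all interrupts are cleared before
 *        returning.
 */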
enum kmu_error_t kmu_set_slot_invalid(struct kmu_dev_t *dev, uint32_t slot)
{
    struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;
    enum kmu_error_t err;

    if (slot >= KMU_GET_NKS(p_kmu)) {
        FATAL_ERR(KMU_ERROR_INVALID_SLOT);
        return KMU_ERROR_INVALID_SLOT;
    }

    p_kmu->kmuksc[slot] |= KMU_KMUKSC_IKS_MASK;

    if (p_kmu->kmuis & KMU_KMISR_AIKSWE_MASK) {
        err = KMU_ERROR_INTERNAL_ERROR;
    } else {
        err = KMU_ERROR_NONE;
    }

    p_kmu->kmuic = 0xFFFFFFFFu;
    return err;
}

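/**
 * \brief Check whether a key slot has been invalidated (KSIP bit set).
 *
 * \return KMU_ERROR_NONE if the slot is still valid,
 *         KMU_ERROR_SLOT_INVALIDATED otherwise.
 */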
enum kmu_error_t kmu_get_slot_invalid(struct kmu_dev_t *dev, uint32_t slot)
{
    struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;

    if (slot >= KMU_GET_NKS(p_kmu)) {
        FATAL_ERR(KMU_ERROR_INVALID_SLOT);
        return KMU_ERROR_INVALID_SLOT;
    }

    if (p_kmu->kmuksc[slot] & KMU_KMUKSC_KSIP_MASK) {
        NONFATAL_ERR(KMU_ERROR_SLOT_INVALIDATED);
        return KMU_ERROR_SLOT_INVALIDATED;
    } else {
        return KMU_ERROR_NONE;
    }
}

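/**
 * \brief Write a key into a slot's key registers. The key buffer must be
 *        4-byte aligned and the length a multiple of 4 bytes, at most 32
 *        bytes. When KMU_CONFIG_EXTERNAL_DPA_HARDENED_WORD_COPY is defined
 *        the copy is delegated to dpa_hardened_word_copy() to reduce
 *        side-channel leakage.
 */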
enum kmu_error_t kmu_set_key(struct kmu_dev_t *dev, uint32_t slot, uint8_t *key,
                             size_t key_len)
{
    enum kmu_error_t err;
    struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;
    uint32_t* p_key_word = (uint32_t*)key;
    size_t idx;

    if ((uintptr_t)key & (sizeof(uint32_t) - 1)) {
        FATAL_ERR(KMU_ERROR_INVALID_ALIGNMENT);
        return KMU_ERROR_INVALID_ALIGNMENT;
    }

    if ((key_len & (sizeof(uint32_t) - 1)) || key_len > 32) {
        FATAL_ERR(KMU_ERROR_INVALID_LENGTH);
        return KMU_ERROR_INVALID_LENGTH;
    }

    if (slot >= KMU_GET_NKS(p_kmu)) {
        FATAL_ERR(KMU_ERROR_INVALID_SLOT);
        return KMU_ERROR_INVALID_SLOT;
    }

    err = kmu_get_key_locked(dev, slot);
    if (err != KMU_ERROR_NONE) {
        FATAL_ERR(err);
        return err;
    }

#ifndef KMU_CONFIG_EXTERNAL_DPA_HARDENED_WORD_COPY
    for (idx = 0; idx < key_len / sizeof(uint32_t); idx++) {
        p_kmu->kmuksk[slot][idx] = p_key_word[idx];
    }
#else
    (void)idx;
    dpa_hardened_word_copy(p_kmu->kmuksk[slot], p_key_word, key_len / sizeof(uint32_t));
#endif /* KMU_CONFIG_EXTERNAL_DPA_HARDENED_WORD_COPY */

    /* Check whether the slot had already been written (multiple-write error) */
    if (p_kmu->kmuis & KMU_KMISR_MWKSW_MASK) {
        p_kmu->kmuis &= ~KMU_KMISR_MWKSW_MASK;
        FATAL_ERR(KMU_ERROR_SLOT_ALREADY_WRITTEN);
        return KMU_ERROR_SLOT_ALREADY_WRITTEN;
    }

    return KMU_ERROR_NONE;
}

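/**
 * \brief Read a key back from a slot's key registers into \p buf. Subject to
 *        the same alignment and length constraints as kmu_set_key(), and
 *        only possible while the slot is not locked.
 */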
enum kmu_error_t kmu_get_key(struct kmu_dev_t *dev, uint32_t slot, uint8_t *buf,
                             size_t buf_len)
{
    enum kmu_error_t err;
    struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;
    uint32_t* p_buf_word = (uint32_t*)buf;
    size_t idx;

    if ((uintptr_t)buf & (sizeof(uint32_t) - 1)) {
        FATAL_ERR(KMU_ERROR_INVALID_ALIGNMENT);
        return KMU_ERROR_INVALID_ALIGNMENT;
    }

    if ((buf_len & (sizeof(uint32_t) - 1)) || buf_len > 32) {
        FATAL_ERR(KMU_ERROR_INVALID_LENGTH);
        return KMU_ERROR_INVALID_LENGTH;
    }

    if (slot >= KMU_GET_NKS(p_kmu)) {
        FATAL_ERR(KMU_ERROR_INVALID_SLOT);
        return KMU_ERROR_INVALID_SLOT;
    }

    err = kmu_get_key_locked(dev, slot);
    if (err != KMU_ERROR_NONE) {
        FATAL_ERR(err);
        return err;
    }

#ifndef KMU_CONFIG_EXTERNAL_DPA_HARDENED_WORD_COPY
    for (idx = 0; idx < buf_len / sizeof(uint32_t); idx++) {
        p_buf_word[idx] = p_kmu->kmuksk[slot][idx];
    }
#else
    (void)idx;
    dpa_hardened_word_copy(p_buf_word, p_kmu->kmuksk[slot], buf_len / sizeof(uint32_t));
#endif /* KMU_CONFIG_EXTERNAL_DPA_HARDENED_WORD_COPY */

    return KMU_ERROR_NONE;
}

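/**
 * \brief Return a pointer to (and the size of) a slot's key registers so
 *        that callers can access the key material in place. Only allowed
 *        while the slot is not locked.
 */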
enum kmu_error_t kmu_get_key_buffer_ptr(struct kmu_dev_t *dev, uint32_t slot,
                                        volatile uint32_t **key_slot,
                                        size_t *slot_size)
{
    enum kmu_error_t err;
    struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;

    if (slot >= KMU_GET_NKS(p_kmu)) {
        FATAL_ERR(KMU_ERROR_INVALID_SLOT);
        return KMU_ERROR_INVALID_SLOT;
    }

    err = kmu_get_key_locked(dev, slot);
    if (err != KMU_ERROR_NONE) {
        return err;
    }

    *key_slot = p_kmu->kmuksk[slot];
    *slot_size = sizeof(p_kmu->kmuksk[slot]);

    return KMU_ERROR_NONE;
}

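/**
 * \brief Export a key slot to its configured destination port address. The
 *        slot is first verified/readied (VKS), then the export is triggered
 *        (EK) and the interrupt status register is polled until the export
 *        completes or an error is flagged.
 *
 * A typical (hypothetical) call sequence from a caller might look like:
 *
 * \code
 *     kmu_set_key(dev, slot, key, sizeof(key));
 *     kmu_set_key_locked(dev, slot);
 *     kmu_set_key_export_config(dev, slot, &export_config);
 *     kmu_set_key_export_config_locked(dev, slot);
 *     kmu_export_key(dev, slot);
 * \endcode
 */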
enum kmu_error_t kmu_export_key(struct kmu_dev_t *dev, uint32_t slot)
{
    enum kmu_error_t err;
    struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;

    if (slot >= KMU_GET_NKS(p_kmu)) {
        FATAL_ERR(KMU_ERROR_INVALID_SLOT);
        return KMU_ERROR_INVALID_SLOT;
    }

    /* Trigger the key ready operation */
    p_kmu->kmuksc[slot] |= KMU_KMUKSC_VKS_MASK;

    /* Wait for the ready operation to complete */
    while (p_kmu->kmuksc[slot] & KMU_KMUKSC_VKS_MASK) {}

    /* Check that key readying succeeded; if not, return the matching error */
    if (!(p_kmu->kmuksc[slot] & KMU_KMUKSC_KSR_MASK)) {
        if (p_kmu->kmuis & KMU_KMISR_KSNL_MASK) {
            err = KMU_ERROR_SLOT_NOT_LOCKED;
            goto out;
        } else if (p_kmu->kmuis & KMU_KMISR_KSKRSM_MASK) {
            err = KMU_ERROR_INVALID_LENGTH;
            goto out;
        } else if (p_kmu->kmuis & KMU_KMISR_KSDPANS_MASK) {
            err = KMU_ERROR_INVALID_EXPORT_ADDR;
            goto out;
        }

        /* Shouldn't ever happen; all errors should be one of the three above */
        err = KMU_ERROR_INTERNAL_ERROR;
        goto out;
    }

    /* Trigger the key export operation */
    p_kmu->kmuksc[slot] |= KMU_KMUKSC_EK_MASK;

    /* Wait for the key export to complete */
    while (!(p_kmu->kmuis & KMU_KMISR_KEC_MASK)) {}

    if (p_kmu->kmuis & KMU_KMISR_WTE_MASK) {
        err = KMU_ERROR_INTERNAL_ERROR;
        goto out;
    } else if (p_kmu->kmuis & KMU_KMISR_INPPE_MASK) {
        err = KMU_ERROR_INTERNAL_ERROR;
        goto out;
    } else if (p_kmu->kmuis & KMU_KMISR_AWBE_MASK) {
        err = KMU_ERROR_INTERNAL_ERROR;
        goto out;
    }

    err = KMU_ERROR_NONE;
out:
    p_kmu->kmuic = 0xFFFFFFFFu;
    return err;
}

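/**
 * \brief Insert a random delay by reading one of the KMU random delay
 *        registers. The read itself is assumed to take a random number of
 *        cycles up to the selected limit; the value read back is discarded.
 */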
enum kmu_error_t kmu_random_delay(struct kmu_dev_t *dev,
                                  enum kmu_delay_limit_t limit)
{
    struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;

    /* Reading a kmurd_* register is what produces the delay; the value read
     * back is not needed, so it is discarded. The registers are volatile, so
     * the reads are not optimised away. */
    switch (limit) {
    case KMU_DELAY_LIMIT_8_CYCLES:
        (void)p_kmu->kmurd_8;
        break;
    case KMU_DELAY_LIMIT_16_CYCLES:
        (void)p_kmu->kmurd_16;
        break;
    case KMU_DELAY_LIMIT_32_CYCLES:
        (void)p_kmu->kmurd_32;
        break;
    default:
        FATAL_ERR(KMU_ERROR_INVALID_DELAY_LENGTH);
        return KMU_ERROR_INVALID_DELAY_LENGTH;
    }

    return KMU_ERROR_NONE;
}