1 /*
2  * Copyright (c) 2022, Arm Limited. All rights reserved.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *     http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 /**
18  * \file kmu_drv.c
19  * \brief Driver for Arm KMU.
20  */
21 
22 #include "kmu_drv.h"
23 
/* Memory map of the KMU register block. All registers are 32-bit and
 * accessed via volatile loads/stores; offsets below are relative to the
 * device base address (dev->cfg->base). */
struct _kmu_reg_map_t {
    volatile uint32_t kmubc;
                /*!< Offset: 0x000 (R/ ) KMU Build Configuration Register */
    volatile uint32_t kmuis;
                /*!< Offset: 0x004 (R/ ) KMU Interrupt Status Register */
    volatile uint32_t kmuie;
                /*!< Offset: 0x008 (R/W) KMU Interrupt Enable Register */
    volatile uint32_t kmuic;
                /*!< Offset: 0x00C (R/W) KMU Interrupt Clear Register */
    volatile uint32_t kmuprbgsi;
                /*!< Offset: 0x010 (R/W) PRBG Seed Input Register */
    volatile uint32_t reserved_0[7];
                /*!< Offset: 0x014-0x02C Reserved */
    volatile uint32_t kmuksc[32];
                /*!< Offset: 0x030 (R/W) KMU Key Slot Configuration Register */
    volatile uint32_t kmudkpa[32];
                /*!< Offset: 0x0B0 (R/W) KMU Destination Key Port Address
                 *                       Register */
    volatile uint32_t kmuksk[32][8];
                /*!< Offset: 0x130 (R/W) KMU Key Slot Register */
    volatile uint32_t reserved_1[680];
                /*!< Offset: 0x530-0xFCC Reserved */
    volatile uint32_t pidr4;
                /*!< Offset: 0xFD0 (R/ ) Peripheral ID 4 */
    volatile uint32_t reserved_2[3];
                /*!< Offset: 0xFD4-0xFDC Reserved */
    volatile uint32_t pidr0;
                /*!< Offset: 0xFE0 (R/ ) Peripheral ID 0 */
    volatile uint32_t pidr1;
                /*!< Offset: 0xFE4 (R/ ) Peripheral ID 1 */
    volatile uint32_t pidr2;
                /*!< Offset: 0xFE8 (R/ ) Peripheral ID 2 */
    volatile uint32_t pidr3;
                /*!< Offset: 0xFEC (R/ ) Peripheral ID 3 */
    volatile uint32_t cidr0;
                /*!< Offset: 0xFF0 (R/ ) Component ID 0 */
    volatile uint32_t cidr1;
                /*!< Offset: 0xFF4 (R/ ) Component ID 1 */
    volatile uint32_t cidr2;
                /*!< Offset: 0xFF8 (R/ ) Component ID 2 */
    volatile uint32_t cidr3;
                /*!< Offset: 0xFFC (R/ ) Component ID 3 */
};
67 
kmu_init(struct kmu_dev_t * dev,uint8_t * prbg_seed)68 enum kmu_error_t kmu_init(struct kmu_dev_t *dev, uint8_t *prbg_seed)
69 {
70     uint32_t *p_prgb_seed_word = (uint32_t *)prbg_seed;
71     struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;
72     uint32_t idx;
73 
74     if ((uint32_t)p_prgb_seed_word & (sizeof(uint32_t) - 1)) {
75         return KMU_ERROR_INVALID_ALIGNMENT;
76     }
77 
78     for (idx = 0; idx < KMU_PRBG_SEED_LEN / sizeof(uint32_t); idx++) {
79         p_kmu->kmuprbgsi = p_prgb_seed_word[idx];
80     }
81 
82     /* The lock can be done on any of the kmuksc registers, so we choose #0 */
83     p_kmu->kmuksc[0] |= KMU_KMUKSC_L_KMUPRBGSI_MASK;
84 
85     /* TODO FIXME enable more selectively */
86     p_kmu->kmuie = 0xFFFFFFFFu;
87 
88     return KMU_ERROR_NONE;
89 }
90 
kmu_key_get_export_config(struct kmu_dev_t * dev,uint32_t slot,struct kmu_key_export_config_t * config)91 enum kmu_error_t kmu_key_get_export_config(struct kmu_dev_t *dev, uint32_t slot,
92                                            struct kmu_key_export_config_t *config)
93 {
94     struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;
95 
96     if (slot >= KMU_GET_NKS(p_kmu)) {
97         return KMU_ERROR_INVALID_SLOT;
98     }
99 
100     config->destination_port_write_delay =
101         (p_kmu->kmuksc[slot] & KMU_KMUKSC_DPWD_MASK) >> KMU_KMUKSC_DPWD_OFF;
102 
103     config->destination_port_address_increment =
104         (p_kmu->kmuksc[slot] & KMU_KMUKSC_DPAI_MASK) >> KMU_KMUKSC_DPAI_OFF;
105 
106     config->destination_port_data_width_code =
107         (p_kmu->kmuksc[slot] & KMU_KMUKSC_DPDW_MASK) >> KMU_KMUKSC_DPDW_OFF;
108 
109     config->destination_port_data_writes_code =
110         (p_kmu->kmuksc[slot] & KMU_KMUKSC_NDPW_MASK) >> KMU_KMUKSC_NDPW_OFF;
111 
112     config->new_mask_for_next_key_writes =
113         (p_kmu->kmuksc[slot] & KMU_KMUKSC_NMNKW_MASK) >> KMU_KMUKSC_NMNKW_OFF;
114 
115     config->write_mask_disable =
116         (p_kmu->kmuksc[slot] & KMU_KMUKSC_WMD_MASK) >> KMU_KMUKSC_WMD_OFF;
117 
118     return KMU_ERROR_NONE;
119 }
120 
kmu_key_set_export_config(struct kmu_dev_t * dev,uint32_t slot,struct kmu_key_export_config_t * config)121 enum kmu_error_t kmu_key_set_export_config(struct kmu_dev_t *dev, uint32_t slot,
122                                            struct kmu_key_export_config_t *config)
123 {
124     struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;
125 
126     if (slot >= KMU_GET_NKS(p_kmu)) {
127         return KMU_ERROR_INVALID_SLOT;
128     }
129 
130     p_kmu->kmuksc[slot] &= ~KMU_KMUKSC_DPWD_MASK;
131     p_kmu->kmuksc[slot] |=
132         ((config->destination_port_write_delay << KMU_KMUKSC_DPWD_OFF)
133          & KMU_KMUKSC_DPWD_MASK);
134 
135     p_kmu->kmuksc[slot] &= ~KMU_KMUKSC_DPAI_MASK;
136     p_kmu->kmuksc[slot] |=
137         ((config->destination_port_address_increment << KMU_KMUKSC_DPAI_OFF)
138          & KMU_KMUKSC_DPAI_MASK);
139 
140     p_kmu->kmuksc[slot] &= ~KMU_KMUKSC_DPDW_MASK;
141     p_kmu->kmuksc[slot] |=
142         ((config->destination_port_data_width_code << KMU_KMUKSC_DPDW_OFF)
143          & KMU_KMUKSC_DPDW_MASK);
144 
145     p_kmu->kmuksc[slot] &= ~KMU_KMUKSC_NDPW_MASK;
146     p_kmu->kmuksc[slot] |=
147         ((config->destination_port_data_writes_code << KMU_KMUKSC_NDPW_OFF)
148          & KMU_KMUKSC_NDPW_MASK);
149 
150     p_kmu->kmuksc[slot] &= ~KMU_KMUKSC_NMNKW_MASK;
151     p_kmu->kmuksc[slot] |=
152         ((config->new_mask_for_next_key_writes << KMU_KMUKSC_NMNKW_OFF)
153          & KMU_KMUKSC_NMNKW_MASK);
154 
155     p_kmu->kmuksc[slot] &= ~KMU_KMUKSC_WMD_MASK;
156     p_kmu->kmuksc[slot] |=
157         ((config->write_mask_disable << KMU_KMUKSC_WMD_OFF)
158          & KMU_KMUKSC_WMD_MASK);
159 
160     return KMU_ERROR_NONE;
161 }
162 
kmu_set_key_locked(struct kmu_dev_t * dev,uint32_t slot)163 enum kmu_error_t kmu_set_key_locked(struct kmu_dev_t *dev, uint32_t slot)
164 {
165     struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;
166 
167     if (slot >= KMU_GET_NKS(p_kmu)) {
168         return KMU_ERROR_INVALID_SLOT;
169     }
170 
171     p_kmu->kmuksc[slot] |= KMU_KMUKSC_LKSKR_MASK;
172 
173     return KMU_ERROR_NONE;
174 }
175 
kmu_get_key_locked(struct kmu_dev_t * dev,uint32_t slot)176 enum kmu_error_t kmu_get_key_locked(struct kmu_dev_t *dev, uint32_t slot)
177 {
178     struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;
179 
180     if (slot >= KMU_GET_NKS(p_kmu)) {
181         return KMU_ERROR_INVALID_SLOT;
182     }
183 
184     if (p_kmu->kmuksc[slot] & KMU_KMUKSC_LKSKR_MASK) {
185         return KMU_ERROR_SLOT_LOCKED;
186     } else {
187         return KMU_ERROR_NONE;
188     }
189 }
190 
kmu_set_key_export_config_locked(struct kmu_dev_t * dev,uint32_t slot)191 enum kmu_error_t kmu_set_key_export_config_locked(struct kmu_dev_t *dev,
192                                                   uint32_t slot)
193 {
194     struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;
195 
196     if (slot >= KMU_GET_NKS(p_kmu)) {
197         return KMU_ERROR_INVALID_SLOT;
198     }
199 
200     p_kmu->kmuksc[slot] |= KMU_KMUKSC_LKS_MASK;
201 
202     return KMU_ERROR_NONE;
203 }
204 
kmu_get_key_export_config_locked(struct kmu_dev_t * dev,uint32_t slot)205 enum kmu_error_t kmu_get_key_export_config_locked(struct kmu_dev_t *dev,
206                                                   uint32_t slot)
207 {
208     struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;
209 
210     if (slot >= KMU_GET_NKS(p_kmu)) {
211         return KMU_ERROR_INVALID_SLOT;
212     }
213 
214     if (p_kmu->kmuksc[slot] & KMU_KMUKSC_LKS_MASK) {
215         return KMU_ERROR_SLOT_LOCKED;
216     } else {
217         return KMU_ERROR_NONE;
218     }
219 }
220 
kmu_set_slot_invalid(struct kmu_dev_t * dev,uint32_t slot)221 enum kmu_error_t kmu_set_slot_invalid(struct kmu_dev_t *dev, uint32_t slot)
222 {
223     struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;
224     enum kmu_error_t err;
225 
226     if (slot >= KMU_GET_NKS(p_kmu)) {
227         return KMU_ERROR_INVALID_SLOT;
228     }
229 
230     p_kmu->kmuksc[slot] |= KMU_KMUKSC_IKS_MASK;
231 
232     if (p_kmu->kmuis & KMU_KMISR_AIKSWE_MASK) {
233         err = KMU_ERROR_INTERNAL_ERROR;
234     } else {
235         err = KMU_ERROR_NONE;
236     }
237 
238     p_kmu->kmuic = 0xFFFFFFFFu;
239     return err;
240 }
241 
kmu_get_slot_invalid(struct kmu_dev_t * dev,uint32_t slot)242 enum kmu_error_t kmu_get_slot_invalid(struct kmu_dev_t *dev, uint32_t slot)
243 {
244     struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;
245 
246     if (slot >= KMU_GET_NKS(p_kmu)) {
247         return KMU_ERROR_INVALID_SLOT;
248     }
249 
250     if (p_kmu->kmuksc[slot] | KMU_KMUKSC_KSIP_MASK){
251         return KMU_ERROR_SLOT_INVALIDATED;
252     } else {
253         return KMU_ERROR_NONE;
254     }
255 }
256 
kmu_set_key(struct kmu_dev_t * dev,uint32_t slot,uint8_t * key,size_t key_len)257 enum kmu_error_t kmu_set_key(struct kmu_dev_t *dev, uint32_t slot, uint8_t *key,
258                              size_t key_len)
259 {
260     enum kmu_error_t err;
261     struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;
262     uint32_t* p_key_word = (uint32_t*)key;
263     size_t idx;
264 
265     if ((uint32_t)key & (sizeof(uint32_t) - 1)) {
266         return KMU_ERROR_INVALID_ALIGNMENT;
267     }
268 
269     if (key_len & (sizeof(uint32_t) - 1) || key_len > 32) {
270         return KMU_ERROR_INVALID_LENGTH;
271     }
272 
273     if (slot >= KMU_GET_NKS(p_kmu)) {
274         return KMU_ERROR_INVALID_SLOT;
275     }
276 
277     err = kmu_get_key_locked(dev, slot);
278     if (err != KMU_ERROR_NONE) {
279         return err;
280     }
281 
282     for (idx = 0; idx < key_len / sizeof(uint32_t); idx++) {
283         p_kmu->kmuksk[slot][idx] = p_key_word[idx];
284         if (p_kmu->kmuis & KMU_KMISR_MWKSW_MASK) {
285             p_kmu->kmuis &= ~KMU_KMISR_MWKSW_MASK;
286             return KMU_ERROR_SLOT_ALREADY_WRITTEN;
287         }
288         if (p_kmu->kmuksk[slot][idx] != p_key_word[idx]) {
289             return KMU_ERROR_INTERNAL_ERROR;
290         }
291     }
292 
293     return KMU_ERROR_NONE;
294 }
295 
kmu_get_key(struct kmu_dev_t * dev,uint32_t slot,uint8_t * buf,size_t buf_len)296 enum kmu_error_t kmu_get_key(struct kmu_dev_t *dev, uint32_t slot, uint8_t *buf,
297                     size_t buf_len)
298 {
299     enum kmu_error_t err;
300     struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;
301     uint32_t* p_buf_word = (uint32_t*)buf;
302     size_t idx;
303 
304     if ((uint32_t)buf & (sizeof(uint32_t) - 1)) {
305         return KMU_ERROR_INVALID_ALIGNMENT;
306     }
307 
308     if ((buf_len & (sizeof(uint32_t) - 1)) || buf_len > 32) {
309         return KMU_ERROR_INVALID_LENGTH;
310     }
311 
312     if (slot >= KMU_GET_NKS(p_kmu)) {
313         return KMU_ERROR_INVALID_SLOT;
314     }
315 
316     err = kmu_get_key_locked(dev, slot);
317     if (err != KMU_ERROR_NONE) {
318         return err;
319     }
320 
321     for (idx = 0; idx < buf_len / sizeof(uint32_t); idx++) {
322         p_buf_word[idx] = p_kmu->kmuksk[slot][idx];
323     }
324 
325     return KMU_ERROR_NONE;
326 }
327 
kmu_export_key(struct kmu_dev_t * dev,uint32_t slot)328 enum kmu_error_t kmu_export_key(struct kmu_dev_t *dev, uint32_t slot)
329 {
330     enum kmu_error_t err;
331     struct _kmu_reg_map_t* p_kmu = (struct _kmu_reg_map_t*)dev->cfg->base;
332 
333     if (slot >= KMU_GET_NKS(p_kmu)) {
334         return KMU_ERROR_INVALID_SLOT;
335     }
336 
337     /* Trigger the key ready operation */
338     p_kmu->kmuksc[slot] |= KMU_KMUKSC_VKS_MASK;
339 
340     /* Wait for the ready operation to complete */
341     while (p_kmu->kmuksc[slot] & KMU_KMUKSC_VKS_MASK) {}
342 
343     /* Check that key readying succeeded, if not return the right error */
344     if (!(p_kmu->kmuksc[slot] & KMU_KMUKSC_KSR_MASK)) {
345         if (p_kmu->kmuis & KMU_KMISR_KSNL_MASK) {
346             err = KMU_ERROR_SLOT_NOT_LOCKED;
347             goto out;
348         } else if (p_kmu->kmuis & KMU_KMISR_KSKRSM_MASK) {
349             err = KMU_ERROR_INVALID_LENGTH;
350             goto out;
351         } else if (p_kmu->kmuis & KMU_KMISR_KSDPANS_MASK) {
352             err = KMU_ERROR_INVALID_EXPORT_ADDR;
353             goto out;
354         }
355 
356         /* Shouldn't ever happen, all errors should be one of the three above */
357         err = KMU_ERROR_INTERNAL_ERROR;
358         goto out;
359     }
360 
361     /* Trigger the key export operation */
362     p_kmu->kmuksc[slot] |= KMU_KMUKSC_EK_MASK;
363 
364     /* Wait for the key export to complete */
365     while (p_kmu->kmuis & KMU_KMISR_KEC_MASK) {}
366 
367     if (p_kmu->kmuis & KMU_KMISR_WTE_MASK) {
368         err = KMU_ERROR_INTERNAL_ERROR;
369         goto out;
370     } else if (p_kmu->kmuis & KMU_KMISR_INPPE_MASK) {
371         err = KMU_ERROR_INTERNAL_ERROR;
372         goto out;
373     } else if (p_kmu->kmuis & KMU_KMISR_AWBE_MASK) {
374         err = KMU_ERROR_INTERNAL_ERROR;
375         goto out;
376     }
377 
378     err = KMU_ERROR_NONE;
379 out:
380     p_kmu->kmuic = 0xFFFFFFFFu;
381     return err;
382 }
383