1 /*
2 * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 /**
18 * \file lcm_drv.c
19 * \brief Driver for Arm LCM.
20 */
21
22 #include "lcm_drv.h"
23 #include "tfm_hal_device_header.h"
24 #include "device_definition.h"
25
26 #include "fatal_error.h"
27
28 #include <stddef.h>
29 #include <stdint.h>
30 #include <assert.h>
31
32 #ifdef LCM_DCU_PARITY
33 #define DCU_ENABLED_MASK 0x55555555
34 #define DCU_DISABLED_MASK 0xAAAAAAAA
35 #else
36 #define DCU_ENABLED_MASK 0xFFFFFFFF
37 #define DCU_DISABLED_MASK 0x00000000
38 #endif
39
/* Non-zero filler pattern programmed into key slots that were left entirely
 * zero before a lifecycle transition, so the hardware key zero-count checks
 * do not reject an unprovisioned slot. This is deliberately NOT a secret.
 */
#ifdef INTEGRITY_CHECKER_S
__ALIGNED(INTEGRITY_CHECKER_REQUIRED_ALIGNMENT)
#endif
static uint8_t dummy_key_value[32] = {0x01, 0x02, 0x03, 0x04,
                                      0x01, 0x02, 0x03, 0x04,
                                      0x01, 0x02, 0x03, 0x04,
                                      0x01, 0x02, 0x03, 0x04,
                                      0x01, 0x02, 0x03, 0x04,
                                      0x01, 0x02, 0x03, 0x04,
                                      0x01, 0x02, 0x03, 0x04,
                                      0x01, 0x02, 0x03, 0x04};
51
/* Register map of the LCM peripheral. Offsets are relative to the device base
 * address (dev->cfg->base); the layout must match the hardware exactly, so no
 * member may be added, removed or reordered.
 */
struct _lcm_reg_map_t {
    volatile uint32_t lcs_value;
                /*!< Offset: 0x000 (R/ ) LCM Lifecycle state Register */
    volatile uint32_t key_err;
                /*!< Offset: 0x004 (R/ ) LCM zero count error status Register */
    volatile uint32_t tp_mode;
                /*!< Offset: 0x008 (R/ ) LCM TP Mode (TCI/PCI) Register */
    volatile uint32_t fatal_err;
                /*!< Offset: 0x00C (R/W) LCM Fatal Error mode Enable Register */
    volatile uint32_t dm_rma_lock;
                /*!< Offset: 0x010 (R/W) LCM DRM RMA Flag lock enable */
    volatile uint32_t sp_enable;
                /*!< Offset: 0x014 (R/W) LCM Secure Provisioning enable
                 *   Register */
    volatile uint32_t otp_addr_width;
                /*!< Offset: 0x018 (R/ ) LCM OTP Address Width Register */
    volatile uint32_t otp_size_in_bytes;
                /*!< Offset: 0x01C (R/ ) LCM OTP Size in bytes Register */
    volatile uint32_t gppc;
                /*!< Offset: 0x020 (R/ ) LCM General Purpose Persistent
                 *   Configuration Register */
    volatile uint32_t reserved_0[55];
                /*!< Offset: 0x024-0x0FC Reserved */
    volatile uint32_t dcu_en[4];
                /*!< Offset: 0x100 (R/W) LCM DCU enable Registers */
    volatile uint32_t dcu_lock[4];
                /*!< Offset: 0x110 (R/W) LCM DCU lock Registers */
    volatile uint32_t dcu_sp_disable_mask[4];
                /*!< Offset: 0x120 (R/ ) LCM DCU SP disable mask Registers */
    volatile uint32_t dcu_disable_mask[4];
                /*!< Offset: 0x130 (R/ ) LCM DCU disable mask Registers */
    volatile uint32_t reserved_1[932];
                /*!< Offset: 0x140-0xFCC Reserved */
    volatile uint32_t pidr4;
                /*!< Offset: 0xFD0 (R/ ) Peripheral ID 4 */
    volatile uint32_t reserved_2[3];
                /*!< Offset: 0xFD4-0xFDC Reserved */
    volatile uint32_t pidr0;
                /*!< Offset: 0xFE0 (R/ ) Peripheral ID 0 */
    volatile uint32_t pidr1;
                /*!< Offset: 0xFE4 (R/ ) Peripheral ID 1 */
    volatile uint32_t pidr2;
                /*!< Offset: 0xFE8 (R/ ) Peripheral ID 2 */
    volatile uint32_t pidr3;
                /*!< Offset: 0xFEC (R/ ) Peripheral ID 3 */
    volatile uint32_t cidr0;
                /*!< Offset: 0xFF0 (R/ ) Component ID 0 */
    volatile uint32_t cidr1;
                /*!< Offset: 0xFF4 (R/ ) Component ID 1 */
    volatile uint32_t cidr2;
                /*!< Offset: 0xFF8 (R/ ) Component ID 2 */
    volatile uint32_t cidr3;
                /*!< Offset: 0xFFC (R/ ) Component ID 3 */
    union {
        volatile uint32_t raw_otp[16384];
                /*!< Offset: 0x1000 (R/W) OTP direct access */
        struct lcm_otp_layout_t otp;
    };
};
111
/**
 * \brief Check whether \p ptr is aligned to a 32-bit word boundary.
 *
 * \param[in] ptr  Pointer to check.
 *
 * \return Non-zero if word-aligned, 0 otherwise.
 */
static int is_pointer_word_aligned(void *ptr) {
    /* Convert via uintptr_t rather than uint32_t so the alignment check is
     * also correct on targets where pointers are wider than 32 bits.
     */
    return !((uintptr_t)ptr & (sizeof(uint32_t) - 1));
}
115
rma_erase_all_keys(struct lcm_dev_t * dev)116 static enum lcm_error_t rma_erase_all_keys(struct lcm_dev_t *dev)
117 {
118 enum lcm_error_t err;
119 uint32_t idx;
120 uint32_t otp_overwrite_val = 0xFFFFFFFFu;
121
122 /* Overwrite all secret keys, and rotpk, with all-one words */
123 for (idx = 0; idx < offsetof(struct lcm_otp_layout_t, tp_mode_config);
124 idx += sizeof(uint32_t)) {
125 err = lcm_otp_write(dev, idx, sizeof(otp_overwrite_val),
126 (uint8_t *)&otp_overwrite_val);
127 /* The HW keys are writable in RMA state, but not readable */
128 if (err != LCM_ERROR_NONE && err != LCM_ERROR_WRITE_VERIFY_FAIL) {
129 FATAL_ERR(err);
130 return err;
131 }
132 }
133
134 return LCM_ERROR_NONE;
135 }
136
lcm_init(struct lcm_dev_t * dev)137 enum lcm_error_t lcm_init(struct lcm_dev_t *dev)
138 {
139 struct _lcm_reg_map_t *p_lcm = (struct _lcm_reg_map_t *)dev->cfg->base;
140 enum lcm_lcs_t lcs;
141 enum lcm_error_t err;
142
143 err = lcm_get_lcs(dev, &lcs);
144 if (err != LCM_ERROR_NONE) {
145 return err;
146 }
147
148 if (lcs == LCM_LCS_SE) {
149 if (p_lcm->key_err) {
150 FATAL_ERR(LCM_ERROR_INVALID_KEY);
151 return LCM_ERROR_INVALID_KEY;
152 }
153 } else if (lcs == LCM_LCS_RMA) {
154 err = rma_erase_all_keys(dev);
155 if (err != LCM_ERROR_NONE) {
156 return err;
157 }
158 }
159
160 return LCM_ERROR_NONE;
161 }
162
lcm_get_tp_mode(struct lcm_dev_t * dev,enum lcm_tp_mode_t * mode)163 enum lcm_error_t lcm_get_tp_mode(struct lcm_dev_t *dev, enum lcm_tp_mode_t *mode)
164 {
165 struct _lcm_reg_map_t *p_lcm = (struct _lcm_reg_map_t *)dev->cfg->base;
166
167 *mode = (enum lcm_tp_mode_t)p_lcm->tp_mode;
168
169 return LCM_ERROR_NONE;
170 }
171
/**
 * \brief Set the Test-or-Production (TP) mode.
 *
 * Only permitted in the CM lifecycle state while the TP mode is still
 * Virgin. Programs the tp_mode_config OTP word with a magic constant,
 * verifies it by reading back, then checks the fatal error flag.
 *
 * \param[in] dev   LCM device.
 * \param[in] mode  LCM_TP_MODE_TCI or LCM_TP_MODE_PCI.
 *
 * \return LCM_ERROR_NONE on success, otherwise an LCM error code.
 */
enum lcm_error_t lcm_set_tp_mode(struct lcm_dev_t *dev, enum lcm_tp_mode_t mode)
{
    enum lcm_tp_mode_t curr_mode;
    enum lcm_lcs_t lcs;
    uint32_t mode_reg_value;
    uint32_t readback_reg_value;
    enum lcm_bool_t fatal_err;
    enum lcm_error_t err;

    /* The TP mode may only be programmed from the CM lifecycle state */
    err = lcm_get_lcs(dev, &lcs);
    if (err != LCM_ERROR_NONE) {
        return err;
    }

    if (lcs != LCM_LCS_CM) {
        FATAL_ERR(LCM_ERROR_INVALID_LCS);
        return LCM_ERROR_INVALID_LCS;
    }

    /* The TP mode is one-shot: it can only leave the Virgin state once */
    err = lcm_get_tp_mode(dev, &curr_mode);
    if (err != LCM_ERROR_NONE) {
        return err;
    }

    if(curr_mode != LCM_TP_MODE_VIRGIN) {
        FATAL_ERR(LCM_ERROR_INVALID_TRANSITION);
        return LCM_ERROR_INVALID_TRANSITION;
    }

    switch(mode) {
    case LCM_TP_MODE_TCI:
        /* High hamming-weight magic constant used to enable TCI mode */
        mode_reg_value = 0x0000FFFFu;
        break;
    case LCM_TP_MODE_PCI:
        /* High hamming-weight magic constant used to enable PCI mode */
        mode_reg_value = 0xFFFF0000u;
        break;
    default:
        FATAL_ERR(LCM_ERROR_INVALID_TRANSITION);
        return LCM_ERROR_INVALID_TRANSITION;
    }

    err = lcm_otp_write(dev, offsetof(struct lcm_otp_layout_t, tp_mode_config),
                        sizeof(uint32_t), (uint8_t *)&mode_reg_value);
    if (err != LCM_ERROR_NONE) {
        return err;
    }

    /* Read back independently to confirm the OTP word actually latched */
    err = lcm_otp_read(dev, offsetof(struct lcm_otp_layout_t, tp_mode_config),
                       sizeof(uint32_t), (uint8_t *)&readback_reg_value);
    if (err != LCM_ERROR_NONE) {
        return err;
    }

    if (readback_reg_value != mode_reg_value) {
        FATAL_ERR(LCM_ERROR_INTERNAL_ERROR);
        return LCM_ERROR_INTERNAL_ERROR;
    }

    /* The hardware may latch a fatal error as a result of the programming;
     * check it last so the caller sees the definitive outcome.
     */
    err = lcm_get_fatal_error(dev, &fatal_err);
    if (err != LCM_ERROR_NONE) {
        return err;
    }

    if (fatal_err == LCM_TRUE) {
        FATAL_ERR(LCM_ERROR_FATAL_ERR);
        return LCM_ERROR_FATAL_ERR;
    }

    return LCM_ERROR_NONE;
}
244
lcm_get_sp_enabled(struct lcm_dev_t * dev,enum lcm_bool_t * enabled)245 enum lcm_error_t lcm_get_sp_enabled(struct lcm_dev_t *dev, enum lcm_bool_t *enabled)
246 {
247 struct _lcm_reg_map_t *p_lcm = (struct _lcm_reg_map_t *)dev->cfg->base;
248
249 *enabled = (enum lcm_bool_t)p_lcm->sp_enable;
250
251 return LCM_ERROR_NONE;
252 }
253
/* Apply the hardware-provided SP disable mask to the DCU enable registers
 * before entering secure provisioning mode: every DCU bit the mask does not
 * allow during SP is forced into its "disabled" encoding.
 *
 * NOTE(review): with LCM_DCU_PARITY each DCU appears to be encoded as an
 * enable/disable bit pair (even bit = enabled, odd bit = disabled), which is
 * why the disabled bits are derived by inverting the enabled bits and
 * shifting left by one — confirm against the LCM TRM.
 */
static inline enum lcm_error_t mask_dcus_for_sp_enable(struct lcm_dev_t *dev)
{
    struct _lcm_reg_map_t *p_lcm = (struct _lcm_reg_map_t *)dev->cfg->base;
    size_t idx;
    uint32_t mask_enabled;
    uint32_t mask_disabled;
    uint32_t dcu_val;
    uint32_t mask_val;

    for (idx = 0; idx < LCM_DCU_WIDTH_IN_BYTES / sizeof(uint32_t); idx++) {
        mask_val = p_lcm->dcu_sp_disable_mask[idx];

        /* Split the mask into its enabled-position and disabled-position
         * halves (with parity these are the even and odd bits respectively).
         */
        mask_enabled = mask_val & DCU_ENABLED_MASK;
        mask_disabled = mask_val & DCU_DISABLED_MASK;

        dcu_val = p_lcm->dcu_en[idx];

        /* Keep only the enable bits the mask permits... */
        dcu_val &= mask_enabled;
        /* ...and set the complementary disable bits for everything that was
         * just cleared, where the mask requires it.
         */
        dcu_val |= ((~dcu_val & DCU_ENABLED_MASK) << 1) & mask_disabled;

        p_lcm->dcu_en[idx] = dcu_val;
    }

    return LCM_ERROR_NONE;
}
279
/**
 * \brief Enter secure provisioning mode.
 *
 * Masks the DCUs as required for SP, triggers the SP reset via a magic
 * register write, waits for the reset to take effect, and finally checks
 * that no fatal error was latched.
 *
 * \param[in] dev  LCM device.
 *
 * \return LCM_ERROR_NONE on success, otherwise an LCM error code.
 *
 * \note This function does not return until the SP reset has completed;
 *       it busy-waits (and sleeps via WFI) in the interim.
 */
enum lcm_error_t lcm_set_sp_enabled(struct lcm_dev_t *dev)
{
    struct _lcm_reg_map_t *p_lcm = (struct _lcm_reg_map_t *)dev->cfg->base;
    enum lcm_bool_t fatal_err;
    enum lcm_error_t err;
    uint32_t idx;

    mask_dcus_for_sp_enable(dev);

    /* High hamming-weight magic constant used to trigger secure provisioning
     * mode
     */
    p_lcm->sp_enable = 0x5EC10E1Eu;

    /* Perform a >2000 cycle wait in order for the secure provisioning reset to
     * happen, before checking if it has worked.
     */
    for (idx = 0; idx < 4000; idx++) {
        __NOP();
    }

    /* Put the CPU into an idle state so that the reset can occur */
    __WFI();

    /* Spin until the hardware reports SP mode as active */
    while(p_lcm->sp_enable != LCM_TRUE) {}

    err = lcm_get_fatal_error(dev, &fatal_err);
    if (err != LCM_ERROR_NONE) {
        return err;
    }

    if (fatal_err == LCM_TRUE) {
        FATAL_ERR(LCM_ERROR_FATAL_ERR);
        return LCM_ERROR_FATAL_ERR;
    }

    return LCM_ERROR_NONE;
}
318
lcm_get_fatal_error(struct lcm_dev_t * dev,enum lcm_bool_t * error)319 enum lcm_error_t lcm_get_fatal_error(struct lcm_dev_t *dev, enum lcm_bool_t *error)
320 {
321 struct _lcm_reg_map_t *p_lcm = (struct _lcm_reg_map_t *)dev->cfg->base;
322
323 *error = (enum lcm_bool_t)p_lcm->fatal_err;
324
325 return LCM_ERROR_NONE;
326 }
327
lcm_set_fatal_error(struct lcm_dev_t * dev)328 enum lcm_error_t lcm_set_fatal_error(struct lcm_dev_t *dev)
329 {
330 struct _lcm_reg_map_t *p_lcm = (struct _lcm_reg_map_t *)dev->cfg->base;
331
332 /* High hamming-weight magic constant used to trigger fatal error state */
333 p_lcm->fatal_err = 0xFA7A1EEEu;
334
335 return LCM_ERROR_NONE;
336 }
337
lcm_get_gppc(struct lcm_dev_t * dev,uint32_t * gppc)338 enum lcm_error_t lcm_get_gppc(struct lcm_dev_t *dev, uint32_t *gppc)
339 {
340 struct _lcm_reg_map_t *p_lcm = (struct _lcm_reg_map_t *)dev->cfg->base;
341
342 *gppc = p_lcm->gppc;
343
344 return LCM_ERROR_NONE;
345 }
346
lcm_get_otp_size(struct lcm_dev_t * dev,uint32_t * size)347 enum lcm_error_t lcm_get_otp_size(struct lcm_dev_t *dev, uint32_t *size)
348 {
349 struct _lcm_reg_map_t *p_lcm = (struct _lcm_reg_map_t *)dev->cfg->base;
350
351 *size = p_lcm->otp_size_in_bytes;
352
353 return LCM_ERROR_NONE;
354 }
355
lcm_get_lcs(struct lcm_dev_t * dev,enum lcm_lcs_t * lcs)356 enum lcm_error_t lcm_get_lcs(struct lcm_dev_t *dev, enum lcm_lcs_t *lcs)
357 {
358 struct _lcm_reg_map_t *p_lcm = (struct _lcm_reg_map_t *)dev->cfg->base;
359 enum lcm_bool_t fatal_err;
360 enum lcm_error_t err;
361
362 err = lcm_get_fatal_error(dev, &fatal_err);
363 if (err != LCM_ERROR_NONE) {
364 return err;
365 }
366
367 if (fatal_err == LCM_TRUE) {
368 FATAL_ERR(LCM_ERROR_FATAL_ERR);
369 return LCM_ERROR_FATAL_ERR;
370 }
371
372
373 *lcs = (enum lcm_lcs_t)p_lcm->lcs_value;
374
375 if (*lcs == LCM_LCS_INVALID) {
376 return LCM_ERROR_INVALID_LCS;
377 }
378
379 return LCM_ERROR_NONE;
380 }
381
#ifdef INTEGRITY_CHECKER_S
/* Count the number of zero bits in the len bytes at addr, using the
 * integrity checker hardware in zero-count mode. addr must satisfy
 * INTEGRITY_CHECKER_REQUIRED_ALIGNMENT.
 */
static enum lcm_error_t count_zero_bits(const uint32_t *addr, uint32_t len,
                                        uint32_t *zero_bits)
{
    enum integrity_checker_error_t ic_err;

    ic_err = integrity_checker_compute_value(&INTEGRITY_CHECKER_DEV_S,
                                             INTEGRITY_CHECKER_MODE_ZERO_COUNT,
                                             addr, len, zero_bits, sizeof(uint32_t),
                                             NULL);

    if (ic_err == INTEGRITY_CHECKER_ERROR_NONE) {
        return LCM_ERROR_NONE;
    } else {
        /* Any hardware failure is collapsed into a single internal error */
        FATAL_ERR(LCM_ERROR_INTERNAL_ERROR);
        return LCM_ERROR_INTERNAL_ERROR;
    }
}
#else
/* Software fallback: count the number of zero bits in the len bytes at addr
 * one word at a time. len must be a multiple of sizeof(uint32_t).
 */
static enum lcm_error_t count_zero_bits(const uint32_t *addr, uint32_t len,
                                        uint32_t *zero_bits)
{
    uint32_t idx;
    uint32_t word;
    uint32_t bit_index;

    *zero_bits = 0;

    for (idx = 0; idx < len; idx += sizeof(uint32_t)) {
        word = addr[idx / sizeof(uint32_t)];

        /* Add one for every bit of the word that is clear */
        for (bit_index = 0; bit_index < sizeof(word) * 8; bit_index++) {
            *zero_bits += 1 - ((word >> bit_index) & 1);
        }
    }

    return LCM_ERROR_NONE;
}
#endif /* INTEGRITY_CHECKER_S */
421
count_otp_zero_bits(struct lcm_dev_t * dev,uint32_t offset,uint32_t len,uint32_t * zero_bits)422 static enum lcm_error_t count_otp_zero_bits(struct lcm_dev_t *dev,
423 uint32_t offset, uint32_t len,
424 uint32_t *zero_bits)
425 {
426 struct _lcm_reg_map_t *p_lcm = (struct _lcm_reg_map_t *)dev->cfg->base;
427
428 return count_zero_bits((uint32_t *)(((uint8_t *)p_lcm->raw_otp) + offset),
429 len, zero_bits);
430 }
431
otp_write_unchecked(struct lcm_dev_t * dev,uint32_t offset,uint32_t len,uint32_t * p_buf_word)432 static void otp_write_unchecked(struct lcm_dev_t *dev, uint32_t offset,
433 uint32_t len, uint32_t *p_buf_word)
434 {
435 uint32_t idx;
436 struct _lcm_reg_map_t *p_lcm = (struct _lcm_reg_map_t *)dev->cfg->base;
437
438 /* Perform the actual write */
439 for (idx = 0; idx < len / sizeof(uint32_t); idx++) {
440 p_lcm->raw_otp[(offset / sizeof(uint32_t)) + idx] = p_buf_word[idx];
441 }
442 }
443
/**
 * \brief Perform the CM -> DM lifecycle transition.
 *
 * Each CM-provisioned key slot (HUK, GUK, KP_CM, KCE_CM) that is still
 * entirely zero (256 zero bits over 32 bytes) is first filled with the
 * dummy pattern so the hardware zero-count checks pass, then cm_config_1
 * and cm_config_2 are programmed to trigger the transition.
 *
 * \param[in] dev       LCM device.
 * \param[in] gppc_val  Value to latch into the GPPC field of cm_config_2.
 *
 * \return LCM_ERROR_NONE on success, otherwise an LCM error code.
 */
static enum lcm_error_t cm_to_dm(struct lcm_dev_t *dev, uint16_t gppc_val)
{
    enum lcm_error_t err;
    uint32_t config_val;
#ifdef INTEGRITY_CHECKER_S
    __ALIGNED(INTEGRITY_CHECKER_REQUIRED_ALIGNMENT)
#endif
    uint32_t zero_bits;

    config_val = LCM_TRUE;

    /* Fill the HUK with the dummy pattern if it was left unprovisioned */
    err = count_otp_zero_bits(dev, offsetof(struct lcm_otp_layout_t, huk), 32,
                              &zero_bits);
    if (err != LCM_ERROR_NONE) {
        return err;
    }
    if (zero_bits == 256) {
        err = lcm_otp_write(dev, offsetof(struct lcm_otp_layout_t, huk), 32,
                            dummy_key_value);
        if (err != LCM_ERROR_NONE) {
            return err;
        }
    }

    /* Fill the GUK with the dummy pattern if it was left unprovisioned */
    err = count_otp_zero_bits(dev, offsetof(struct lcm_otp_layout_t, guk), 32,
                              &zero_bits);
    if (err != LCM_ERROR_NONE) {
        return err;
    }
    if (zero_bits == 256) {
        err = lcm_otp_write(dev, offsetof(struct lcm_otp_layout_t, guk), 32,
                            dummy_key_value);
        if (err != LCM_ERROR_NONE) {
            return err;
        }
    }

    /* Fill KP_CM with the dummy pattern if it was left unprovisioned */
    err = count_otp_zero_bits(dev, offsetof(struct lcm_otp_layout_t, kp_cm), 32,
                              &zero_bits);
    if (err != LCM_ERROR_NONE) {
        return err;
    }
    if (zero_bits == 256) {
        err = lcm_otp_write(dev, offsetof(struct lcm_otp_layout_t, kp_cm), 32,
                            dummy_key_value);
        if (err != LCM_ERROR_NONE) {
            return err;
        }
    }

    /* Fill KCE_CM with the dummy pattern if it was left unprovisioned */
    err = count_otp_zero_bits(dev, offsetof(struct lcm_otp_layout_t, kce_cm), 32,
                              &zero_bits);
    if (err != LCM_ERROR_NONE) {
        return err;
    }
    if (zero_bits == 256) {
        err = lcm_otp_write(dev, offsetof(struct lcm_otp_layout_t, kce_cm), 32,
                            dummy_key_value);
        if (err != LCM_ERROR_NONE) {
            return err;
        }
    }

    /* cm_config_1 bypasses the write-verify path deliberately: it does not
     * read back as written, so it is checked manually below.
     */
    otp_write_unchecked(dev, offsetof(struct lcm_otp_layout_t, cm_config_1),
                        sizeof(uint32_t), &config_val);

    /* NOTE(review): a zero readback here appears to indicate no key
     * zero-count errors were latched (this mirrors the dm_config handling
     * in dm_to_se()) — confirm against the LCM TRM.
     */
    err = lcm_otp_read(dev, offsetof(struct lcm_otp_layout_t, cm_config_1),
                       sizeof(uint32_t), (uint8_t *)&config_val);
    if (err != LCM_ERROR_NONE) {
        return err;
    }

    if (config_val != 0) {
        return LCM_ERROR_WRITE_VERIFY_FAIL;
    }

    config_val = 0;

    /* cm_config_2: bits [7:0] carry the ROTPK zero count, bits [23:8] the
     * GPPC value.
     */
    err = count_otp_zero_bits(dev, offsetof(struct lcm_otp_layout_t, rotpk),
                              32, &zero_bits);
    if (err != LCM_ERROR_NONE) {
        return err;
    }

    config_val |= (zero_bits & 0xFF) << 0;
    config_val |= ((uint32_t)gppc_val) << 8;
#if LCM_VERSION == 0
    /* The upper bit has been used to trigger the DM->CM transition already */
    config_val |= 0x800;
#endif /* LCM_VERSION == 0 */

    err = lcm_otp_write(dev, offsetof(struct lcm_otp_layout_t, cm_config_2),
                        sizeof(uint32_t), (uint8_t *)&config_val);
    if (err != LCM_ERROR_NONE) {
        return err;
    }

    return LCM_ERROR_NONE;
}
543
/**
 * \brief Perform the DM -> SE lifecycle transition.
 *
 * DM-provisioned key slots (KP_DM, KCE_DM) still entirely zero are first
 * filled with the dummy pattern so the hardware zero-count checks pass,
 * then dm_config is programmed to trigger the transition.
 *
 * \param[in] dev  LCM device.
 *
 * \return LCM_ERROR_NONE on success, otherwise an LCM error code.
 */
static enum lcm_error_t dm_to_se(struct lcm_dev_t *dev)
{
    enum lcm_error_t err;
    uint32_t config_val;
#ifdef INTEGRITY_CHECKER_S
    __ALIGNED(INTEGRITY_CHECKER_REQUIRED_ALIGNMENT)
#endif
    uint32_t zero_bits;

    /* Fill KP_DM with the dummy pattern if it was left unprovisioned
     * (256 zero bits over 32 bytes == all zero).
     */
    err = count_otp_zero_bits(dev, offsetof(struct lcm_otp_layout_t, kp_dm), 32,
                              &zero_bits);
    if (err != LCM_ERROR_NONE) {
        return err;
    }
    if (zero_bits == 256) {
        err = lcm_otp_write(dev, offsetof(struct lcm_otp_layout_t, kp_dm), 32,
                            dummy_key_value);
        if (err != LCM_ERROR_NONE) {
            return err;
        }
    }

    /* Fill KCE_DM with the dummy pattern if it was left unprovisioned */
    err = count_otp_zero_bits(dev, offsetof(struct lcm_otp_layout_t, kce_dm), 32,
                              &zero_bits);
    if (err != LCM_ERROR_NONE) {
        return err;
    }
    if (zero_bits == 256) {
        err = lcm_otp_write(dev, offsetof(struct lcm_otp_layout_t, kce_dm), 32,
                            dummy_key_value);
        if (err != LCM_ERROR_NONE) {
            return err;
        }
    }

    config_val = LCM_TRUE;

    /* This OTP field doesn't read-back as written, but that isn't an error */
    err = lcm_otp_write(dev, offsetof(struct lcm_otp_layout_t, dm_config),
                        sizeof(uint32_t), (uint8_t *)&config_val);
    if (!(err == LCM_ERROR_NONE || err == LCM_ERROR_WRITE_VERIFY_FAIL)) {
        FATAL_ERR(err);
        return err;
    }

    /* Manually check that the readback value is what we expect (0x0 means no
     * key bit count errors).
     */
    err = lcm_otp_read(dev, offsetof(struct lcm_otp_layout_t, dm_config),
                       sizeof(uint32_t), (uint8_t *)&config_val);
    if (err != LCM_ERROR_NONE) {
        return err;
    }

    if (config_val != 0) {
        FATAL_ERR(LCM_ERROR_WRITE_VERIFY_FAIL);
        return LCM_ERROR_WRITE_VERIFY_FAIL;
    }

    return LCM_ERROR_NONE;
}
605
any_to_rma(struct lcm_dev_t * dev)606 static enum lcm_error_t any_to_rma(struct lcm_dev_t *dev)
607 {
608 enum lcm_error_t err;
609 uint32_t rma_flag = LCM_TRUE;
610
611 /* Write the CM RMA flag */
612 err = lcm_otp_write(dev, offsetof(struct lcm_otp_layout_t, cm_rma_flag),
613 sizeof(uint32_t), (uint8_t *)&rma_flag);
614 if (err != LCM_ERROR_NONE) {
615 return err;
616 }
617
618 /* Write the DM RMA flag */
619 err = lcm_otp_write(dev, offsetof(struct lcm_otp_layout_t, dm_rma_flag),
620 sizeof(uint32_t), (uint8_t *)&rma_flag);
621 if (err != LCM_ERROR_NONE) {
622 return err;
623 }
624
625 return LCM_ERROR_NONE;
626 }
627
/**
 * \brief Transition the device to a new lifecycle state.
 *
 * Validates the requested transition (only CM->DM, DM->SE, and any->RMA are
 * legal), enters secure provisioning mode if it is not already active, and
 * then performs the transition-specific OTP programming.
 *
 * \param[in] dev       LCM device.
 * \param[in] lcs       Target lifecycle state.
 * \param[in] gppc_val  GPPC value latched during the CM->DM transition
 *                      (ignored for other transitions).
 *
 * \return LCM_ERROR_NONE on success, otherwise an LCM error code.
 */
enum lcm_error_t lcm_set_lcs(struct lcm_dev_t *dev, enum lcm_lcs_t lcs,
                             uint16_t gppc_val)
{
    enum lcm_bool_t fatal_err;
    enum lcm_bool_t sp_enable;
    enum lcm_tp_mode_t tp_mode;
    enum lcm_error_t err;
    enum lcm_lcs_t curr_lcs;

    err = lcm_get_lcs(dev, &curr_lcs);
    if (err != LCM_ERROR_NONE) {
        return err;
    }
    /* Already in the requested state: nothing to do */
    if (lcs == curr_lcs) {
        return LCM_ERROR_NONE;
    }

    /* A TP mode (TCI or PCI) must have been committed before any
     * lifecycle transition is allowed.
     */
    err = lcm_get_tp_mode(dev, &tp_mode);
    if (err != LCM_ERROR_NONE) {
        return err;
    }
    if (!(tp_mode == LCM_TP_MODE_PCI || tp_mode == LCM_TP_MODE_TCI)) {
        FATAL_ERR(LCM_ERROR_INVALID_TP_MODE);
        return LCM_ERROR_INVALID_TP_MODE;
    }

    /* Lifecycle transitions require secure provisioning mode to be active */
    err = lcm_get_sp_enabled(dev, &sp_enable);
    if (err != LCM_ERROR_NONE) {
        return err;
    }
    if (sp_enable != LCM_TRUE) {
        err = lcm_set_sp_enabled(dev);
        if (err != LCM_ERROR_NONE) {
            return err;
        }
    }

    /* Busy-wait until SP mode reports active or a fatal error is latched */
    do {
        err = lcm_get_sp_enabled(dev, &sp_enable);
        if (err != LCM_ERROR_NONE) {
            return err;
        }
        err = lcm_get_fatal_error(dev, &fatal_err);
        if (err != LCM_ERROR_NONE) {
            return err;
        }
    } while (sp_enable == LCM_FALSE && fatal_err == LCM_FALSE);

    if (fatal_err == LCM_TRUE) {
        FATAL_ERR(LCM_ERROR_FATAL_ERR);
        return LCM_ERROR_FATAL_ERR;
    }

    switch (lcs) {
    case LCM_LCS_CM:
        /* There's no possible valid transition back to CM */
        FATAL_ERR(LCM_ERROR_INVALID_TRANSITION);
        return LCM_ERROR_INVALID_TRANSITION;
    case LCM_LCS_DM:
        /* DM is only reachable from CM */
        if (curr_lcs != LCM_LCS_CM) {
            FATAL_ERR(LCM_ERROR_INVALID_TRANSITION);
            return LCM_ERROR_INVALID_TRANSITION;
        }

        return cm_to_dm(dev, gppc_val);

    case LCM_LCS_SE:
        /* SE is only reachable from DM */
        if (curr_lcs != LCM_LCS_DM) {
            FATAL_ERR(LCM_ERROR_INVALID_TRANSITION);
            return LCM_ERROR_INVALID_TRANSITION;
        }

        return dm_to_se(dev);

    case LCM_LCS_RMA:
        /* RMA is reachable from any state */
        return any_to_rma(dev);

    case LCM_LCS_INVALID:
        FATAL_ERR(LCM_ERROR_INVALID_LCS);
        return LCM_ERROR_INVALID_LCS;
    }

    /* Should never get here */
    FATAL_ERR(LCM_ERROR_INTERNAL_ERROR);
    return LCM_ERROR_INTERNAL_ERROR;
}
714
/* Table mapping each OTP key slot that requires a hardware zero count to the
 * OTP configuration word, bit shift, and bit width where that count must be
 * recorded. Consulted by write_zero_count_if_needed() on every OTP write.
 */
static const struct lcm_hw_slot_zero_count_mapping {
    uint32_t offset;            /* OTP offset of the key slot */
    uint32_t size;              /* Size of the key slot in bytes */
    uint32_t zero_count_offset; /* OTP offset of the config word holding the count */
    uint32_t shift;             /* Bit position of the count within that word */
    uint32_t bit_size;          /* Width of the count field in bits */
} zero_count_mappings[] = {
#if (LCM_VERSION == 1)
    {
        offsetof(struct lcm_otp_layout_t, huk),
        sizeof(((struct lcm_otp_layout_t*)0)->huk),
        offsetof(struct lcm_otp_layout_t, cm_config_1),
        0,
        8,
    },
    {
        offsetof(struct lcm_otp_layout_t, guk),
        sizeof(((struct lcm_otp_layout_t*)0)->guk),
        offsetof(struct lcm_otp_layout_t, cm_config_1),
        8,
        8,
    },
    {
        offsetof(struct lcm_otp_layout_t, kp_cm),
        sizeof(((struct lcm_otp_layout_t*)0)->kp_cm),
        offsetof(struct lcm_otp_layout_t, cm_config_1),
        16,
        8,
    },
    {
        offsetof(struct lcm_otp_layout_t, kce_cm),
        sizeof(((struct lcm_otp_layout_t*)0)->kce_cm),
        offsetof(struct lcm_otp_layout_t, cm_config_1),
        24,
        8,
    },
#endif /* (LCM_VERSION == 1) */

    {
        offsetof(struct lcm_otp_layout_t, rotpk),
        sizeof(((struct lcm_otp_layout_t*)0)->rotpk),
        offsetof(struct lcm_otp_layout_t, cm_config_2),
        0,
        8,
    },

#if (LCM_VERSION == 1)
    {
        offsetof(struct lcm_otp_layout_t, kp_dm),
        sizeof(((struct lcm_otp_layout_t*)0)->kp_dm),
        offsetof(struct lcm_otp_layout_t, dm_config),
        0,
        8,
    },
    {
        offsetof(struct lcm_otp_layout_t, kce_dm),
        sizeof(((struct lcm_otp_layout_t*)0)->kce_dm),
        offsetof(struct lcm_otp_layout_t, dm_config),
        8,
        8,
    },
#endif /* (LCM_VERSION == 1) */
};
778
/* If the OTP write at the given offset targets a key slot listed in
 * zero_count_mappings, compute the zero-bit count of the data being written
 * and OR it into the associated configuration word (without readback
 * verification, since config words do not read back as written).
 * Writes to offsets with no mapping entry are a successful no-op.
 */
static enum lcm_error_t write_zero_count_if_needed(struct lcm_dev_t *dev,
                                                   uint32_t offset,
                                                   uint32_t len,
                                                   const uint8_t *buf)
{
    enum lcm_error_t err;
    uint32_t idx;
    uint32_t zero_bits = 0;
    uint32_t otp_word;
    const struct lcm_hw_slot_zero_count_mapping *mapping;
    bool zero_count_needed = false;

    /* Find the mapping entry whose slot starts at this offset, if any.
     * Note: after this loop `mapping` is only meaningful when
     * zero_count_needed is true.
     */
    for (idx = 0;
         idx < (sizeof(zero_count_mappings) / sizeof(zero_count_mappings[0]));
         idx++) {
        mapping = &zero_count_mappings[idx];
        if (offset == mapping->offset) {
            zero_count_needed = true;
            break;
        }
    }

    /* No zero count needed */
    if (!zero_count_needed) {
        return LCM_ERROR_NONE;
    }

    err = count_zero_bits((uint32_t *)buf, len, &zero_bits);
    if (err != LCM_ERROR_NONE) {
        return err;
    }

    /* sanity check that we don't overflow */
    assert((zero_bits & ~((1 << mapping->bit_size) - 1)) == 0);

    /* Runtime guard for release builds where assert() is compiled out */
    if (zero_bits & ~((1 << mapping->bit_size) - 1)) {
        return LCM_ERR_INVALID_ZERO_COUNT;
    }

    err = lcm_otp_read(dev, mapping->zero_count_offset, sizeof(otp_word),
                       (uint8_t *)&otp_word);
    if (err != LCM_ERROR_NONE) {
        return err;
    }

    /* OR the count into its field; OTP bits can only be set, never cleared */
    otp_word |= zero_bits << mapping->shift;

    otp_write_unchecked(dev, mapping->zero_count_offset,
                        sizeof(otp_word), &otp_word);

    return LCM_ERROR_NONE;
}
831
/* Armclang attempts to inline this function, which causes huge code size
 * increases. It is marked as __attribute__((noinline)) explicitly to prevent
 * this.
 */
__attribute__((noinline))
/**
 * \brief Write len bytes from buf into the OTP at the given byte offset.
 *
 * Validates buffer alignment, offset/length word-alignment, and OTP bounds;
 * records a key zero count where required; then programs the words and
 * verifies them by reading back through the register interface.
 *
 * \param[in] dev     LCM device.
 * \param[in] offset  Byte offset into the OTP; must be word-aligned.
 * \param[in] len     Number of bytes to write; must be a word multiple.
 * \param[in] buf     Source data; must be word-aligned.
 *
 * \return LCM_ERROR_NONE on success, otherwise an LCM error code.
 *         LCM_ERROR_WRITE_VERIFY_FAIL is non-fatal: some OTP fields
 *         legitimately do not read back as written.
 */
enum lcm_error_t lcm_otp_write(struct lcm_dev_t *dev, uint32_t offset, uint32_t len,
                               const uint8_t *buf)
{
    enum lcm_error_t err;
    struct _lcm_reg_map_t *p_lcm = (struct _lcm_reg_map_t *)dev->cfg->base;
    uint32_t *p_buf_word = (uint32_t *)buf;
    uint32_t otp_size;
    uint32_t idx;

    if (!is_pointer_word_aligned(p_buf_word)) {
        FATAL_ERR(LCM_ERROR_INVALID_ALIGNMENT);
        return LCM_ERROR_INVALID_ALIGNMENT;
    }

    if (offset & (sizeof(uint32_t) - 1)) {
        FATAL_ERR(LCM_ERROR_INVALID_OFFSET);
        return LCM_ERROR_INVALID_OFFSET;
    }

    if (len & (sizeof(uint32_t) - 1)) {
        FATAL_ERR(LCM_ERROR_INVALID_LENGTH);
        return LCM_ERROR_INVALID_LENGTH;
    }

    err = lcm_get_otp_size(dev, &otp_size);
    if (err != LCM_ERROR_NONE) {
        return err;
    }

    /* Reject writes that would run past the end of the OTP */
    if (otp_size < (offset + len)) {
        FATAL_ERR(LCM_ERROR_INVALID_OFFSET);
        return LCM_ERROR_INVALID_OFFSET;
    }

    /* Write the zero count if needed */
    err = write_zero_count_if_needed(dev, offset, len, buf);
    if (err != LCM_ERROR_NONE) {
        return err;
    }

    /* Perform the actual write */
    otp_write_unchecked(dev, offset, len, p_buf_word);

    /* Verify the write is correct */
    for (idx = 0; idx < len / sizeof(uint32_t); idx++) {
        if (p_buf_word[idx] != p_lcm->raw_otp[(offset / sizeof(uint32_t)) + idx]) {
            NONFATAL_ERR(LCM_ERROR_WRITE_VERIFY_FAIL);
            return LCM_ERROR_WRITE_VERIFY_FAIL;
        }
    }

    return LCM_ERROR_NONE;
}
890
891
/* Armclang attempts to inline this function, which causes huge code size
 * increases. It is marked as __attribute__((noinline)) explicitly to prevent
 * this.
 */
__attribute__((noinline))
/**
 * \brief Read len bytes of OTP starting at the given byte offset into buf.
 *
 * Validates buffer alignment, offset/length word-alignment, and OTP bounds,
 * then reads word by word. Reads within the user area are double-read in
 * software as a glitch countermeasure (outside the user area the hardware
 * performs the double-read itself).
 *
 * \param[in]  dev     LCM device.
 * \param[in]  offset  Byte offset into the OTP; must be word-aligned.
 * \param[in]  len     Number of bytes to read; must be a word multiple.
 * \param[out] buf     Destination buffer; must be word-aligned.
 *
 * \return LCM_ERROR_NONE on success, otherwise an LCM error code.
 */
enum lcm_error_t lcm_otp_read(struct lcm_dev_t *dev, uint32_t offset,
                              uint32_t len, uint8_t *buf)
{
    enum lcm_error_t err;
    struct _lcm_reg_map_t *p_lcm = (struct _lcm_reg_map_t *)dev->cfg->base;
    uint32_t *p_buf_word = (uint32_t *)buf;
    uint32_t validation_word;
    uint32_t otp_size;
    uint32_t idx;

    if (!is_pointer_word_aligned(p_buf_word)) {
        FATAL_ERR(LCM_ERROR_INVALID_ALIGNMENT);
        return LCM_ERROR_INVALID_ALIGNMENT;
    }

    if (offset & (sizeof(uint32_t) - 1)) {
        FATAL_ERR(LCM_ERROR_INVALID_OFFSET);
        return LCM_ERROR_INVALID_OFFSET;
    }

    if (len & (sizeof(uint32_t) - 1)) {
        FATAL_ERR(LCM_ERROR_INVALID_LENGTH);
        return LCM_ERROR_INVALID_LENGTH;
    }

    err = lcm_get_otp_size(dev, &otp_size);
    if (err != LCM_ERROR_NONE) {
        return err;
    }

    /* Reject reads that would run past the end of the OTP */
    if (otp_size < (offset + len)) {
        FATAL_ERR(LCM_ERROR_INVALID_OFFSET);
        return LCM_ERROR_INVALID_OFFSET;
    }

    for (idx = 0; idx < len / sizeof(uint32_t); idx++) {
        p_buf_word[idx] = p_lcm->raw_otp[(offset / sizeof(uint32_t)) + idx];

#ifdef KMU_S
        /* Random delay between the two reads to harden the double-read
         * check against fault injection.
         */
        kmu_random_delay(&KMU_DEV_S, KMU_DELAY_LIMIT_32_CYCLES);
#endif /* KMU_S */
        /* Double read verify is done in hardware for addresses outside of the
         * LCM OTP user area. In that case, don't perform a software check.
         */
        if (offset >= sizeof(struct lcm_otp_layout_t)) {
            validation_word = p_lcm->raw_otp[(offset / sizeof(uint32_t)) + idx];
            if (validation_word != p_buf_word[idx]) {
                FATAL_ERR(LCM_ERROR_READ_VERIFY_FAIL);
                return LCM_ERROR_READ_VERIFY_FAIL;
            }
        }
    }

    return LCM_ERROR_NONE;
}
952
lcm_dcu_get_enabled(struct lcm_dev_t * dev,uint8_t * val)953 enum lcm_error_t lcm_dcu_get_enabled(struct lcm_dev_t *dev, uint8_t *val)
954 {
955 struct _lcm_reg_map_t *p_lcm = (struct _lcm_reg_map_t *)dev->cfg->base;
956 uint32_t *p_val_word = (uint32_t *)val;
957 uint32_t idx;
958
959 if (!is_pointer_word_aligned(p_val_word)) {
960 FATAL_ERR(LCM_ERROR_INVALID_ALIGNMENT);
961 return LCM_ERROR_INVALID_ALIGNMENT;
962 }
963
964 for (idx = 0; idx < LCM_DCU_WIDTH_IN_BYTES / sizeof(uint32_t); idx++) {
965 p_val_word[idx] = p_lcm->dcu_en[idx];
966 }
967
968 return LCM_ERROR_NONE;
969 }
970
check_dcu_mask(struct lcm_dev_t * dev,uint32_t * val)971 static enum lcm_error_t check_dcu_mask(struct lcm_dev_t *dev, uint32_t *val)
972 {
973 struct _lcm_reg_map_t *p_lcm = (struct _lcm_reg_map_t *)dev->cfg->base;
974 size_t idx;
975
976 for (idx = 0; idx < LCM_DCU_WIDTH_IN_BYTES / sizeof(uint32_t); idx++) {
977 if (val[idx] & ~p_lcm->dcu_disable_mask[idx]) {
978 return LCM_ERR_DCU_MASK_MISMATCH;
979 }
980 }
981
982 return LCM_ERROR_NONE;
983 }
984
lcm_dcu_set_enabled(struct lcm_dev_t * dev,uint8_t * val)985 enum lcm_error_t lcm_dcu_set_enabled(struct lcm_dev_t *dev, uint8_t *val)
986 {
987 struct _lcm_reg_map_t *p_lcm = (struct _lcm_reg_map_t *)dev->cfg->base;
988 uint32_t *p_val_word = (uint32_t *)val;
989 enum lcm_error_t err;
990 uint32_t idx;
991
992 if (!is_pointer_word_aligned(p_val_word)) {
993 FATAL_ERR(LCM_ERROR_INVALID_ALIGNMENT);
994 return LCM_ERROR_INVALID_ALIGNMENT;
995 }
996
997 err = check_dcu_mask(dev, p_val_word);
998 if (err != LCM_ERROR_NONE) {
999 return err;
1000 }
1001
1002 for (idx = 0; idx < LCM_DCU_WIDTH_IN_BYTES / sizeof(uint32_t); idx++) {
1003 p_lcm->dcu_en[idx] = p_val_word[idx];
1004 }
1005
1006 for (idx = 0; idx < LCM_DCU_WIDTH_IN_BYTES / sizeof(uint32_t); idx++) {
1007 if (p_lcm->dcu_en[idx] != p_val_word[idx]) {
1008 FATAL_ERR(LCM_ERROR_WRITE_VERIFY_FAIL);
1009 return LCM_ERROR_WRITE_VERIFY_FAIL;
1010 }
1011 }
1012
1013 return LCM_ERROR_NONE;
1014 }
1015
lcm_dcu_get_locked(struct lcm_dev_t * dev,uint8_t * val)1016 enum lcm_error_t lcm_dcu_get_locked(struct lcm_dev_t *dev, uint8_t *val)
1017 {
1018 struct _lcm_reg_map_t *p_lcm = (struct _lcm_reg_map_t *)dev->cfg->base;
1019 uint32_t *p_val_word = (uint32_t *)val;
1020 uint32_t idx;
1021
1022 if (!is_pointer_word_aligned(p_val_word)) {
1023 FATAL_ERR(LCM_ERROR_INVALID_ALIGNMENT);
1024 return LCM_ERROR_INVALID_ALIGNMENT;
1025 }
1026
1027 for (idx = 0; idx < LCM_DCU_WIDTH_IN_BYTES / sizeof(uint32_t); idx++) {
1028 p_val_word[idx] = p_lcm->dcu_lock[idx];
1029 }
1030
1031 return LCM_ERROR_NONE;
1032 }
1033
lcm_dcu_set_locked(struct lcm_dev_t * dev,uint8_t * val)1034 enum lcm_error_t lcm_dcu_set_locked(struct lcm_dev_t *dev, uint8_t *val)
1035 {
1036 struct _lcm_reg_map_t *p_lcm = (struct _lcm_reg_map_t *)dev->cfg->base;
1037 uint32_t *p_val_word = (uint32_t *)val;
1038 uint32_t idx;
1039
1040 if (!is_pointer_word_aligned(p_val_word)) {
1041 FATAL_ERR(LCM_ERROR_INVALID_ALIGNMENT);
1042 return LCM_ERROR_INVALID_ALIGNMENT;
1043 }
1044
1045 for (idx = 0; idx < LCM_DCU_WIDTH_IN_BYTES / sizeof(uint32_t); idx++) {
1046 p_lcm->dcu_lock[idx] = p_val_word[idx];
1047 }
1048
1049 return LCM_ERROR_NONE;
1050 }
1051
lcm_dcu_get_sp_disable_mask(struct lcm_dev_t * dev,uint8_t * val)1052 enum lcm_error_t lcm_dcu_get_sp_disable_mask(struct lcm_dev_t *dev, uint8_t *val)
1053 {
1054 struct _lcm_reg_map_t *p_lcm = (struct _lcm_reg_map_t *)dev->cfg->base;
1055 uint32_t *p_val_word = (uint32_t *)val;
1056 uint32_t idx;
1057
1058 if (!is_pointer_word_aligned(p_val_word)) {
1059 FATAL_ERR(LCM_ERROR_INVALID_ALIGNMENT);
1060 return LCM_ERROR_INVALID_ALIGNMENT;
1061 }
1062
1063 for (idx = 0; idx < LCM_DCU_WIDTH_IN_BYTES / sizeof(uint32_t); idx++) {
1064 p_val_word[idx] = p_lcm->dcu_sp_disable_mask[idx];
1065 }
1066
1067 return LCM_ERROR_NONE;
1068 }
1069
lcm_dcu_get_disable_mask(struct lcm_dev_t * dev,uint8_t * val)1070 enum lcm_error_t lcm_dcu_get_disable_mask(struct lcm_dev_t *dev, uint8_t *val)
1071 {
1072 struct _lcm_reg_map_t *p_lcm = (struct _lcm_reg_map_t *)dev->cfg->base;
1073 uint32_t *p_val_word = (uint32_t *)val;
1074 uint32_t idx;
1075
1076 if (!is_pointer_word_aligned(p_val_word)) {
1077 FATAL_ERR(LCM_ERROR_INVALID_ALIGNMENT);
1078 return LCM_ERROR_INVALID_ALIGNMENT;
1079 }
1080
1081 for (idx = 0; idx < LCM_DCU_WIDTH_IN_BYTES / sizeof(uint32_t); idx++) {
1082 p_val_word[idx] = p_lcm->dcu_disable_mask[idx];
1083 }
1084
1085 return LCM_ERROR_NONE;
1086 }
1087