1 /*
2  * Copyright 2022 The ChromiumOS Authors.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 #include <stdbool.h>
7 #include <stdint.h>
8 
9 #include <zephyr/device.h>
10 #include <zephyr/devicetree.h>
11 #include <zephyr/irq.h>
12 #include <zephyr/logging/log.h>
13 #include <zephyr/sys/__assert.h>
14 
15 LOG_MODULE_REGISTER(soc_it8xxx2_ilm, CONFIG_LOG_DEFAULT_LEVEL);
16 
17 /*
18  * Instruction Local Memory (ILM) support for IT8xxx2.
19  *
20  * IT8xxx2 allows 4-kilobyte blocks of RAM be configured individually as either Instruction- or
21  * Data Local Memory (ILM or DLM). Addresses from which instructions will be fetched by the CPU
22  * *must* be in the Flash memory space: it is not permitted to execute from RAM addresses, only
23  * through ILM mappings into RAM.
24  *
25  * When a RAM block is configured as ILM, accesses to addresses matching the corresponding Scratch
26  * SRAM address register (SCARn{H,M,L}) are redirected to the corresponding ILM block in RAM.
27  * If SCAR0 (corresponding to ILM0) has the value 0x8021532 and ILM0 is enabled, then instruction
28  * fetches from the memory range 0x8021532..0x8022532 will be redirected to physical addresses
29  * 0x80100000..0x80101000 (the first 4k block of RAM).
30  *
31  * Instruction fetch from Flash is normally cacheable, but configuring ILM for a region makes that
32  * address range non-cacheable (which is appropriate because Flash has high latency but RAM is
33  * essentially the same speed as cache).
34  */
35 
/*
 * Bounds of the ILM code image: __ilm_flash_* is where the code is stored in
 * flash, __ilm_ram_* is the RAM region it is copied to and mapped over.
 * All four symbols are provided by the linker script.
 */
extern const uint8_t __ilm_flash_start[];
extern const uint8_t __ilm_flash_end[];
extern uint8_t __ilm_ram_start[];
extern uint8_t __ilm_ram_end[];

/* Granularity of ILM mapping: RAM is configured as ILM in 4-kilobyte blocks */
#define ILM_BLOCK_SIZE 0x1000
BUILD_ASSERT((ILM_BLOCK_SIZE & (ILM_BLOCK_SIZE - 1)) == 0, "ILM_BLOCK_SIZE must be a power of two");

#define FLASH_BASE CONFIG_FLASH_BASE_ADDRESS
#define RAM_BASE   CONFIG_SRAM_BASE_ADDRESS

/* Devicetree node whose reg entries give the SCAR register addresses */
#define ILM_NODE DT_NODELABEL(ilm)

/* SCARnH bit 3: enables the mapping described by this SCAR register set */
#define SCARH_ENABLE	 BIT(3)
/* SCARnH bit 7: holds bit 19 of the mapped flash address */
#define SCARH_ADDR_BIT19 BIT(7)
51 
52 /*
53  * SCAR registers contain 20-bit addresses in three registers, with one set
54  * of SCAR registers for each ILM block that may be configured.
55  */
56 struct scar_reg {
57 	/* Bits 0..7 of address; SCARnL */
58 	uint8_t l;
59 	/* Bits 8..15 of address; SCARnM */
60 	uint8_t m;
61 	/* Bits 16..18 and 19 of address, plus the enable bit for the entire SCAR; SCARnH */
62 	uint8_t h;
63 };
64 
65 struct ilm_config {
66 	volatile struct scar_reg *scar_regs[CONFIG_ILM_MAX_SIZE / 4];
67 };
68 
it8xxx2_is_ilm_configured(void)69 bool it8xxx2_is_ilm_configured(void)
70 {
71 	return device_is_ready(DEVICE_DT_GET(ILM_NODE));
72 }
73 
is_block_aligned(const void * const p)74 static bool __maybe_unused is_block_aligned(const void *const p)
75 {
76 	return ((uintptr_t)p & (ILM_BLOCK_SIZE - 1)) == 0;
77 }
78 
it8xxx2_configure_ilm_block(const struct ilm_config * const config,void * ram_addr,const void * flash_addr,const size_t copy_sz)79 static int it8xxx2_configure_ilm_block(const struct ilm_config *const config, void *ram_addr,
80 				       const void *flash_addr, const size_t copy_sz)
81 {
82 	if ((uintptr_t)ram_addr < RAM_BASE) {
83 		return -EFAULT; /* Not in RAM */
84 	}
85 	const int dirmap_index = ((uintptr_t)ram_addr - RAM_BASE) / ILM_BLOCK_SIZE;
86 
87 	if (dirmap_index >= ARRAY_SIZE(config->scar_regs)) {
88 		return -EFAULT; /* Past the end of RAM */
89 	}
90 	BUILD_ASSERT((FLASH_BASE & GENMASK(19, 0)) == 0,
91 		     "Flash is assumed to be aligned to SCAR register width");
92 	if (((uintptr_t)flash_addr - FLASH_BASE) & ~GENMASK(19, 0)) {
93 		return -EFAULT; /* Address doesn't fit in the SCAR */
94 	}
95 	if (!is_block_aligned(flash_addr)) {
96 		/* Bits 0..11 of SCAR can be programmed but ILM only works if they're zero */
97 		return -EFAULT;
98 	}
99 
100 	LOG_DBG("Enabling ILM%d %p -> %p, copy %d", dirmap_index, flash_addr, ram_addr, copy_sz);
101 
102 	volatile struct scar_reg *const scar = config->scar_regs[dirmap_index];
103 
104 	int irq_key = irq_lock();
105 
106 	/* Ensure scratch RAM for block data access is enabled */
107 	scar->h = SCARH_ENABLE;
108 	/* Copy block contents from flash into RAM */
109 	memcpy(ram_addr, flash_addr, copy_sz);
110 	/* Program SCAR */
111 	scar->l = (uintptr_t)flash_addr & GENMASK(7, 0);
112 	scar->m = ((uintptr_t)flash_addr & GENMASK(15, 8)) >> 8;
113 
114 	uint8_t scarh_value = ((uintptr_t)flash_addr & GENMASK(18, 16)) >> 16;
115 
116 	if ((uintptr_t)flash_addr & BIT(19)) {
117 		scarh_value |= SCARH_ADDR_BIT19;
118 	}
119 	scar->h = scarh_value;
120 
121 	irq_unlock(irq_key);
122 	return 0;
123 }
124 
it8xxx2_ilm_init(const struct device * dev)125 static int it8xxx2_ilm_init(const struct device *dev)
126 {
127 	/* Invariants enforced by the linker script */
128 	__ASSERT(is_block_aligned(__ilm_ram_start),
129 		 "ILM physical base address (%p) must be 4k-aligned", __ilm_ram_start);
130 	__ASSERT(is_block_aligned(__ilm_flash_start),
131 		 "ILM flash base address (%p) must be 4k-aligned", __ilm_flash_start);
132 	__ASSERT_NO_MSG((uintptr_t)__ilm_ram_end >= (uintptr_t)__ilm_ram_start &&
133 			(uintptr_t)__ilm_flash_end >= (uintptr_t)__ilm_flash_start);
134 
135 	LOG_DBG("ILM init %p-%p -> %p-%p", __ilm_flash_start, __ilm_flash_end, __ilm_ram_start,
136 		__ilm_ram_end);
137 	for (uintptr_t block_base = (uintptr_t)__ilm_ram_start;
138 	     block_base < (uintptr_t)__ilm_ram_end; block_base += ILM_BLOCK_SIZE) {
139 		uintptr_t flash_base =
140 			(uintptr_t)__ilm_flash_start + (block_base - (uintptr_t)__ilm_ram_start);
141 		/*
142 		 * Part of the target RAM block might be used for non-code data; avoid overwriting
143 		 * it by only copying as much data as the ILM flash region contains.
144 		 */
145 		size_t used_size = MIN((uintptr_t)__ilm_flash_end - flash_base, ILM_BLOCK_SIZE);
146 		int rv = it8xxx2_configure_ilm_block(dev->config, (void *)block_base,
147 						     (const void *)flash_base, used_size);
148 
149 		if (rv) {
150 			LOG_ERR("Unable to configure ILM block %p: %d", (void *)flash_base, rv);
151 			return rv;
152 		}
153 	}
154 
155 	return 0;
156 }
157 
/* Pointer to the nth SCAR register set, taken from the devicetree reg entries */
#define SCAR_REG(n) (volatile struct scar_reg *)DT_REG_ADDR_BY_IDX(ILM_NODE, n)

/*
 * One table entry per 4KB RAM block; a SCAR that controls more than 4KB is
 * repeated once per block it covers.
 */
static const struct ilm_config ilm_config = {
	.scar_regs = {
		/* SCAR0 SRAM 4KB */
		SCAR_REG(0),
		SCAR_REG(1),
		SCAR_REG(2),
		SCAR_REG(3),
		SCAR_REG(4),
		SCAR_REG(5),
		SCAR_REG(6),
		SCAR_REG(7),
		SCAR_REG(8),
		SCAR_REG(9),
		SCAR_REG(10),
		SCAR_REG(11),
		SCAR_REG(12),
		SCAR_REG(13),
		SCAR_REG(14),
		/*
		 * Except for CONFIG_SOC_IT81202CX and CONFIG_SOC_IT81302CX, the
		 * maximum ILM size is 60KB (SCAR0..SCAR14 above); on those two
		 * variants the ILM size equals the SRAM size (256KB), covered by
		 * the additional SCARs below.
		 */
#if (CONFIG_ILM_MAX_SIZE == 256)
		/* SCAR15 SRAM 4KB */
		SCAR_REG(15),
		/* SCAR16 SRAM 16KB */
		SCAR_REG(16), SCAR_REG(16), SCAR_REG(16), SCAR_REG(16),
		/* SCAR17 SRAM 16KB */
		SCAR_REG(17), SCAR_REG(17), SCAR_REG(17), SCAR_REG(17),
		/* SCAR18 SRAM 16KB */
		SCAR_REG(18), SCAR_REG(18), SCAR_REG(18), SCAR_REG(18),
		/* SCAR19 SRAM 16KB */
		SCAR_REG(19), SCAR_REG(19), SCAR_REG(19), SCAR_REG(19),
		/* SCAR20 SRAM 32KB */
		SCAR_REG(20), SCAR_REG(20), SCAR_REG(20), SCAR_REG(20),
		SCAR_REG(20), SCAR_REG(20), SCAR_REG(20), SCAR_REG(20),
		/* SCAR21 SRAM 32KB */
		SCAR_REG(21), SCAR_REG(21), SCAR_REG(21), SCAR_REG(21),
		SCAR_REG(21), SCAR_REG(21), SCAR_REG(21), SCAR_REG(21),
		/* SCAR22 SRAM 32KB */
		SCAR_REG(22), SCAR_REG(22), SCAR_REG(22), SCAR_REG(22),
		SCAR_REG(22), SCAR_REG(22), SCAR_REG(22), SCAR_REG(22),
		/* SCAR23 SRAM 32KB */
		SCAR_REG(23), SCAR_REG(23), SCAR_REG(23), SCAR_REG(23),
		SCAR_REG(23), SCAR_REG(23), SCAR_REG(23), SCAR_REG(23)
#endif
	}};
/* The table must provide exactly one entry per 4KB of configured ILM */
BUILD_ASSERT(ARRAY_SIZE(ilm_config.scar_regs) * ILM_BLOCK_SIZE == KB(CONFIG_ILM_MAX_SIZE),
	     "Wrong number of SCAR registers defined for RAM size");

/* Register the ILM device so it initializes early, before code in ILM runs */
DEVICE_DT_DEFINE(ILM_NODE, &it8xxx2_ilm_init, NULL, NULL, &ilm_config, PRE_KERNEL_1, 0, NULL);
212