1 /*
2  * Copyright 2020 Broadcom
3  * Copyright 2024 NXP
4  * Copyright 2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
5  *
6  * SPDX-License-Identifier: Apache-2.0
7  */
8 
9 #include <zephyr/device.h>
10 #include <zephyr/kernel.h>
11 #include <zephyr/arch/cpu.h>
12 #include <zephyr/sys/__assert.h>
13 #include <zephyr/sw_isr_table.h>
14 #include <zephyr/dt-bindings/interrupt-controller/arm-gic.h>
15 #include <zephyr/drivers/interrupt_controller/gic.h>
16 #include <zephyr/sys/barrier.h>
17 #include "intc_gic_common_priv.h"
18 #include "intc_gicv3_priv.h"
19 
20 #include <string.h>
21 
22 #define DT_DRV_COMPAT arm_gic_v3
23 
24 #define GIC_V3_NODE	DT_COMPAT_GET_ANY_STATUS_OKAY(DT_DRV_COMPAT)
25 
26 #define GIC_REDISTRIBUTOR_STRIDE	DT_PROP_OR(GIC_V3_NODE, redistributor_stride, 0)
27 #define GIC_NUM_REDISTRIBUTOR_REGIONS	DT_PROP_OR(GIC_V3_NODE, redistributor_regions, 1)
28 
29 #define GIC_REG_REGION(idx, node_id)				\
30 	{							\
31 		.base = DT_REG_ADDR_BY_IDX(node_id, idx),	\
32 		.size = DT_REG_SIZE_BY_IDX(node_id, idx),	\
33 	}
34 
35 /*
36  * Structure to save GIC register region info
37  */
38 struct gic_reg_region {
39 	mem_addr_t base;
40 	mem_addr_t size;
41 };
42 
43 /*
44  * GIC register regions info table
45  */
46 static struct gic_reg_region gic_reg_regions[] = {
47 	LISTIFY(DT_NUM_REGS(GIC_V3_NODE), GIC_REG_REGION, (,), GIC_V3_NODE)
48 };
49 
50 
/* Redistributor base addresses for each core, filled in per-core by
 * __arm_gic_init() after matching this core's affinity to a GICR frame.
 */
mem_addr_t gic_rdists[CONFIG_MP_MAX_NUM_CPUS];
53 
#if defined(CONFIG_ARMV8_A_NS) || defined(CONFIG_GIC_SINGLE_SECURITY_STATE)
/* All interrupts assigned to Group 1 (NS / single security state) */
#define IGROUPR_VAL	0xFFFFFFFFU
#else
/* Group bit clear; with IGROUPMODR set this selects Group 1 Secure */
#define IGROUPR_VAL	0x0U
#endif
59 
60 /*
61  * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
62  * deal with (one configuration byte per interrupt). PENDBASE has to
63  * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
64  */
65 #define ITS_MAX_LPI_NRBITS	16 /* 64K LPIs */
66 
67 #define LPI_PROPBASE_SZ(nrbits)	ROUND_UP(BIT(nrbits), KB(64))
68 #define LPI_PENDBASE_SZ(nrbits)	ROUND_UP(BIT(nrbits) / 8, KB(64))
69 
#ifdef CONFIG_GIC_V3_ITS
/* Shared LPI configuration (property) table, one byte per LPI;
 * allocated lazily by gicv3_rdist_setup_lpis() and common to all
 * redistributors.
 */
static uintptr_t lpi_prop_table;

/* NOTE(review): presumably an allocator cursor for LPI INTIDs (LPI ID
 * space starts at 8192); consumed outside this file (ITS driver) — confirm.
 */
atomic_t nlpi_intid = ATOMIC_INIT(8192);
#endif
75 
gic_get_rdist(void)76 static inline mem_addr_t gic_get_rdist(void)
77 {
78 	return gic_rdists[arch_curr_cpu()->id];
79 }
80 
81 /*
82  * Wait for register write pending
83  * TODO: add timed wait
84  */
gic_wait_rwp(uint32_t intid)85 static int gic_wait_rwp(uint32_t intid)
86 {
87 	uint32_t rwp_mask;
88 	mem_addr_t base;
89 
90 	if (intid < GIC_SPI_INT_BASE) {
91 		base = (gic_get_rdist() + GICR_CTLR);
92 		rwp_mask = BIT(GICR_CTLR_RWP);
93 	} else {
94 		base = GICD_CTLR;
95 		rwp_mask = BIT(GICD_CTLR_RWP);
96 	}
97 
98 	while (sys_read32(base) & rwp_mask) {
99 		;
100 	}
101 
102 	return 0;
103 }
104 
105 #ifdef CONFIG_GIC_V3_ITS
/*
 * Enable or disable one LPI by updating bit 0 of its configuration byte
 * in the shared LPI property table, then ask the redistributor to reload
 * the configuration.
 */
static void arm_gic_lpi_setup(unsigned int intid, bool enable)
{
	uint8_t *cfg_byte = (uint8_t *)lpi_prop_table + (intid - 8192);
	uint8_t val = *cfg_byte;

	val = enable ? (val | BIT(0)) : (val & ~BIT(0));
	*cfg_byte = val;

	/* Make the table update observable before the invalidate */
	barrier_dsync_fence_full();

	its_rdist_invall();
}
120 
arm_gic_lpi_set_priority(unsigned int intid,unsigned int prio)121 static void arm_gic_lpi_set_priority(unsigned int intid, unsigned int prio)
122 {
123 	uint8_t *cfg = &((uint8_t *)lpi_prop_table)[intid - 8192];
124 
125 	*cfg &= 0xfc;
126 	*cfg |= prio & 0xfc;
127 
128 	barrier_dsync_fence_full();
129 
130 	its_rdist_invall();
131 }
132 
arm_gic_lpi_is_enabled(unsigned int intid)133 static bool arm_gic_lpi_is_enabled(unsigned int intid)
134 {
135 	uint8_t *cfg = &((uint8_t *)lpi_prop_table)[intid - 8192];
136 
137 	return (*cfg & BIT(0));
138 }
139 #endif
140 
141 #if defined(CONFIG_ARMV8_A_NS) || defined(CONFIG_GIC_SINGLE_SECURITY_STATE)
/*
 * Program the IROUTER register of @intid with affinity value @val.
 * AArch32 (CONFIG_ARM) has no 64-bit MMIO accessor, so the register is
 * written as two 32-bit halves, low word first.
 */
static inline void arm_gic_write_irouter(uint64_t val, unsigned int intid)
{
	mem_addr_t addr = IROUTER(GET_DIST_BASE(intid), intid);

#ifdef CONFIG_ARM
	sys_write32((uint32_t)val, addr);
	sys_write32((uint32_t)(val >> 32U), addr + 4);
#else
	sys_write64(val, addr);
#endif
}
153 #endif
154 
/*
 * Set the priority and trigger type of an interrupt.
 *
 * @param intid interrupt ID; LPIs (>= 8192) are routed to the ITS
 *              configuration-table path and return early
 * @param prio  priority value, masked with GIC_PRI_MASK
 * @param flags IRQ_TYPE_EDGE selects edge trigger, anything else level
 *
 * NOTE(review): the interrupt is disabled here and not re-enabled;
 * callers are expected to call arm_gic_irq_enable() afterwards.
 */
void arm_gic_irq_set_priority(unsigned int intid,
			      unsigned int prio, uint32_t flags)
{
#ifdef CONFIG_GIC_V3_ITS
	if (intid >= 8192) {
		arm_gic_lpi_set_priority(intid, prio);
		return;
	}
#endif
	uint32_t mask = BIT(intid & (GIC_NUM_INTR_PER_REG - 1));
	uint32_t idx = intid / GIC_NUM_INTR_PER_REG;
	uint32_t shift;
	uint32_t val;
	mem_addr_t base = GET_DIST_BASE(intid);

	/* Disable the interrupt and wait for the write to take effect */
	sys_write32(mask, ICENABLER(base, idx));
	gic_wait_rwp(intid);

	/* PRIORITYR registers provide byte access */
	sys_write8(prio & GIC_PRI_MASK, IPRIORITYR(base, intid));

	/* Interrupt type config (2 bits per interrupt; not done for SGIs) */
	if (!GIC_IS_SGI(intid)) {
		idx = intid / GIC_NUM_CFG_PER_REG;
		shift = (intid & (GIC_NUM_CFG_PER_REG - 1)) * 2;

		val = sys_read32(ICFGR(base, idx));
		val &= ~(GICD_ICFGR_MASK << shift);
		if (flags & IRQ_TYPE_EDGE) {
			val |= (GICD_ICFGR_TYPE << shift);
		}
		sys_write32(val, ICFGR(base, idx));
	}
}
190 
/*
 * Enable an interrupt.
 *
 * LPIs (>= 8192) are enabled through their ITS configuration byte.
 * When affinity routing is in effect, an SPI is first routed to the PE
 * executing this call before being enabled.
 */
void arm_gic_irq_enable(unsigned int intid)
{
#ifdef CONFIG_GIC_V3_ITS
	if (intid >= 8192) {
		arm_gic_lpi_setup(intid, true);
		return;
	}
#endif
	uint32_t mask = BIT(intid & (GIC_NUM_INTR_PER_REG - 1));
	uint32_t idx = intid / GIC_NUM_INTR_PER_REG;

#if defined(CONFIG_ARMV8_A_NS) || defined(CONFIG_GIC_SINGLE_SECURITY_STATE)
	/*
	 * Affinity routing is enabled for Armv8-A Non-secure state (GICD_CTLR.ARE_NS
	 * is set to '1') and for GIC single security state (GICD_CTRL.ARE is set to '1'),
	 * so need to set SPI's affinity, now set it to be the PE on which it is enabled.
	 */
	if (GIC_IS_SPI(intid)) {
		arm_gic_write_irouter(MPIDR_TO_CORE(GET_MPIDR()), intid);
	}
#endif

	sys_write32(mask, ISENABLER(GET_DIST_BASE(intid), idx));
}
215 
/*
 * Disable an interrupt.
 *
 * LPIs (>= 8192) are disabled through their ITS configuration byte; for
 * other interrupts the ICENABLER write is polled (RWP) until the
 * distributor/redistributor has applied it.
 */
void arm_gic_irq_disable(unsigned int intid)
{
#ifdef CONFIG_GIC_V3_ITS
	if (intid >= 8192) {
		arm_gic_lpi_setup(intid, false);
		return;
	}
#endif
	uint32_t mask = BIT(intid & (GIC_NUM_INTR_PER_REG - 1));
	uint32_t idx = intid / GIC_NUM_INTR_PER_REG;

	sys_write32(mask, ICENABLER(GET_DIST_BASE(intid), idx));
	/* poll to ensure write is complete */
	gic_wait_rwp(intid);
}
231 
arm_gic_irq_is_enabled(unsigned int intid)232 bool arm_gic_irq_is_enabled(unsigned int intid)
233 {
234 #ifdef CONFIG_GIC_V3_ITS
235 	if (intid >= 8192) {
236 		return arm_gic_lpi_is_enabled(intid);
237 	}
238 #endif
239 	uint32_t mask = BIT(intid & (GIC_NUM_INTR_PER_REG - 1));
240 	uint32_t idx = intid / GIC_NUM_INTR_PER_REG;
241 	uint32_t val;
242 
243 	val = sys_read32(ISENABLER(GET_DIST_BASE(intid), idx));
244 
245 	return (val & mask) != 0;
246 }
247 
arm_gic_irq_is_pending(unsigned int intid)248 bool arm_gic_irq_is_pending(unsigned int intid)
249 {
250 	uint32_t mask = BIT(intid & (GIC_NUM_INTR_PER_REG - 1));
251 	uint32_t idx = intid / GIC_NUM_INTR_PER_REG;
252 	uint32_t val;
253 
254 	val = sys_read32(ISPENDR(GET_DIST_BASE(intid), idx));
255 
256 	return (val & mask) != 0;
257 }
258 
arm_gic_irq_set_pending(unsigned int intid)259 void arm_gic_irq_set_pending(unsigned int intid)
260 {
261 	uint32_t mask = BIT(intid & (GIC_NUM_INTR_PER_REG - 1));
262 	uint32_t idx = intid / GIC_NUM_INTR_PER_REG;
263 
264 	sys_write32(mask, ISPENDR(GET_DIST_BASE(intid), idx));
265 }
266 
arm_gic_irq_clear_pending(unsigned int intid)267 void arm_gic_irq_clear_pending(unsigned int intid)
268 {
269 	uint32_t mask = BIT(intid & (GIC_NUM_INTR_PER_REG - 1));
270 	uint32_t idx = intid / GIC_NUM_INTR_PER_REG;
271 
272 	sys_write32(mask, ICPENDR(GET_DIST_BASE(intid), idx));
273 }
274 
arm_gic_get_active(void)275 unsigned int arm_gic_get_active(void)
276 {
277 	int intid;
278 
279 	/* (Pending -> Active / AP) or (AP -> AP) */
280 	intid = read_sysreg(ICC_IAR1_EL1);
281 
282 	return intid;
283 }
284 
/* Signal end-of-interrupt for @intid on the group-1 CPU interface. */
void arm_gic_eoi(unsigned int intid)
{
	/*
	 * Interrupt request deassertion from peripheral to GIC happens
	 * by clearing interrupt condition by a write to the peripheral
	 * register. It is desired that the write transfer is complete
	 * before the core tries to change GIC state from 'AP/Active' to
	 * a new state on seeing 'EOI write'.
	 * Since ICC interface writes are not ordered against Device
	 * memory writes, a barrier is required to ensure the ordering.
	 * The dsb will also ensure *completion* of previous writes with
	 * DEVICE nGnRnE attribute.
	 */
	barrier_dsync_fence_full();

	/* (AP -> Pending) Or (Active -> Inactive) or (AP to AP) nested case */
	write_sysreg(intid, ICC_EOIR1_EL1);
}
303 
/*
 * Raise a Software Generated Interrupt on a set of target cores.
 *
 * @param sgi_id      SGI number (must satisfy GIC_IS_SGI, asserted)
 * @param target_aff  MPIDR-style affinity identifying the target cluster
 * @param target_list bitmap of target cores within that cluster
 */
void gic_raise_sgi(unsigned int sgi_id, uint64_t target_aff,
		   uint16_t target_list)
{
	uint32_t aff3, aff2, aff1;
	uint64_t sgi_val;

	__ASSERT_NO_MSG(GIC_IS_SGI(sgi_id));

	/* Extract affinity fields from target */
	aff1 = MPIDR_AFFLVL(target_aff, 1);
	aff2 = MPIDR_AFFLVL(target_aff, 2);
#if defined(CONFIG_ARM)
	/* There is no Aff3 in AArch32 MPIDR */
	aff3 = 0;
#else
	aff3 = MPIDR_AFFLVL(target_aff, 3);
#endif
	sgi_val = GICV3_SGIR_VALUE(aff3, aff2, aff1, sgi_id,
				   SGIR_IRM_TO_AFF, target_list);

	/* Complete prior memory writes before generating the SGI */
	barrier_dsync_fence_full();
	write_sysreg(sgi_val, ICC_SGI1R);
	/* Synchronize the sysreg write before continuing */
	barrier_isync_fence_full();
}
328 
329 /*
330  * Wake up GIC redistributor.
331  * clear ProcessorSleep and wait till ChildAsleep is cleared.
332  * ProcessSleep to be cleared only when ChildAsleep is set
333  * Check if redistributor is not powered already.
334  */
gicv3_rdist_enable(mem_addr_t rdist)335 static void gicv3_rdist_enable(mem_addr_t rdist)
336 {
337 	if (!(sys_read32(rdist + GICR_WAKER) & BIT(GICR_WAKER_CA))) {
338 		return;
339 	}
340 
341 	if (GICR_IIDR_PRODUCT_ID_GET(sys_read32(rdist + GICR_IIDR)) >= 0x2) {
342 		if (sys_read32(rdist + GICR_PWRR) & BIT(GICR_PWRR_RDPD)) {
343 			sys_set_bit(rdist + GICR_PWRR, GICR_PWRR_RDAG);
344 			sys_clear_bit(rdist + GICR_PWRR, GICR_PWRR_RDPD);
345 			while (sys_read32(rdist + GICR_PWRR) & BIT(GICR_PWRR_RDPD)) {
346 				;
347 			}
348 		}
349 	}
350 
351 	sys_clear_bit(rdist + GICR_WAKER, GICR_WAKER_PS);
352 	while (sys_read32(rdist + GICR_WAKER) & BIT(GICR_WAKER_CA)) {
353 		;
354 	}
355 }
356 
357 #ifdef CONFIG_GIC_V3_ITS
358 /*
359  * Setup LPIs Configuration & Pending tables for redistributors
360  * LPI configuration is global, each redistributor has a pending table
361  */
gicv3_rdist_setup_lpis(mem_addr_t rdist)362 static void gicv3_rdist_setup_lpis(mem_addr_t rdist)
363 {
364 	unsigned int lpi_id_bits = MIN(GICD_TYPER_IDBITS(sys_read32(GICD_TYPER)),
365 				       ITS_MAX_LPI_NRBITS);
366 	uintptr_t lpi_pend_table;
367 	uint64_t reg;
368 	uint32_t ctlr;
369 
370 	/* If not, alloc a common prop table for all redistributors */
371 	if (!lpi_prop_table) {
372 		lpi_prop_table = (uintptr_t)k_aligned_alloc(4 * 1024, LPI_PROPBASE_SZ(lpi_id_bits));
373 		memset((void *)lpi_prop_table, 0, LPI_PROPBASE_SZ(lpi_id_bits));
374 	}
375 
376 	lpi_pend_table = (uintptr_t)k_aligned_alloc(64 * 1024, LPI_PENDBASE_SZ(lpi_id_bits));
377 	memset((void *)lpi_pend_table, 0, LPI_PENDBASE_SZ(lpi_id_bits));
378 
379 	ctlr = sys_read32(rdist + GICR_CTLR);
380 	ctlr &= ~GICR_CTLR_ENABLE_LPIS;
381 	sys_write32(ctlr, rdist + GICR_CTLR);
382 
383 	/* PROPBASE */
384 	reg = (GIC_BASER_SHARE_INNER << GITR_PROPBASER_SHAREABILITY_SHIFT) |
385 	      (GIC_BASER_CACHE_RAWAWB << GITR_PROPBASER_INNER_CACHE_SHIFT) |
386 	      (lpi_prop_table & (GITR_PROPBASER_ADDR_MASK << GITR_PROPBASER_ADDR_SHIFT)) |
387 	      (GIC_BASER_CACHE_INNERLIKE << GITR_PROPBASER_OUTER_CACHE_SHIFT) |
388 	      ((lpi_id_bits - 1) & GITR_PROPBASER_ID_BITS_MASK);
389 	sys_write64(reg, rdist + GICR_PROPBASER);
390 	/* TOFIX: check SHAREABILITY validity */
391 
392 	/* PENDBASE */
393 	reg = (GIC_BASER_SHARE_INNER << GITR_PENDBASER_SHAREABILITY_SHIFT) |
394 	      (GIC_BASER_CACHE_RAWAWB << GITR_PENDBASER_INNER_CACHE_SHIFT) |
395 	      (lpi_pend_table & (GITR_PENDBASER_ADDR_MASK << GITR_PENDBASER_ADDR_SHIFT)) |
396 	      (GIC_BASER_CACHE_INNERLIKE << GITR_PENDBASER_OUTER_CACHE_SHIFT) |
397 	      GITR_PENDBASER_PTZ;
398 	sys_write64(reg, rdist + GICR_PENDBASER);
399 	/* TOFIX: check SHAREABILITY validity */
400 
401 	ctlr = sys_read32(rdist + GICR_CTLR);
402 	ctlr |= GICR_CTLR_ENABLE_LPIS;
403 	sys_write32(ctlr, rdist + GICR_CTLR);
404 
405 	barrier_dsync_fence_full();
406 }
407 #endif
408 
409 /*
410  * Initialize the cpu interface. This should be called by each core.
411  */
gicv3_cpuif_init(void)412 static void gicv3_cpuif_init(void)
413 {
414 	uint32_t icc_sre;
415 	uint32_t intid;
416 
417 	mem_addr_t base = gic_get_rdist() + GICR_SGI_BASE_OFF;
418 
419 	/* Disable all sgi ppi */
420 	sys_write32(BIT64_MASK(GIC_NUM_INTR_PER_REG), ICENABLER(base, 0));
421 	/* Any sgi/ppi intid ie. 0-31 will select GICR_CTRL */
422 	gic_wait_rwp(0);
423 
424 	/* Clear pending */
425 	sys_write32(BIT64_MASK(GIC_NUM_INTR_PER_REG), ICPENDR(base, 0));
426 
427 	/* Configure all SGIs/PPIs as G1S or G1NS depending on Zephyr
428 	 * is run in EL1S or EL1NS respectively.
429 	 * All interrupts will be delivered as irq
430 	 */
431 	sys_write32(IGROUPR_VAL, IGROUPR(base, 0));
432 	sys_write32(BIT64_MASK(GIC_NUM_INTR_PER_REG), IGROUPMODR(base, 0));
433 
434 	/*
435 	 * Configure default priorities for SGI 0:15 and PPI 0:15.
436 	 */
437 	for (intid = 0; intid < GIC_SPI_INT_BASE;
438 	     intid += GIC_NUM_PRI_PER_REG) {
439 		sys_write32(GIC_INT_DEF_PRI_X4, IPRIORITYR(base, intid));
440 	}
441 
442 	/* Configure PPIs as level triggered */
443 	sys_write32(0, ICFGR(base, 1));
444 
445 	/*
446 	 * Check if system interface can be enabled.
447 	 * 'icc_sre_el3' needs to be configured at 'EL3'
448 	 * to allow access to 'icc_sre_el1' at 'EL1'
449 	 * eg: z_arch_el3_plat_init can be used by platform.
450 	 */
451 	icc_sre = read_sysreg(ICC_SRE_EL1);
452 
453 	if (!(icc_sre & ICC_SRE_ELx_SRE_BIT)) {
454 		icc_sre = (icc_sre | ICC_SRE_ELx_SRE_BIT |
455 			   ICC_SRE_ELx_DIB_BIT | ICC_SRE_ELx_DFB_BIT);
456 		write_sysreg(icc_sre, ICC_SRE_EL1);
457 		icc_sre = read_sysreg(ICC_SRE_EL1);
458 
459 		__ASSERT_NO_MSG(icc_sre & ICC_SRE_ELx_SRE_BIT);
460 	}
461 
462 	write_sysreg(GIC_IDLE_PRIO, ICC_PMR_EL1);
463 
464 	/* Allow group1 interrupts */
465 	write_sysreg(1, ICC_IGRPEN1_EL1);
466 }
467 
468 /*
469  * TODO: Consider Zephyr in EL1NS.
470  */
gicv3_dist_init(void)471 static void gicv3_dist_init(void)
472 {
473 	unsigned int num_ints;
474 	unsigned int intid;
475 	unsigned int idx;
476 	mem_addr_t base = GIC_DIST_BASE;
477 
478 #ifdef CONFIG_GIC_SAFE_CONFIG
479 	/*
480 	 * Currently multiple OSes can run one the different CPU Cores which share single GIC,
481 	 * but GIC distributor should avoid to be re-configured in order to avoid crash the
482 	 * OSes has already been started.
483 	 */
484 	if (sys_read32(GICD_CTLR) & (BIT(GICD_CTLR_ENABLE_G0) | BIT(GICD_CTLR_ENABLE_G1NS))) {
485 		return;
486 	}
487 #endif
488 
489 	num_ints = sys_read32(GICD_TYPER);
490 	num_ints &= GICD_TYPER_ITLINESNUM_MASK;
491 	num_ints = (num_ints + 1) << 5;
492 
493 	/* Disable the distributor */
494 	sys_write32(0, GICD_CTLR);
495 	gic_wait_rwp(GIC_SPI_INT_BASE);
496 #ifdef CONFIG_GIC_SINGLE_SECURITY_STATE
497 	/*
498 	 * Before configuration, we need to check whether
499 	 * the GIC single security state mode is supported.
500 	 * Make sure GICD_CTRL_NS is 1.
501 	 */
502 	sys_set_bit(GICD_CTLR, GICD_CTRL_NS);
503 	__ASSERT(sys_test_bit(GICD_CTLR, GICD_CTRL_NS),
504 		"Current GIC does not support single security state");
505 #endif
506 
507 	/*
508 	 * Default configuration of all SPIs
509 	 */
510 	for (intid = GIC_SPI_INT_BASE; intid < num_ints;
511 	     intid += GIC_NUM_INTR_PER_REG) {
512 		idx = intid / GIC_NUM_INTR_PER_REG;
513 		/* Disable interrupt */
514 		sys_write32(BIT64_MASK(GIC_NUM_INTR_PER_REG),
515 			    ICENABLER(base, idx));
516 		/* Clear pending */
517 		sys_write32(BIT64_MASK(GIC_NUM_INTR_PER_REG),
518 			    ICPENDR(base, idx));
519 		sys_write32(IGROUPR_VAL, IGROUPR(base, idx));
520 		sys_write32(BIT64_MASK(GIC_NUM_INTR_PER_REG),
521 			    IGROUPMODR(base, idx));
522 
523 	}
524 	/* wait for rwp on GICD */
525 	gic_wait_rwp(GIC_SPI_INT_BASE);
526 
527 	/* Configure default priorities for all SPIs. */
528 	for (intid = GIC_SPI_INT_BASE; intid < num_ints;
529 	     intid += GIC_NUM_PRI_PER_REG) {
530 		sys_write32(GIC_INT_DEF_PRI_X4, IPRIORITYR(base, intid));
531 	}
532 
533 	/* Configure all SPIs as active low, level triggered by default */
534 	for (intid = GIC_SPI_INT_BASE; intid < num_ints;
535 	     intid += GIC_NUM_CFG_PER_REG) {
536 		idx = intid / GIC_NUM_CFG_PER_REG;
537 		sys_write32(0, ICFGR(base, idx));
538 	}
539 
540 #ifdef CONFIG_ARMV8_A_NS
541 	/* Enable distributor with ARE */
542 	sys_write32(BIT(GICD_CTRL_ARE_NS) | BIT(GICD_CTLR_ENABLE_G1NS),
543 		    GICD_CTLR);
544 #elif defined(CONFIG_GIC_SINGLE_SECURITY_STATE)
545 	/*
546 	 * For GIC single security state, the config GIC_SINGLE_SECURITY_STATE
547 	 * means the GIC is under single security state which has only two
548 	 * groups: group 0 and group 1.
549 	 * Then set GICD_CTLR_ARE and GICD_CTLR_ENABLE_G1 to enable Group 1
550 	 * interrupt.
551 	 * Since the GICD_CTLR_ARE and GICD_CTRL_ARE_S share BIT(4), and
552 	 * similarly the GICD_CTLR_ENABLE_G1 and GICD_CTLR_ENABLE_G1NS share
553 	 * BIT(1), we can reuse them.
554 	 */
555 	sys_write32(BIT(GICD_CTRL_ARE_S) | BIT(GICD_CTLR_ENABLE_G1NS),
556 		    GICD_CTLR);
557 #else
558 	/* enable Group 1 secure interrupts */
559 	sys_set_bit(GICD_CTLR, GICD_CTLR_ENABLE_G1S);
560 #endif
561 }
562 
/*
 * Pack the MPIDR affinity levels into the Aff3.Aff2.Aff1.Aff0 byte layout
 * used for comparison against GICR_TYPER's affinity value.
 */
static uint64_t arm_gic_mpidr_to_affinity(uint64_t mpidr)
{
	uint64_t packed;

#if defined(CONFIG_ARM)
	/* There is no Aff3 in AArch32 MPIDR */
	packed = 0;
#else
	packed = MPIDR_AFFLVL(mpidr, 3);
#endif

	packed = (packed << 8) | MPIDR_AFFLVL(mpidr, 2);
	packed = (packed << 8) | MPIDR_AFFLVL(mpidr, 1);
	packed = (packed << 8) | MPIDR_AFFLVL(mpidr, 0);

	return packed;
}
580 
/*
 * Compare a redistributor's affinity value against this core's. With
 * CONFIG_GIC_V3_RDIST_MATCHING_AFF0_ONLY only the low byte (Aff0) is
 * compared.
 */
static bool arm_gic_aff_matching(uint64_t gicr_aff, uint64_t aff)
{
#if defined(CONFIG_GIC_V3_RDIST_MATCHING_AFF0_ONLY)
	return (uint8_t)gicr_aff == (uint8_t)aff;
#else
	return gicr_aff == aff;
#endif
}
591 
/*
 * Read a 64-bit GICR register (e.g. GICR_TYPER). AArch32 (CONFIG_ARM)
 * has no 64-bit MMIO accessor, so the value is assembled from two
 * 32-bit reads, low word first.
 */
static inline uint64_t arm_gic_get_typer(mem_addr_t addr)
{
	uint64_t val;

#if defined(CONFIG_ARM)
	val = sys_read32(addr);
	val |= (uint64_t)sys_read32(addr + 4) << 32;
#else
	val = sys_read64(addr);
#endif

	return val;
}
605 
/*
 * Walk every redistributor region looking for the frame whose
 * GICR_TYPER affinity matches the calling core, returning its base
 * address or NULL when no frame matches.
 */
static mem_addr_t arm_gic_iterate_rdists(void)
{
	uint64_t aff = arm_gic_mpidr_to_affinity(GET_MPIDR());
	uint32_t idx;

	/* Skip the first array entry as it refers to the GIC distributor */
	for (idx = 1; idx < GIC_NUM_REDISTRIBUTOR_REGIONS + 1; idx++) {
		uint64_t val;
		mem_addr_t rdist_addr = gic_reg_regions[idx].base;
		mem_addr_t rdist_end = rdist_addr + gic_reg_regions[idx].size;

		do {
			val = arm_gic_get_typer(rdist_addr + GICR_TYPER);
			uint64_t gicr_aff = GICR_TYPER_AFFINITY_VALUE_GET(val);

			if (arm_gic_aff_matching(gicr_aff, aff)) {
				return rdist_addr;
			}

			/* Advance to the next redistributor frame */
			if (GIC_REDISTRIBUTOR_STRIDE > 0) {
				rdist_addr += GIC_REDISTRIBUTOR_STRIDE;
			} else {
				/*
				 * Skip RD_base and SGI_base
				 * In GICv3, GICR_TYPER.VLPIS bit is RES0 and can be ignored
				 * as there are no VLPI and reserved pages.
				 */
				rdist_addr += KB(64) * 2;
			}

		} while ((!GICR_TYPER_LAST_GET(val)) && (rdist_addr < rdist_end));
	}

	return (mem_addr_t)NULL;
}
641 
/*
 * Per-core GIC bring-up: locate and record this core's redistributor,
 * set up its LPI tables (when the ITS is enabled), wake the
 * redistributor, then initialize the CPU interface.
 */
static void __arm_gic_init(void)
{
	uint8_t cpu;
	mem_addr_t gic_rd_base;

	cpu = arch_curr_cpu()->id;
	gic_rd_base = arm_gic_iterate_rdists();
	__ASSERT(gic_rd_base != (mem_addr_t)NULL, "");

	gic_rdists[cpu] = gic_rd_base;

#ifdef CONFIG_GIC_V3_ITS
	/* Enable LPIs in Redistributor */
	gicv3_rdist_setup_lpis(gic_get_rdist());
#endif

	gicv3_rdist_enable(gic_get_rdist());

	gicv3_cpuif_init();
}
662 
/*
 * Driver init hook: initialize the distributor once, then the per-core
 * parts (redistributor + CPU interface) for the boot core.
 *
 * @param dev device instance (unused)
 * @return always 0
 */
int arm_gic_init(const struct device *dev)
{
	gicv3_dist_init();

	__arm_gic_init();

	return 0;
}
/* Register the GIC at PRE_KERNEL_1 so it is ready before other drivers */
DEVICE_DT_INST_DEFINE(0, arm_gic_init, NULL, NULL, NULL,
		      PRE_KERNEL_1, CONFIG_INTC_INIT_PRIORITY, NULL);
673 
674 #ifdef CONFIG_SMP
/*
 * Per-core GIC bring-up for secondary cores under SMP; additionally maps
 * this core's redistributor into the ITS collection tables when the ITS
 * is enabled.
 */
void arm_gic_secondary_init(void)
{
	__arm_gic_init();

#ifdef CONFIG_GIC_V3_ITS
	/* Map this CPU Redistributor in all the ITS Collection tables */
	its_rdist_map();
#endif
}
684 #endif
685