1 /*
2  * Copyright 2020 Broadcom
3  * Copyright 2024 NXP
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <zephyr/device.h>
9 #include <zephyr/kernel.h>
10 #include <zephyr/arch/cpu.h>
11 #include <zephyr/sys/__assert.h>
12 #include <zephyr/sw_isr_table.h>
13 #include <zephyr/dt-bindings/interrupt-controller/arm-gic.h>
14 #include <zephyr/drivers/interrupt_controller/gic.h>
15 #include <zephyr/sys/barrier.h>
16 #include "intc_gic_common_priv.h"
17 #include "intc_gicv3_priv.h"
18 
19 #include <string.h>
20 
21 #define DT_DRV_COMPAT arm_gic_v3
22 
23 /* Redistributor base addresses for each core */
24 mem_addr_t gic_rdists[CONFIG_MP_MAX_NUM_CPUS];
25 
26 #if defined(CONFIG_ARMV8_A_NS) || defined(CONFIG_GIC_SINGLE_SECURITY_STATE)
27 #define IGROUPR_VAL	0xFFFFFFFFU
28 #else
29 #define IGROUPR_VAL	0x0U
30 #endif
31 
32 /*
33  * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
34  * deal with (one configuration byte per interrupt). PENDBASE has to
35  * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
36  */
37 #define ITS_MAX_LPI_NRBITS	16 /* 64K LPIs */
38 
39 #define LPI_PROPBASE_SZ(nrbits)	ROUND_UP(BIT(nrbits), KB(64))
40 #define LPI_PENDBASE_SZ(nrbits)	ROUND_UP(BIT(nrbits) / 8, KB(64))
41 
42 #ifdef CONFIG_GIC_V3_ITS
43 static uintptr_t lpi_prop_table;
44 
45 atomic_t nlpi_intid = ATOMIC_INIT(8192);
46 #endif
47 
gic_get_rdist(void)48 static inline mem_addr_t gic_get_rdist(void)
49 {
50 	return gic_rdists[arch_curr_cpu()->id];
51 }
52 
53 /*
54  * Wait for register write pending
55  * TODO: add timed wait
56  */
gic_wait_rwp(uint32_t intid)57 static int gic_wait_rwp(uint32_t intid)
58 {
59 	uint32_t rwp_mask;
60 	mem_addr_t base;
61 
62 	if (intid < GIC_SPI_INT_BASE) {
63 		base = (gic_get_rdist() + GICR_CTLR);
64 		rwp_mask = BIT(GICR_CTLR_RWP);
65 	} else {
66 		base = GICD_CTLR;
67 		rwp_mask = BIT(GICD_CTLR_RWP);
68 	}
69 
70 	while (sys_read32(base) & rwp_mask) {
71 		;
72 	}
73 
74 	return 0;
75 }
76 
77 #ifdef CONFIG_GIC_V3_ITS
arm_gic_lpi_setup(unsigned int intid,bool enable)78 static void arm_gic_lpi_setup(unsigned int intid, bool enable)
79 {
80 	uint8_t *cfg = &((uint8_t *)lpi_prop_table)[intid - 8192];
81 
82 	if (enable) {
83 		*cfg |= BIT(0);
84 	} else {
85 		*cfg &= ~BIT(0);
86 	}
87 
88 	barrier_dsync_fence_full();
89 
90 	its_rdist_invall();
91 }
92 
arm_gic_lpi_set_priority(unsigned int intid,unsigned int prio)93 static void arm_gic_lpi_set_priority(unsigned int intid, unsigned int prio)
94 {
95 	uint8_t *cfg = &((uint8_t *)lpi_prop_table)[intid - 8192];
96 
97 	*cfg &= 0xfc;
98 	*cfg |= prio & 0xfc;
99 
100 	barrier_dsync_fence_full();
101 
102 	its_rdist_invall();
103 }
104 
arm_gic_lpi_is_enabled(unsigned int intid)105 static bool arm_gic_lpi_is_enabled(unsigned int intid)
106 {
107 	uint8_t *cfg = &((uint8_t *)lpi_prop_table)[intid - 8192];
108 
109 	return (*cfg & BIT(0));
110 }
111 #endif
112 
#if defined(CONFIG_ARMV8_A_NS) || defined(CONFIG_GIC_SINGLE_SECURITY_STATE)
/* Program the affinity routing register (IROUTER) for an interrupt.
 * On AArch32 the 64-bit register must be written as two 32-bit accesses.
 */
static inline void arm_gic_write_irouter(uint64_t val, unsigned int intid)
{
	mem_addr_t addr = IROUTER(GET_DIST_BASE(intid), intid);

#ifdef CONFIG_ARM
	/* AArch32: split the 64-bit write into low then high halves */
	sys_write32((uint32_t)val, addr);
	sys_write32((uint32_t)(val >> 32U), addr + 4);
#else
	sys_write64(val, addr);
#endif
}
#endif
126 
/*
 * Configure the priority and trigger type of an interrupt.
 * LPIs only carry a priority; SGIs have a fixed type, so the ICFGR
 * write is skipped for them.
 * NOTE(review): for non-LPIs the interrupt is disabled here and not
 * re-enabled — callers are expected to call arm_gic_irq_enable() after.
 */
void arm_gic_irq_set_priority(unsigned int intid,
			      unsigned int prio, uint32_t flags)
{
#ifdef CONFIG_GIC_V3_ITS
	/* LPIs (intid >= 8192) are configured via the ITS property table */
	if (intid >= 8192) {
		arm_gic_lpi_set_priority(intid, prio);
		return;
	}
#endif
	uint32_t mask = BIT(intid & (GIC_NUM_INTR_PER_REG - 1));
	uint32_t idx = intid / GIC_NUM_INTR_PER_REG;
	uint32_t shift;
	uint32_t val;
	mem_addr_t base = GET_DIST_BASE(intid);

	/* Disable the interrupt while it is reconfigured */
	sys_write32(mask, ICENABLER(base, idx));
	gic_wait_rwp(intid);

	/* PRIORITYR registers provide byte access */
	sys_write8(prio & GIC_PRI_MASK, IPRIORITYR(base, intid));

	/* Interrupt type config: 2 bits per interrupt in ICFGR */
	if (!GIC_IS_SGI(intid)) {
		idx = intid / GIC_NUM_CFG_PER_REG;
		shift = (intid & (GIC_NUM_CFG_PER_REG - 1)) * 2;

		val = sys_read32(ICFGR(base, idx));
		val &= ~(GICD_ICFGR_MASK << shift);
		if (flags & IRQ_TYPE_EDGE) {
			val |= (GICD_ICFGR_TYPE << shift);
		}
		sys_write32(val, ICFGR(base, idx));
	}
}
162 
/* Enable an interrupt; for SPIs under affinity routing, first route it
 * to the calling PE.
 */
void arm_gic_irq_enable(unsigned int intid)
{
#ifdef CONFIG_GIC_V3_ITS
	/* LPIs (intid >= 8192) are enabled via the ITS property table */
	if (intid >= 8192) {
		arm_gic_lpi_setup(intid, true);
		return;
	}
#endif
	uint32_t mask = BIT(intid & (GIC_NUM_INTR_PER_REG - 1));
	uint32_t idx = intid / GIC_NUM_INTR_PER_REG;

#if defined(CONFIG_ARMV8_A_NS) || defined(CONFIG_GIC_SINGLE_SECURITY_STATE)
	/*
	 * Affinity routing is enabled for Armv8-A Non-secure state (GICD_CTLR.ARE_NS
	 * is set to '1') and for GIC single security state (GICD_CTRL.ARE is set to '1'),
	 * so need to set SPI's affinity, now set it to be the PE on which it is enabled.
	 */
	if (GIC_IS_SPI(intid)) {
		arm_gic_write_irouter(MPIDR_TO_CORE(GET_MPIDR()), intid);
	}
#endif

	sys_write32(mask, ISENABLER(GET_DIST_BASE(intid), idx));
}
187 
arm_gic_irq_disable(unsigned int intid)188 void arm_gic_irq_disable(unsigned int intid)
189 {
190 #ifdef CONFIG_GIC_V3_ITS
191 	if (intid >= 8192) {
192 		arm_gic_lpi_setup(intid, false);
193 		return;
194 	}
195 #endif
196 	uint32_t mask = BIT(intid & (GIC_NUM_INTR_PER_REG - 1));
197 	uint32_t idx = intid / GIC_NUM_INTR_PER_REG;
198 
199 	sys_write32(mask, ICENABLER(GET_DIST_BASE(intid), idx));
200 	/* poll to ensure write is complete */
201 	gic_wait_rwp(intid);
202 }
203 
arm_gic_irq_is_enabled(unsigned int intid)204 bool arm_gic_irq_is_enabled(unsigned int intid)
205 {
206 #ifdef CONFIG_GIC_V3_ITS
207 	if (intid >= 8192) {
208 		return arm_gic_lpi_is_enabled(intid);
209 	}
210 #endif
211 	uint32_t mask = BIT(intid & (GIC_NUM_INTR_PER_REG - 1));
212 	uint32_t idx = intid / GIC_NUM_INTR_PER_REG;
213 	uint32_t val;
214 
215 	val = sys_read32(ISENABLER(GET_DIST_BASE(intid), idx));
216 
217 	return (val & mask) != 0;
218 }
219 
arm_gic_irq_is_pending(unsigned int intid)220 bool arm_gic_irq_is_pending(unsigned int intid)
221 {
222 	uint32_t mask = BIT(intid & (GIC_NUM_INTR_PER_REG - 1));
223 	uint32_t idx = intid / GIC_NUM_INTR_PER_REG;
224 	uint32_t val;
225 
226 	val = sys_read32(ISPENDR(GET_DIST_BASE(intid), idx));
227 
228 	return (val & mask) != 0;
229 }
230 
arm_gic_irq_set_pending(unsigned int intid)231 void arm_gic_irq_set_pending(unsigned int intid)
232 {
233 	uint32_t mask = BIT(intid & (GIC_NUM_INTR_PER_REG - 1));
234 	uint32_t idx = intid / GIC_NUM_INTR_PER_REG;
235 
236 	sys_write32(mask, ISPENDR(GET_DIST_BASE(intid), idx));
237 }
238 
arm_gic_irq_clear_pending(unsigned int intid)239 void arm_gic_irq_clear_pending(unsigned int intid)
240 {
241 	uint32_t mask = BIT(intid & (GIC_NUM_INTR_PER_REG - 1));
242 	uint32_t idx = intid / GIC_NUM_INTR_PER_REG;
243 
244 	sys_write32(mask, ICPENDR(GET_DIST_BASE(intid), idx));
245 }
246 
arm_gic_get_active(void)247 unsigned int arm_gic_get_active(void)
248 {
249 	int intid;
250 
251 	/* (Pending -> Active / AP) or (AP -> AP) */
252 	intid = read_sysreg(ICC_IAR1_EL1);
253 
254 	return intid;
255 }
256 
/* Signal end-of-interrupt for the given INTID (Group 1). */
void arm_gic_eoi(unsigned int intid)
{
	/*
	 * Interrupt request deassertion from peripheral to GIC happens
	 * by clearing interrupt condition by a write to the peripheral
	 * register. It is desired that the write transfer is complete
	 * before the core tries to change GIC state from 'AP/Active' to
	 * a new state on seeing 'EOI write'.
	 * Since ICC interface writes are not ordered against Device
	 * memory writes, a barrier is required to ensure the ordering.
	 * The dsb will also ensure *completion* of previous writes with
	 * DEVICE nGnRnE attribute.
	 */
	barrier_dsync_fence_full();

	/* (AP -> Pending) Or (Active -> Inactive) or (AP to AP) nested case */
	write_sysreg(intid, ICC_EOIR1_EL1);
}
275 
/*
 * Generate a software generated interrupt (SGI) targeting the PEs in
 * target_list within the affinity group described by target_aff.
 */
void gic_raise_sgi(unsigned int sgi_id, uint64_t target_aff,
		   uint16_t target_list)
{
	uint32_t aff3, aff2, aff1;
	uint64_t sgi_val;

	__ASSERT_NO_MSG(GIC_IS_SGI(sgi_id));

	/* Extract affinity fields from target */
	aff1 = MPIDR_AFFLVL(target_aff, 1);
	aff2 = MPIDR_AFFLVL(target_aff, 2);
#if defined(CONFIG_ARM)
	/* There is no Aff3 in AArch32 MPIDR */
	aff3 = 0;
#else
	aff3 = MPIDR_AFFLVL(target_aff, 3);
#endif
	sgi_val = GICV3_SGIR_VALUE(aff3, aff2, aff1, sgi_id,
				   SGIR_IRM_TO_AFF, target_list);

	/* Order prior memory accesses before the SGI is generated */
	barrier_dsync_fence_full();
	write_sysreg(sgi_val, ICC_SGI1R);
	/* Ensure the sysreg write has taken effect before continuing */
	barrier_isync_fence_full();
}
300 
301 /*
302  * Wake up GIC redistributor.
303  * clear ProcessorSleep and wait till ChildAsleep is cleared.
304  * ProcessSleep to be cleared only when ChildAsleep is set
305  * Check if redistributor is not powered already.
306  */
gicv3_rdist_enable(mem_addr_t rdist)307 static void gicv3_rdist_enable(mem_addr_t rdist)
308 {
309 	if (!(sys_read32(rdist + GICR_WAKER) & BIT(GICR_WAKER_CA))) {
310 		return;
311 	}
312 
313 	if (GICR_IIDR_PRODUCT_ID_GET(sys_read32(rdist + GICR_IIDR)) >= 0x2) {
314 		if (sys_read32(rdist + GICR_PWRR) & BIT(GICR_PWRR_RDPD)) {
315 			sys_set_bit(rdist + GICR_PWRR, GICR_PWRR_RDAG);
316 			sys_clear_bit(rdist + GICR_PWRR, GICR_PWRR_RDPD);
317 			while (sys_read32(rdist + GICR_PWRR) & BIT(GICR_PWRR_RDPD)) {
318 				;
319 			}
320 		}
321 	}
322 
323 	sys_clear_bit(rdist + GICR_WAKER, GICR_WAKER_PS);
324 	while (sys_read32(rdist + GICR_WAKER) & BIT(GICR_WAKER_CA)) {
325 		;
326 	}
327 }
328 
329 #ifdef CONFIG_GIC_V3_ITS
330 /*
331  * Setup LPIs Configuration & Pending tables for redistributors
332  * LPI configuration is global, each redistributor has a pending table
333  */
gicv3_rdist_setup_lpis(mem_addr_t rdist)334 static void gicv3_rdist_setup_lpis(mem_addr_t rdist)
335 {
336 	unsigned int lpi_id_bits = MIN(GICD_TYPER_IDBITS(sys_read32(GICD_TYPER)),
337 				       ITS_MAX_LPI_NRBITS);
338 	uintptr_t lpi_pend_table;
339 	uint64_t reg;
340 	uint32_t ctlr;
341 
342 	/* If not, alloc a common prop table for all redistributors */
343 	if (!lpi_prop_table) {
344 		lpi_prop_table = (uintptr_t)k_aligned_alloc(4 * 1024, LPI_PROPBASE_SZ(lpi_id_bits));
345 		memset((void *)lpi_prop_table, 0, LPI_PROPBASE_SZ(lpi_id_bits));
346 	}
347 
348 	lpi_pend_table = (uintptr_t)k_aligned_alloc(64 * 1024, LPI_PENDBASE_SZ(lpi_id_bits));
349 	memset((void *)lpi_pend_table, 0, LPI_PENDBASE_SZ(lpi_id_bits));
350 
351 	ctlr = sys_read32(rdist + GICR_CTLR);
352 	ctlr &= ~GICR_CTLR_ENABLE_LPIS;
353 	sys_write32(ctlr, rdist + GICR_CTLR);
354 
355 	/* PROPBASE */
356 	reg = (GIC_BASER_SHARE_INNER << GITR_PROPBASER_SHAREABILITY_SHIFT) |
357 	      (GIC_BASER_CACHE_RAWAWB << GITR_PROPBASER_INNER_CACHE_SHIFT) |
358 	      (lpi_prop_table & (GITR_PROPBASER_ADDR_MASK << GITR_PROPBASER_ADDR_SHIFT)) |
359 	      (GIC_BASER_CACHE_INNERLIKE << GITR_PROPBASER_OUTER_CACHE_SHIFT) |
360 	      ((lpi_id_bits - 1) & GITR_PROPBASER_ID_BITS_MASK);
361 	sys_write64(reg, rdist + GICR_PROPBASER);
362 	/* TOFIX: check SHAREABILITY validity */
363 
364 	/* PENDBASE */
365 	reg = (GIC_BASER_SHARE_INNER << GITR_PENDBASER_SHAREABILITY_SHIFT) |
366 	      (GIC_BASER_CACHE_RAWAWB << GITR_PENDBASER_INNER_CACHE_SHIFT) |
367 	      (lpi_pend_table & (GITR_PENDBASER_ADDR_MASK << GITR_PENDBASER_ADDR_SHIFT)) |
368 	      (GIC_BASER_CACHE_INNERLIKE << GITR_PENDBASER_OUTER_CACHE_SHIFT) |
369 	      GITR_PENDBASER_PTZ;
370 	sys_write64(reg, rdist + GICR_PENDBASER);
371 	/* TOFIX: check SHAREABILITY validity */
372 
373 	ctlr = sys_read32(rdist + GICR_CTLR);
374 	ctlr |= GICR_CTLR_ENABLE_LPIS;
375 	sys_write32(ctlr, rdist + GICR_CTLR);
376 
377 	barrier_dsync_fence_full();
378 }
379 #endif
380 
381 /*
382  * Initialize the cpu interface. This should be called by each core.
383  */
gicv3_cpuif_init(void)384 static void gicv3_cpuif_init(void)
385 {
386 	uint32_t icc_sre;
387 	uint32_t intid;
388 
389 	mem_addr_t base = gic_get_rdist() + GICR_SGI_BASE_OFF;
390 
391 	/* Disable all sgi ppi */
392 	sys_write32(BIT64_MASK(GIC_NUM_INTR_PER_REG), ICENABLER(base, 0));
393 	/* Any sgi/ppi intid ie. 0-31 will select GICR_CTRL */
394 	gic_wait_rwp(0);
395 
396 	/* Clear pending */
397 	sys_write32(BIT64_MASK(GIC_NUM_INTR_PER_REG), ICPENDR(base, 0));
398 
399 	/* Configure all SGIs/PPIs as G1S or G1NS depending on Zephyr
400 	 * is run in EL1S or EL1NS respectively.
401 	 * All interrupts will be delivered as irq
402 	 */
403 	sys_write32(IGROUPR_VAL, IGROUPR(base, 0));
404 	sys_write32(BIT64_MASK(GIC_NUM_INTR_PER_REG), IGROUPMODR(base, 0));
405 
406 	/*
407 	 * Configure default priorities for SGI 0:15 and PPI 0:15.
408 	 */
409 	for (intid = 0; intid < GIC_SPI_INT_BASE;
410 	     intid += GIC_NUM_PRI_PER_REG) {
411 		sys_write32(GIC_INT_DEF_PRI_X4, IPRIORITYR(base, intid));
412 	}
413 
414 	/* Configure PPIs as level triggered */
415 	sys_write32(0, ICFGR(base, 1));
416 
417 	/*
418 	 * Check if system interface can be enabled.
419 	 * 'icc_sre_el3' needs to be configured at 'EL3'
420 	 * to allow access to 'icc_sre_el1' at 'EL1'
421 	 * eg: z_arch_el3_plat_init can be used by platform.
422 	 */
423 	icc_sre = read_sysreg(ICC_SRE_EL1);
424 
425 	if (!(icc_sre & ICC_SRE_ELx_SRE_BIT)) {
426 		icc_sre = (icc_sre | ICC_SRE_ELx_SRE_BIT |
427 			   ICC_SRE_ELx_DIB_BIT | ICC_SRE_ELx_DFB_BIT);
428 		write_sysreg(icc_sre, ICC_SRE_EL1);
429 		icc_sre = read_sysreg(ICC_SRE_EL1);
430 
431 		__ASSERT_NO_MSG(icc_sre & ICC_SRE_ELx_SRE_BIT);
432 	}
433 
434 	write_sysreg(GIC_IDLE_PRIO, ICC_PMR_EL1);
435 
436 	/* Allow group1 interrupts */
437 	write_sysreg(1, ICC_IGRPEN1_EL1);
438 }
439 
440 /*
441  * TODO: Consider Zephyr in EL1NS.
442  */
gicv3_dist_init(void)443 static void gicv3_dist_init(void)
444 {
445 	unsigned int num_ints;
446 	unsigned int intid;
447 	unsigned int idx;
448 	mem_addr_t base = GIC_DIST_BASE;
449 
450 #ifdef CONFIG_GIC_SAFE_CONFIG
451 	/*
452 	 * Currently multiple OSes can run one the different CPU Cores which share single GIC,
453 	 * but GIC distributor should avoid to be re-configured in order to avoid crash the
454 	 * OSes has already been started.
455 	 */
456 	if (sys_read32(GICD_CTLR) & (BIT(GICD_CTLR_ENABLE_G0) | BIT(GICD_CTLR_ENABLE_G1NS))) {
457 		return;
458 	}
459 #endif
460 
461 	num_ints = sys_read32(GICD_TYPER);
462 	num_ints &= GICD_TYPER_ITLINESNUM_MASK;
463 	num_ints = (num_ints + 1) << 5;
464 
465 	/* Disable the distributor */
466 	sys_write32(0, GICD_CTLR);
467 	gic_wait_rwp(GIC_SPI_INT_BASE);
468 #ifdef CONFIG_GIC_SINGLE_SECURITY_STATE
469 	/*
470 	 * Before configuration, we need to check whether
471 	 * the GIC single security state mode is supported.
472 	 * Make sure GICD_CTRL_NS is 1.
473 	 */
474 	sys_set_bit(GICD_CTLR, GICD_CTRL_NS);
475 	__ASSERT(sys_test_bit(GICD_CTLR, GICD_CTRL_NS),
476 		"Current GIC does not support single security state");
477 #endif
478 
479 	/*
480 	 * Default configuration of all SPIs
481 	 */
482 	for (intid = GIC_SPI_INT_BASE; intid < num_ints;
483 	     intid += GIC_NUM_INTR_PER_REG) {
484 		idx = intid / GIC_NUM_INTR_PER_REG;
485 		/* Disable interrupt */
486 		sys_write32(BIT64_MASK(GIC_NUM_INTR_PER_REG),
487 			    ICENABLER(base, idx));
488 		/* Clear pending */
489 		sys_write32(BIT64_MASK(GIC_NUM_INTR_PER_REG),
490 			    ICPENDR(base, idx));
491 		sys_write32(IGROUPR_VAL, IGROUPR(base, idx));
492 		sys_write32(BIT64_MASK(GIC_NUM_INTR_PER_REG),
493 			    IGROUPMODR(base, idx));
494 
495 	}
496 	/* wait for rwp on GICD */
497 	gic_wait_rwp(GIC_SPI_INT_BASE);
498 
499 	/* Configure default priorities for all SPIs. */
500 	for (intid = GIC_SPI_INT_BASE; intid < num_ints;
501 	     intid += GIC_NUM_PRI_PER_REG) {
502 		sys_write32(GIC_INT_DEF_PRI_X4, IPRIORITYR(base, intid));
503 	}
504 
505 	/* Configure all SPIs as active low, level triggered by default */
506 	for (intid = GIC_SPI_INT_BASE; intid < num_ints;
507 	     intid += GIC_NUM_CFG_PER_REG) {
508 		idx = intid / GIC_NUM_CFG_PER_REG;
509 		sys_write32(0, ICFGR(base, idx));
510 	}
511 
512 #ifdef CONFIG_ARMV8_A_NS
513 	/* Enable distributor with ARE */
514 	sys_write32(BIT(GICD_CTRL_ARE_NS) | BIT(GICD_CTLR_ENABLE_G1NS),
515 		    GICD_CTLR);
516 #elif defined(CONFIG_GIC_SINGLE_SECURITY_STATE)
517 	/*
518 	 * For GIC single security state, the config GIC_SINGLE_SECURITY_STATE
519 	 * means the GIC is under single security state which has only two
520 	 * groups: group 0 and group 1.
521 	 * Then set GICD_CTLR_ARE and GICD_CTLR_ENABLE_G1 to enable Group 1
522 	 * interrupt.
523 	 * Since the GICD_CTLR_ARE and GICD_CTRL_ARE_S share BIT(4), and
524 	 * similarly the GICD_CTLR_ENABLE_G1 and GICD_CTLR_ENABLE_G1NS share
525 	 * BIT(1), we can reuse them.
526 	 */
527 	sys_write32(BIT(GICD_CTRL_ARE_S) | BIT(GICD_CTLR_ENABLE_G1NS),
528 		    GICD_CTLR);
529 #else
530 	/* enable Group 1 secure interrupts */
531 	sys_set_bit(GICD_CTLR, GICD_CTLR_ENABLE_G1S);
532 #endif
533 }
534 
/* Pack the MPIDR affinity levels into the GICR_TYPER.Affinity layout
 * (Aff3 in bits [31:24] down to Aff0 in bits [7:0]).
 */
static uint64_t arm_gic_mpidr_to_affinity(uint64_t mpidr)
{
#if defined(CONFIG_ARM)
	/* There is no Aff3 in AArch32 MPIDR */
	uint64_t aff3 = 0;
#else
	uint64_t aff3 = MPIDR_AFFLVL(mpidr, 3);
#endif
	uint64_t aff2 = MPIDR_AFFLVL(mpidr, 2);
	uint64_t aff1 = MPIDR_AFFLVL(mpidr, 1);
	uint64_t aff0 = MPIDR_AFFLVL(mpidr, 0);

	return (aff3 << 24) | (aff2 << 16) | (aff1 << 8) | aff0;
}
552 
/* Compare a redistributor's affinity against the calling PE's;
 * optionally restricted to affinity level 0 only.
 */
static bool arm_gic_aff_matching(uint64_t gicr_aff, uint64_t aff)
{
#if defined(CONFIG_GIC_V3_RDIST_MATCHING_AFF0_ONLY)
	/* Only the low 8 bits (Aff0) participate in the match */
	return ((gicr_aff ^ aff) & BIT64_MASK(8)) == 0;
#else
	return gicr_aff == aff;
#endif
}
563 
/* Read a 64-bit TYPER register; AArch32 needs two 32-bit accesses. */
static inline uint64_t arm_gic_get_typer(mem_addr_t addr)
{
	uint64_t val;

#if defined(CONFIG_ARM)
	/* AArch32: assemble the 64-bit value from low/high halves */
	val = sys_read32(addr);
	val |= (uint64_t)sys_read32(addr + 4) << 32;
#else
	val = sys_read64(addr);
#endif

	return val;
}
577 
/*
 * Walk the redistributor region looking for the frame whose
 * GICR_TYPER affinity matches the calling PE. Returns the frame's
 * base address, or NULL if not found.
 * NOTE(review): the fixed 0x20000 stride assumes two 64KB frames per
 * redistributor (no VLPI frames) — confirm against the target GIC.
 */
static mem_addr_t arm_gic_iterate_rdists(void)
{
	uint64_t aff = arm_gic_mpidr_to_affinity(GET_MPIDR());

	for (mem_addr_t rdist_addr = GIC_RDIST_BASE;
		rdist_addr < GIC_RDIST_BASE + GIC_RDIST_SIZE;
		rdist_addr += 0x20000) {
		uint64_t val = arm_gic_get_typer(rdist_addr + GICR_TYPER);
		uint64_t gicr_aff = GICR_TYPER_AFFINITY_VALUE_GET(val);

		if (arm_gic_aff_matching(gicr_aff, aff)) {
			return rdist_addr;
		}

		/* Stop at the frame marked Last in the region */
		if (GICR_TYPER_LAST_GET(val) == 1) {
			return (mem_addr_t)NULL;
		}
	}

	return (mem_addr_t)NULL;
}
599 
/*
 * Per-CPU GIC initialization: locate and record this CPU's
 * redistributor, set up its LPI tables (if ITS is enabled), wake the
 * redistributor, then initialize the CPU interface.
 */
static void __arm_gic_init(void)
{
	uint8_t cpu;
	mem_addr_t gic_rd_base;

	cpu = arch_curr_cpu()->id;
	gic_rd_base = arm_gic_iterate_rdists();
	__ASSERT(gic_rd_base != (mem_addr_t)NULL, "");

	/* Record it so gic_get_rdist() works for this CPU from now on */
	gic_rdists[cpu] = gic_rd_base;

#ifdef CONFIG_GIC_V3_ITS
	/* Enable LPIs in Redistributor */
	gicv3_rdist_setup_lpis(gic_get_rdist());
#endif

	gicv3_rdist_enable(gic_get_rdist());

	gicv3_cpuif_init();
}
620 
/* Boot-time GIC initialization: distributor first, then the boot
 * CPU's redistributor and CPU interface. Always succeeds.
 */
int arm_gic_init(const struct device *dev)
{
	gicv3_dist_init();
	__arm_gic_init();

	return 0;
}
629 DEVICE_DT_INST_DEFINE(0, arm_gic_init, NULL, NULL, NULL,
630 		      PRE_KERNEL_1, CONFIG_INTC_INIT_PRIORITY, NULL);
631 
632 #ifdef CONFIG_SMP
/* Per-secondary-CPU GIC bring-up (distributor is already initialized
 * by the boot CPU).
 */
void arm_gic_secondary_init(void)
{
	__arm_gic_init();

#ifdef CONFIG_GIC_V3_ITS
	/* Map this CPU Redistributor in all the ITS Collection tables */
	its_rdist_map();
#endif
}
642 #endif
643