/*
 * Copyright 2021 BayLibre, SAS
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(intc_gicv3_its, LOG_LEVEL_ERR);

#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/interrupt_controller/gicv3_its.h>
#include <zephyr/sys/barrier.h>

#include "intc_gic_common_priv.h"
#include "intc_gicv3_priv.h"

#define DT_DRV_COMPAT arm_gic_v3_its

/*
 * The current ITS implementation only handles GICv3 ITS physical interrupt
 * generation and is designed with the PCIe MSI/MSI-X use-case in mind.
 */

#define GITS_BASER_NR_REGS 8

/* convenient access to all redistributor base addresses */
extern mem_addr_t gic_rdists[CONFIG_MP_MAX_NUM_CPUS];

#define SIZE_256 256
#define SIZE_4K KB(4)
#define SIZE_16K KB(16)
#define SIZE_64K KB(64)

struct its_cmd_block {
	uint64_t raw_cmd[4];
};

#define ITS_CMD_QUEUE_SIZE SIZE_64K
#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SIZE / sizeof(struct its_cmd_block))

struct gicv3_its_data {
	mm_reg_t base;
	struct its_cmd_block *cmd_base;
	struct its_cmd_block *cmd_write;
	bool dev_table_is_indirect;
	uint64_t *indirect_dev_lvl1_table;
	size_t indirect_dev_lvl1_width;
	size_t indirect_dev_lvl2_width;
	size_t indirect_dev_page_size;
};

struct gicv3_its_config {
	uintptr_t base_addr;
	size_t base_size;
	struct its_cmd_block *cmd_queue;
	size_t cmd_queue_size;
};

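/* Find the last (most significant) set bit of x: returns its 1-based position, or 0 if x is 0 */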
static inline int fls_z(unsigned int x)
{
	unsigned int bits = sizeof(x) * 8;
	unsigned int cmp = 1 << (bits - 1);

	while (bits) {
		if (x & cmp) {
			return bits;
		}
		cmp >>= 1;
		bits--;
	}

	return 0;
}

/* wait up to 500ms, waking up every millisecond */
#define WAIT_QUIESCENT 500

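/*
 * Disable the ITS and wait for it to report quiescent, polling GITS_CTLR
 * every millisecond for up to WAIT_QUIESCENT milliseconds.
 */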
static int its_force_quiescent(struct gicv3_its_data *data)
{
	unsigned int count = WAIT_QUIESCENT;
	uint32_t reg = sys_read32(data->base + GITS_CTLR);

	if (GITS_CTLR_ENABLED_GET(reg)) {
		/* Disable ITS */
		reg &= ~MASK(GITS_CTLR_ENABLED);
		sys_write32(reg, data->base + GITS_CTLR);
	}

	while (1) {
		if (GITS_CTLR_QUIESCENT_GET(reg)) {
			return 0;
		}

		count--;
		if (!count) {
			return -EBUSY;
		}

		k_msleep(1);
		reg = sys_read32(data->base + GITS_CTLR);
	}

	return 0;
}

static const char *const its_base_type_string[] = {
	[GITS_BASER_TYPE_DEVICE] = "Devices",
	[GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
};

/* Probe the BASER(i) to get the largest supported page size */
static size_t its_probe_baser_page_size(struct gicv3_its_data *data, int i)
{
	uint64_t page_size = GITS_BASER_PAGE_SIZE_64K;

	while (page_size > GITS_BASER_PAGE_SIZE_4K) {
		uint64_t reg = sys_read64(data->base + GITS_BASER(i));

		reg &= ~MASK(GITS_BASER_PAGE_SIZE);
		reg |= MASK_SET(page_size, GITS_BASER_PAGE_SIZE);

		sys_write64(reg, data->base + GITS_BASER(i));

		reg = sys_read64(data->base + GITS_BASER(i));

		if (MASK_GET(reg, GITS_BASER_PAGE_SIZE) == page_size) {
			break;
		}

		switch (page_size) {
		case GITS_BASER_PAGE_SIZE_64K:
			page_size = GITS_BASER_PAGE_SIZE_16K;
			break;
		default:
			page_size = GITS_BASER_PAGE_SIZE_4K;
		}
	}

	switch (page_size) {
	case GITS_BASER_PAGE_SIZE_64K:
		return SIZE_64K;
	case GITS_BASER_PAGE_SIZE_16K:
		return SIZE_16K;
	default:
		return SIZE_4K;
	}
}

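/*
 * Walk the GITS_BASER<n> registers and allocate the Device and Collection
 * tables, using a two-level (indirect) Device table when the DeviceID space
 * is too large for a flat table.
 */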
static int its_alloc_tables(struct gicv3_its_data *data)
{
	unsigned int device_ids = GITS_TYPER_DEVBITS_GET(sys_read64(data->base + GITS_TYPER)) + 1;
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; ++i) {
		uint64_t reg = sys_read64(data->base + GITS_BASER(i));
		unsigned int type = GITS_BASER_TYPE_GET(reg);
		size_t page_size, entry_size, page_cnt, lvl2_width = 0;
		bool indirect = false;
		void *alloc_addr;

		entry_size = GITS_BASER_ENTRY_SIZE_GET(reg) + 1;

		switch (GITS_BASER_PAGE_SIZE_GET(reg)) {
		case GITS_BASER_PAGE_SIZE_4K:
			page_size = SIZE_4K;
			break;
		case GITS_BASER_PAGE_SIZE_16K:
			page_size = SIZE_16K;
			break;
		case GITS_BASER_PAGE_SIZE_64K:
			page_size = SIZE_64K;
			break;
		default:
			page_size = SIZE_4K;
		}

		switch (type) {
		case GITS_BASER_TYPE_DEVICE:
			if (device_ids > 16) {
				/* Use the largest possible page size for indirect */
				page_size = its_probe_baser_page_size(data, i);

				/*
				 * lvl1 table size:
				 * subtract the ID bits handled by each (sparse)
				 * lvl2 table from the 'ids' value reported by
				 * the ITS hardware, then multiply by the lvl1
				 * table entry size.
				 */
				lvl2_width = fls_z(page_size / entry_size) - 1;
				device_ids -= lvl2_width + 1;

				/* The level 1 entry size is a 64-bit pointer */
				entry_size = sizeof(uint64_t);

				indirect = true;
			}

			page_cnt = ROUND_UP(entry_size << device_ids, page_size) / page_size;
			break;
		case GITS_BASER_TYPE_COLLECTION:
			page_cnt =
				ROUND_UP(entry_size * CONFIG_MP_MAX_NUM_CPUS, page_size) / page_size;
			break;
		default:
			continue;
		}

		LOG_INF("Allocating %s table of %ldx%ldK pages (%ld bytes entry)",
			its_base_type_string[type], page_cnt, page_size / 1024, entry_size);

		alloc_addr = k_aligned_alloc(page_size, page_size * page_cnt);
		if (!alloc_addr) {
			return -ENOMEM;
		}

		memset(alloc_addr, 0, page_size * page_cnt);

		switch (page_size) {
		case SIZE_4K:
			reg = MASK_SET(GITS_BASER_PAGE_SIZE_4K, GITS_BASER_PAGE_SIZE);
			break;
		case SIZE_16K:
			reg = MASK_SET(GITS_BASER_PAGE_SIZE_16K, GITS_BASER_PAGE_SIZE);
			break;
		case SIZE_64K:
			reg = MASK_SET(GITS_BASER_PAGE_SIZE_64K, GITS_BASER_PAGE_SIZE);
			break;
		}

		reg |= MASK_SET(page_cnt - 1, GITS_BASER_SIZE);
		reg |= MASK_SET(GIC_BASER_SHARE_INNER, GITS_BASER_SHAREABILITY);
		reg |= MASK_SET((uintptr_t)alloc_addr >> GITS_BASER_ADDR_SHIFT, GITS_BASER_ADDR);
		reg |= MASK_SET(GIC_BASER_CACHE_INNERLIKE, GITS_BASER_OUTER_CACHE);
		reg |= MASK_SET(GIC_BASER_CACHE_RAWAWB, GITS_BASER_INNER_CACHE);
		reg |= MASK_SET(indirect ? 1 : 0, GITS_BASER_INDIRECT);
		reg |= MASK_SET(1, GITS_BASER_VALID);

		sys_write64(reg, data->base + GITS_BASER(i));

		/* TOFIX: check page size & SHAREABILITY validity after write */

		if (type == GITS_BASER_TYPE_DEVICE && indirect) {
			data->dev_table_is_indirect = indirect;
			data->indirect_dev_lvl1_table = alloc_addr;
			data->indirect_dev_lvl1_width = device_ids;
			data->indirect_dev_lvl2_width = lvl2_width;
			data->indirect_dev_page_size = page_size;
			LOG_DBG("%s table Indirection enabled", its_base_type_string[type]);
		}
	}

	return 0;
}

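/* The queue is full when the write pointer is one entry behind the read pointer */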
static bool its_queue_full(struct gicv3_its_data *data)
{
	int widx;
	int ridx;

	widx = data->cmd_write - data->cmd_base;
	ridx = sys_read32(data->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	return (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx);
}

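/*
 * Claim the next free slot in the command queue, waiting up to ~1s for space,
 * advance the write pointer (with wrap-around) and return the zeroed command.
 */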
static struct its_cmd_block *its_allocate_entry(struct gicv3_its_data *data)
{
	struct its_cmd_block *cmd;
	unsigned int count = 1000000; /* 1s! */

	while (its_queue_full(data)) {
		count--;
		if (!count) {
			LOG_ERR("ITS queue not draining");
			return NULL;
		}
		k_usleep(1);
	}

	cmd = data->cmd_write++;

	/* Handle queue wrapping */
	if (data->cmd_write == (data->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES)) {
		data->cmd_write = data->cmd_base;
	}

	/* Clear command */
	cmd->raw_cmd[0] = 0;
	cmd->raw_cmd[1] = 0;
	cmd->raw_cmd[2] = 0;
	cmd->raw_cmd[3] = 0;

	return cmd;
}

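/*
 * Publish a queued command by advancing GITS_CWRITER, then poll GITS_CREADR
 * until the ITS has consumed it (~1s timeout).
 */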
static int its_post_command(struct gicv3_its_data *data, struct its_cmd_block *cmd)
{
	uint64_t wr_idx, rd_idx, idx;
	unsigned int count = 1000000; /* 1s! */

	wr_idx = (data->cmd_write - data->cmd_base) * sizeof(*cmd);
	rd_idx = sys_read32(data->base + GITS_CREADR);

	barrier_dsync_fence_full();

	sys_write32(wr_idx, data->base + GITS_CWRITER);

	while (1) {
		idx = sys_read32(data->base + GITS_CREADR);

		if (idx == wr_idx) {
			break;
		}

		count--;
		if (!count) {
			LOG_ERR("ITS queue timeout (rd %lld => %lld => wr %lld)",
				rd_idx, idx, wr_idx);
			return -ETIMEDOUT;
		}
		k_usleep(1);
	}

	return 0;
}

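/* SYNC: ensure all previous commands have taken effect for the given redistributor */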
static int its_send_sync_cmd(struct gicv3_its_data *data, uintptr_t rd_addr)
{
	struct its_cmd_block *cmd = its_allocate_entry(data);

	if (!cmd) {
		return -EBUSY;
	}

	cmd->raw_cmd[0] = MASK_SET(GITS_CMD_ID_SYNC, GITS_CMD_ID);
	cmd->raw_cmd[2] = MASK_SET(rd_addr >> GITS_CMD_RDBASE_ALIGN, GITS_CMD_RDBASE);

	return its_post_command(data, cmd);
}

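/* MAPC: map (or unmap) an interrupt collection to a redistributor */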
static int its_send_mapc_cmd(struct gicv3_its_data *data, uint32_t icid,
			     uintptr_t rd_addr, bool valid)
{
	struct its_cmd_block *cmd = its_allocate_entry(data);

	if (!cmd) {
		return -EBUSY;
	}

	cmd->raw_cmd[0] = MASK_SET(GITS_CMD_ID_MAPC, GITS_CMD_ID);
	cmd->raw_cmd[2] = MASK_SET(icid, GITS_CMD_ICID) |
			  MASK_SET(rd_addr >> GITS_CMD_RDBASE_ALIGN, GITS_CMD_RDBASE) |
			  MASK_SET(valid ? 1 : 0, GITS_CMD_VALID);

	return its_post_command(data, cmd);
}

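/* MAPD: map (or unmap) a DeviceID to its Interrupt Translation Table (ITT) */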
static int its_send_mapd_cmd(struct gicv3_its_data *data, uint32_t device_id,
			     uint32_t size, uintptr_t itt_addr, bool valid)
{
	struct its_cmd_block *cmd = its_allocate_entry(data);

	if (!cmd) {
		return -EBUSY;
	}

	cmd->raw_cmd[0] = MASK_SET(GITS_CMD_ID_MAPD, GITS_CMD_ID) |
			  MASK_SET(device_id, GITS_CMD_DEVICEID);
	cmd->raw_cmd[1] = MASK_SET(size, GITS_CMD_SIZE);
	cmd->raw_cmd[2] = MASK_SET(itt_addr >> GITS_CMD_ITTADDR_ALIGN, GITS_CMD_ITTADDR) |
			  MASK_SET(valid ? 1 : 0, GITS_CMD_VALID);

	return its_post_command(data, cmd);
}

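/* MAPTI: map a (DeviceID, EventID) pair to a physical LPI INTID within a collection */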
static int its_send_mapti_cmd(struct gicv3_its_data *data, uint32_t device_id,
			      uint32_t event_id, uint32_t intid, uint32_t icid)
{
	struct its_cmd_block *cmd = its_allocate_entry(data);

	if (!cmd) {
		return -EBUSY;
	}

	cmd->raw_cmd[0] = MASK_SET(GITS_CMD_ID_MAPTI, GITS_CMD_ID) |
			  MASK_SET(device_id, GITS_CMD_DEVICEID);
	cmd->raw_cmd[1] = MASK_SET(event_id, GITS_CMD_EVENTID) |
			  MASK_SET(intid, GITS_CMD_PINTID);
	cmd->raw_cmd[2] = MASK_SET(icid, GITS_CMD_ICID);

	return its_post_command(data, cmd);
}

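/* INT: generate the LPI mapped to the given (DeviceID, EventID) pair */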
static int its_send_int_cmd(struct gicv3_its_data *data, uint32_t device_id,
			    uint32_t event_id)
{
	struct its_cmd_block *cmd = its_allocate_entry(data);

	if (!cmd) {
		return -EBUSY;
	}

	cmd->raw_cmd[0] = MASK_SET(GITS_CMD_ID_INT, GITS_CMD_ID) |
			  MASK_SET(device_id, GITS_CMD_DEVICEID);
	cmd->raw_cmd[1] = MASK_SET(event_id, GITS_CMD_EVENTID);

	return its_post_command(data, cmd);
}

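/* INVALL: invalidate any cached LPI configuration for the given collection */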
static int its_send_invall_cmd(struct gicv3_its_data *data, uint32_t icid)
{
	struct its_cmd_block *cmd = its_allocate_entry(data);

	if (!cmd) {
		return -EBUSY;
	}

	cmd->raw_cmd[0] = MASK_SET(GITS_CMD_ID_INVALL, GITS_CMD_ID);
	cmd->raw_cmd[2] = MASK_SET(icid, GITS_CMD_ICID);

	return its_post_command(data, cmd);
}

static int gicv3_its_send_int(const struct device *dev, uint32_t device_id, uint32_t event_id)
{
	struct gicv3_its_data *data = dev->data;

	/* TOFIX check device_id & event_id bounds */

	return its_send_int_cmd(data, device_id, event_id);
}

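/* Zero the command queue and program GITS_CBASER/GITS_CWRITER to point at it */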
static void its_setup_cmd_queue(const struct device *dev)
{
	const struct gicv3_its_config *cfg = dev->config;
	struct gicv3_its_data *data = dev->data;
	uint64_t reg = 0;

	/* Zero out cmd table */
	memset(cfg->cmd_queue, 0, cfg->cmd_queue_size);

	reg |= MASK_SET(cfg->cmd_queue_size / SIZE_4K, GITS_CBASER_SIZE);
	reg |= MASK_SET(GIC_BASER_SHARE_INNER, GITS_CBASER_SHAREABILITY);
	reg |= MASK_SET((uintptr_t)cfg->cmd_queue >> GITS_CBASER_ADDR_SHIFT, GITS_CBASER_ADDR);
	reg |= MASK_SET(GIC_BASER_CACHE_RAWAWB, GITS_CBASER_OUTER_CACHE);
	reg |= MASK_SET(GIC_BASER_CACHE_RAWAWB, GITS_CBASER_INNER_CACHE);
	reg |= MASK_SET(1, GITS_CBASER_VALID);

	sys_write64(reg, data->base + GITS_CBASER);

	data->cmd_base = (struct its_cmd_block *)cfg->cmd_queue;
	data->cmd_write = data->cmd_base;

	LOG_INF("Allocated %ld entries for command table", ITS_CMD_QUEUE_NR_ENTRIES);

	sys_write64(0, data->base + GITS_CWRITER);
}

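/*
 * Return the RDbase target for a CPU: the physical redistributor address when
 * GITS_TYPER.PTA is set, otherwise the processor number from GICR_TYPER.
 */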
static uintptr_t gicv3_rdist_get_rdbase(const struct device *dev, unsigned int cpuid)
{
	struct gicv3_its_data *data = dev->data;
	uint64_t typer = sys_read64(data->base + GITS_TYPER);

	if (GITS_TYPER_PTA_GET(typer)) {
		return gic_rdists[cpuid];
	} else {
		return GICR_TYPER_PROCESSOR_NUMBER_GET(sys_read64(gic_rdists[cpuid] + GICR_TYPER));
	}
}

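/*
 * Map a (DeviceID, EventID) pair to a physical LPI INTID on the current CPU's
 * collection, then synchronize the corresponding redistributor.
 */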
static int gicv3_its_map_intid(const struct device *dev, uint32_t device_id, uint32_t event_id,
			       unsigned int intid)
{
	struct gicv3_its_data *data = dev->data;
	int ret;

	/* TOFIX check device_id, event_id & intid bounds */

	if (intid < 8192) {
		return -EINVAL;
	}

	/* The CPU id directly maps as ICID for the current CPU redistributor */
	ret = its_send_mapti_cmd(data, device_id, event_id, intid, arch_curr_cpu()->id);
	if (ret) {
		LOG_ERR("Failed to map eventid %d to intid %d for deviceid %x",
			event_id, intid, device_id);
		return ret;
	}

	return its_send_sync_cmd(data, gicv3_rdist_get_rdbase(dev, arch_curr_cpu()->id));
}

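/*
 * Set up translation for a DeviceID: allocate the Level 2 Device table entry
 * when indirection is in use, allocate the ITT, then issue a MAPD command.
 */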
static int gicv3_its_init_device_id(const struct device *dev, uint32_t device_id,
				    unsigned int nites)
{
	struct gicv3_its_data *data = dev->data;
	size_t entry_size, alloc_size;
	int nr_ites;
	void *itt;
	int ret;

	/* TOFIX check device_id & nites bounds */

	entry_size = GITS_TYPER_ITT_ENTRY_SIZE_GET(sys_read64(data->base + GITS_TYPER)) + 1;

	if (data->dev_table_is_indirect) {
		size_t offset = device_id >> data->indirect_dev_lvl2_width;

		/* Check if DeviceID can fit in the Level 1 table */
		if (offset > (1 << data->indirect_dev_lvl1_width)) {
			return -EINVAL;
		}

		/* Check if a Level 2 table has already been allocated for the DeviceID */
		if (!data->indirect_dev_lvl1_table[offset]) {
			void *alloc_addr;

			LOG_INF("Allocating Level 2 Device %ldK table",
				data->indirect_dev_page_size / 1024);

			alloc_addr = k_aligned_alloc(data->indirect_dev_page_size,
						     data->indirect_dev_page_size);
			if (!alloc_addr) {
				return -ENOMEM;
			}

			memset(alloc_addr, 0, data->indirect_dev_page_size);

			data->indirect_dev_lvl1_table[offset] = (uintptr_t)alloc_addr |
								MASK_SET(1, GITS_BASER_VALID);

			barrier_dsync_fence_full();
		}
	}

	/* ITT must have a power-of-2 number of entries, and at least 2 */
	nr_ites = MAX(2, nites);
	alloc_size = ROUND_UP(nr_ites * entry_size, 256);

	LOG_INF("Allocating ITT for DeviceID %x and %d vectors (%ld bytes entry)",
		device_id, nr_ites, entry_size);

	itt = k_aligned_alloc(256, alloc_size);
	if (!itt) {
		return -ENOMEM;
	}

	/* size is log2(ites) - 1, equivalent to (fls(ites) - 1) - 1 */
	ret = its_send_mapd_cmd(data, device_id, fls_z(nr_ites) - 2, (uintptr_t)itt, true);
	if (ret) {
		LOG_ERR("Failed to map device id %x ITT table", device_id);
		return ret;
	}

	return 0;
}

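/* Allocate the next LPI INTID (simple incrementing allocator, INTIDs are never released) */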
static unsigned int gicv3_its_alloc_intid(const struct device *dev)
{
	return atomic_inc(&nlpi_intid);
}

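/* Return the GITS_TRANSLATER doorbell address a device must write to raise an MSI */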
static uint32_t gicv3_its_get_msi_addr(const struct device *dev)
{
	const struct gicv3_its_config *cfg = (const struct gicv3_its_config *)dev->config;

	return cfg->base_addr + GITS_TRANSLATER;
}

#define ITS_RDIST_MAP(n)								\
	{										\
		const struct device *const dev = DEVICE_DT_INST_GET(n);		\
		struct gicv3_its_data *data;						\
		int ret;								\
											\
		if (dev) {								\
			data = (struct gicv3_its_data *) dev->data;			\
			ret = its_send_mapc_cmd(data, arch_curr_cpu()->id,		\
						gicv3_rdist_get_rdbase(dev, arch_curr_cpu()->id), \
						true);					\
			if (ret) {							\
				LOG_ERR("Failed to map CPU%d redistributor",		\
					arch_curr_cpu()->id);				\
			}								\
		}									\
	}

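/* Map the calling CPU's redistributor to a collection (ICID == CPU id) on every ITS instance */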
void its_rdist_map(void)
{
	DT_INST_FOREACH_STATUS_OKAY(ITS_RDIST_MAP)
}

#define ITS_RDIST_INVALL(n)								\
	{										\
		const struct device *const dev = DEVICE_DT_INST_GET(n);		\
		struct gicv3_its_data *data;						\
		int ret;								\
											\
		if (dev) {								\
			data = (struct gicv3_its_data *) dev->data;			\
			ret = its_send_invall_cmd(data, arch_curr_cpu()->id);		\
			if (ret) {							\
				LOG_ERR("Failed to sync RDIST LPI cache for CPU%d",	\
					arch_curr_cpu()->id);				\
			}								\
											\
			its_send_sync_cmd(data,						\
					  gicv3_rdist_get_rdbase(dev, arch_curr_cpu()->id)); \
		}									\
	}

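/* Invalidate the cached LPI configuration for the calling CPU's collection on every ITS instance */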
void its_rdist_invall(void)
{
	DT_INST_FOREACH_STATUS_OKAY(ITS_RDIST_INVALL)
}

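/*
 * Map the ITS registers, quiesce the ITS, allocate the private tables and
 * command queue, enable the ITS and map the boot CPU redistributor.
 */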
static int gicv3_its_init(const struct device *dev)
{
	const struct gicv3_its_config *cfg = dev->config;
	struct gicv3_its_data *data = dev->data;
	uint32_t reg;
	int ret;

	device_map(&data->base, cfg->base_addr, cfg->base_size, K_MEM_CACHE_NONE);

	ret = its_force_quiescent(data);
	if (ret) {
		LOG_ERR("Failed to quiesce, giving up");
		return ret;
	}

	ret = its_alloc_tables(data);
	if (ret) {
		LOG_ERR("Failed to allocate tables, giving up");
		return ret;
	}

	its_setup_cmd_queue(dev);

	reg = sys_read32(data->base + GITS_CTLR);
	reg |= MASK_SET(1, GITS_CTLR_ENABLED);
	sys_write32(reg, data->base + GITS_CTLR);

	/* Map the boot CPU id to the CPU redistributor */
	ret = its_send_mapc_cmd(data, arch_curr_cpu()->id,
				gicv3_rdist_get_rdbase(dev, arch_curr_cpu()->id), true);
	if (ret) {
		LOG_ERR("Failed to map boot CPU redistributor");
		return ret;
	}

	return 0;
}

struct its_driver_api gicv3_its_api = {
	.alloc_intid = gicv3_its_alloc_intid,
	.setup_deviceid = gicv3_its_init_device_id,
	.map_intid = gicv3_its_map_intid,
	.send_int = gicv3_its_send_int,
	.get_msi_addr = gicv3_its_get_msi_addr,
};

#define GICV3_ITS_INIT(n)							\
	static struct its_cmd_block gicv3_its_cmd##n[ITS_CMD_QUEUE_NR_ENTRIES]	\
			__aligned(ITS_CMD_QUEUE_SIZE);				\
	static struct gicv3_its_data gicv3_its_data##n;				\
	static const struct gicv3_its_config gicv3_its_config##n = {		\
		.base_addr = DT_INST_REG_ADDR(n),				\
		.base_size = DT_INST_REG_SIZE(n),				\
		.cmd_queue = gicv3_its_cmd##n,					\
		.cmd_queue_size = sizeof(gicv3_its_cmd##n),			\
	};									\
	DEVICE_DT_INST_DEFINE(n, &gicv3_its_init, NULL,				\
			      &gicv3_its_data##n,				\
			      &gicv3_its_config##n,				\
			      PRE_KERNEL_1,					\
			      CONFIG_INTC_INIT_PRIORITY,			\
			      &gicv3_its_api);

DT_INST_FOREACH_STATUS_OKAY(GICV3_ITS_INIT)