/*
 * Excerpts from the GICv3 ITS driver (drivers/irqchip/irq-gic-v3-its.c),
 * collected by a full-text search for "synquacer-pre-its".
 * The fragments below are discontiguous.
 */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 */

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v4.h>

#include "irq-gic-common.h"

/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */

/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
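/*
 * A minimal sketch of the two structures just described, inferred from
 * their use throughout the fragments below (not a verbatim copy of the
 * driver's definitions):
 */
struct its_collection {
	u64		target_address;
	u16		col_id;
};

struct its_baser {
	void		*base;
	u64		val;
	u32		order;
	u32		psz;
};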
/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 */
	u32			pre_its_base;	/* for Socionext Synquacer */

#define is_v4(its)		(!!((its)->typer & GITS_TYPER_VLPIS))
#define is_v4_1(its)		(!!((its)->typer & GITS_TYPER_VMAPP))
#define device_ids(its)		(FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)

		if (gic_rdists->has_rvpeid &&				\
		    gic_rdists->gicd_typer2 & GICD_TYPER2_VIL)		\
			nvpeid = 1 + (gic_rdists->gicd_typer2 &		\
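/*
 * For context, the fragment above sits inside a statement-expression
 * macro along these lines (a sketch; the field mask name matches the
 * GICD_TYPER2 bits used above):
 */
#define ITS_MAX_VPEID_BITS						\
	({								\
		int nvpeid = 16;					\
		if (gic_rdists->has_rvpeid &&				\
		    gic_rdists->gicd_typer2 & GICD_TYPER2_VIL)		\
			nvpeid = 1 + (gic_rdists->gicd_typer2 &		\
				      GICD_TYPER2_VID);			\
									\
		nvpeid;							\
	})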
/*
 * The ITS view of a device - belongs to an ITS, owns an interrupt
 * translation table, and a list of interrupts. If some of its
 * LPIs are injected into a guest (GICv4), the event_map.vm field
 * indicates which one.
 */
	struct its_node		*its;

#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_cpu(cpu)		(per_cpu_ptr(gic_rdists->rdist, cpu))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
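/*
 * A rough sketch (a hypothetical field subset, inferred from the
 * accessors above and from usage later in this file) of the per-CPU
 * redistributor record that gic_rdists->rdist points at:
 */
struct rdist_sketch {
	raw_spinlock_t	rd_lock;	 /* serializes direct-LPI register access */
	void __iomem	*rd_base;	 /* redistributor MMIO base */
	struct page	*pend_page;	 /* LPI pending table */
	phys_addr_t	phys_base;	 /* physical address, for GITS_TYPER.PTA */
	void		*vpe_l1_base;	 /* GICv4.1 vPE level-1 table */
	cpumask_t	*vpe_table_mask; /* CPUs sharing that table */
	u64		flags;		 /* RD_LOCAL_* state */
};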
static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
	return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);

	struct its_node *its;					/* get_its_list() */
	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
		if (require_its_list_vmovp(vm, its))
			__set_bit(its->list_nr, &its_list);

	return d->hwirq - its_dev->event_map.lpi_base;		/* its_get_event_id() */

	struct its_node *its = its_dev->its;			/* dev_event_to_col() */
	return its->collections + its_dev->event_map.col_map[event];

	if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))	/* dev_event_to_vlpi_map() */
	return &its_dev->event_map.vlpi_maps[event];

	raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);		/* vpe_to_cpuid_lock() */
	return vpe->col_idx;

	raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);	/* vpe_to_cpuid_unlock() */

		cpu = vpe_to_cpuid_lock(map->vpe, flags);	/* irq_to_cpuid_lock() */
		cpu = its_dev->event_map.col_map[its_get_event_id(d)];

	vpe_to_cpuid_unlock(map->vpe, flags);			/* irq_to_cpuid_unlock() */

	if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))	/* valid_col() */

static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
	if (valid_col(its->collections + vpe->col_idx))

/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */

/*
 * The ITS command block, which is what the ITS actually parses.
 */
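/*
 * A sketch of that command block, implied by the encoders below and by
 * its_fixup_cmd(): four 64-bit words, converted to little-endian before
 * being handed to the hardware.
 */
struct its_cmd_block {
	union {
		u64	raw_cmd[4];
		__le64	raw_cmd_le[4];
	};
};

/*
 * The its_mask_encode() helper used by every encoder below is, in
 * essence, a masked bitfield insert (a sketch consistent with the
 * (value, msb, lsb) call sites that follow):
 */
static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
	u64 mask = GENMASK_ULL(h, l);

	*raw_cmd &= ~mask;
	*raw_cmd |= (val << l) & mask;
}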
	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);	/* its_encode_cmd() */
	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);	/* its_encode_devid() */
	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);		/* its_encode_event_id() */
	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);	/* its_encode_phys_id() */
	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);		/* its_encode_size() */
	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);	/* its_encode_itt() */
	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);	/* its_encode_valid() */
	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);	/* its_encode_target() */
	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);		/* its_encode_collection() */
	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);	/* its_encode_vpeid() */
	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);	/* its_encode_virt_id() */
	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);	/* its_encode_db_phys_id() */
	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);	/* its_encode_db_valid() */
	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);	/* its_encode_seq_num() */
	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);	/* its_encode_its_list() */
	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);	/* its_encode_vpt_addr() */
	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);	/* its_encode_vpt_size() */
	its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);	/* its_encode_vconf_addr() */
	its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);		/* its_encode_alloc() */
	its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);		/* its_encode_ptz() */
	its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);	/* its_encode_vmapp_default_db() */
	its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);	/* its_encode_vmovp_default_db() */
	its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);		/* its_encode_db() */
	its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32);		/* its_encode_sgi_intid() */
	its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20);	/* its_encode_sgi_priority() */
	its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10);		/* its_encode_sgi_group() */
	its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9);		/* its_encode_sgi_clear() */
	its_mask_encode(&cmd->raw_cmd[0], en, 8, 8);		/* its_encode_sgi_enable() */

	cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);	/* its_fixup_cmd() */
	cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);

static struct its_collection *its_build_mapd_cmd(struct its_node *its,
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

static struct its_collection *its_build_mapc_cmd(struct its_node *its,
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);
	return desc->its_mapc_cmd.col;

static struct its_collection *its_build_mapti_cmd(struct its_node *its,
	col = dev_event_to_col(desc->its_mapti_cmd.dev,
			       desc->its_mapti_cmd.event_id);
	its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

static struct its_collection *its_build_movi_cmd(struct its_node *its,
	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

static struct its_collection *its_build_discard_cmd(struct its_node *its,
	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

static struct its_collection *its_build_inv_cmd(struct its_node *its,
	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

static struct its_collection *its_build_int_cmd(struct its_node *its,
	col = dev_event_to_col(desc->its_int_cmd.dev,
			       desc->its_int_cmd.event_id);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

static struct its_collection *its_build_clear_cmd(struct its_node *its,
	col = dev_event_to_col(desc->its_clear_cmd.dev,
			       desc->its_clear_cmd.event_id);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

static struct its_collection *its_build_invall_cmd(struct its_node *its,
	its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
	return desc->its_invall_cmd.col;

static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
	return valid_vpe(its, desc->its_vinvall_cmd.vpe);

static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
	if (!desc->its_vmapp_cmd.valid) {
		if (is_v4_1(its)) {
			alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
	vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
	target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
	its_encode_vpt_size(cmd, LPI_NRBITS - 1);
	if (!is_v4_1(its))
	vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
	alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
	its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
	return valid_vpe(its, desc->its_vmapp_cmd.vpe);

static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
	if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
	return valid_vpe(its, desc->its_vmapti_cmd.vpe);

static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
	if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
	return valid_vpe(its, desc->its_vmovi_cmd.vpe);

static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
	target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
	its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
	its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
	its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
	if (is_v4_1(its)) {
		its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
	return valid_vpe(its, desc->its_vmovp_cmd.vpe);

static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
	map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
				    desc->its_inv_cmd.event_id);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
	return valid_vpe(its, map->vpe);

static struct its_vpe *its_build_vint_cmd(struct its_node *its,
	map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
				    desc->its_int_cmd.event_id);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);
	return valid_vpe(its, map->vpe);

static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
	map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
				    desc->its_clear_cmd.event_id);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
	return valid_vpe(its, map->vpe);

static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
	if (WARN_ON(!is_v4_1(its)))
	its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);
	return valid_vpe(its, desc->its_invdb_cmd.vpe);

static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
	if (WARN_ON(!is_v4_1(its)))
	its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
	its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi);
	its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority);
	its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group);
	its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear);
	its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable);
	return valid_vpe(its, desc->its_vsgi_cmd.vpe);

static u64 its_cmd_ptr_to_offset(struct its_node *its,
	return (ptr - its->cmd_base) * sizeof(*ptr);
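/*
 * For reference, the queue geometry assumed by the arithmetic below: a
 * 64kB ring of 32-byte (4 x u64) commands, i.e.
 */
#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))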
static int its_queue_full(struct its_node *its)
	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
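	/*
	 * The test guarded by the comment above is the classic ring-buffer
	 * full check (one slot is kept free so that full != empty):
	 */
	return (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx);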
static struct its_cmd_block *its_allocate_entry(struct its_node *its)
	while (its_queue_full(its)) {
		count--;
			pr_err_ratelimited("ITS queue not draining\n");

	cmd = its->cmd_write++;
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	cmd->raw_cmd[0] = 0;
	cmd->raw_cmd[1] = 0;
	cmd->raw_cmd[2] = 0;
	cmd->raw_cmd[3] = 0;

static struct its_cmd_block *its_post_commands(struct its_node *its)
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
	writel_relaxed(wr, its->base + GITS_CWRITER);
	return its->cmd_write;

static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
	/* Make sure the commands written to memory are observable by the ITS. */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)

static int its_wait_for_range_completion(struct its_node *its,
	to_idx = its_cmd_ptr_to_offset(its, to);
		rd_idx = readl_relaxed(its->base + GITS_CREADR);
		/* Compute progress, taking the potential wrap-around into account. */
		delta = rd_idx - prev_idx;
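		/*
		 * A sketch of the wrap-around handling the comment above
		 * refers to: if the read index moved past the end of the
		 * ring, bias the delta by the queue size.
		 */
		if (delta < 0)
			delta += ITS_CMD_QUEUE_SZ;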
		count--;
			pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
			return -1;

void name(struct its_node *its,						\
	raw_spin_lock_irqsave(&its->lock, flags);			\
	cmd = its_allocate_entry(its);					\
		raw_spin_unlock_irqrestore(&its->lock, flags);		\
	sync_obj = builder(its, cmd, desc);				\
	its_flush_cmd(its, cmd);					\
		sync_cmd = its_allocate_entry(its);			\
		buildfn(its, sync_cmd, sync_obj);			\
		its_flush_cmd(its, sync_cmd);				\
	rd_idx = readl_relaxed(its->base + GITS_CREADR);		\
	next_cmd = its_post_commands(its);				\
	raw_spin_unlock_irqrestore(&its->lock, flags);			\
	if (its_wait_for_range_completion(its, rd_idx, next_cmd))	\
		pr_err_ratelimited("ITS cmd %ps failed\n", builder);	\

static void its_build_sync_cmd(struct its_node *its,
	its_encode_target(sync_cmd, sync_col->target_address);

static void its_build_vsync_cmd(struct its_node *its,
	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
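/*
 * For context: the macro body above is stamped out twice, once for
 * physical and once for virtual commands, along these lines:
 */
static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
			     struct its_collection, its_build_sync_cmd)
static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
			     struct its_vpe, its_build_vsync_cmd)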
	its_send_single_command(dev->its, its_build_int_cmd, &desc);	/* its_send_int() */
	its_send_single_command(dev->its, its_build_clear_cmd, &desc);	/* its_send_clear() */
	its_send_single_command(dev->its, its_build_inv_cmd, &desc);	/* its_send_inv() */
	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);	/* its_send_mapd() */

static void its_send_mapc(struct its_node *its, struct its_collection *col,
	its_send_single_command(its, its_build_mapc_cmd, &desc);

	its_send_single_command(dev->its, its_build_mapti_cmd, &desc);	/* its_send_mapti() */
	its_send_single_command(dev->its, its_build_movi_cmd, &desc);	/* its_send_movi() */
	its_send_single_command(dev->its, its_build_discard_cmd, &desc);	/* its_send_discard() */

static void its_send_invall(struct its_node *its, struct its_collection *col)
	its_send_single_command(its, its_build_invall_cmd, &desc);

	desc.its_vmapti_cmd.vpe = map->vpe;			/* its_send_vmapti() */
	desc.its_vmapti_cmd.virt_id = map->vintid;
	desc.its_vmapti_cmd.db_enabled = map->db_enabled;
	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);

	desc.its_vmovi_cmd.vpe = map->vpe;			/* its_send_vmovi() */
	desc.its_vmovi_cmd.db_enabled = map->db_enabled;
	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);

static void its_send_vmapp(struct its_node *its,
	desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
	its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);

	struct its_node *its;					/* its_send_vmovp() */
	int col_id = vpe->col_idx;
		its = list_first_entry(&its_nodes, struct its_node, entry);
		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
	 * Wall <-- Head.
	desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
		if (!require_its_list_vmovp(vpe->its_vm, its))
		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);

static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
	its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);

	its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);	/* its_send_vinv() */
	its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);	/* its_send_vint() */
	its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);	/* its_send_vclear() */

static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
	its_send_single_vcommand(its, its_build_invdb_cmd, &desc);

/*
 * irqchip functions - assumes MSI, mostly.
 */

		va = page_address(map->vm->vprop_page);		/* lpi_write_config() */
		hwirq = map->vintid;
		map->properties &= ~clr;
		map->properties |= set | LPI_PROP_GROUP1;
		va = gic_rdists->prop_table_va;
		hwirq = d->hwirq;
	cfg = va + hwirq - 8192;
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)

	WARN_ON(!is_v4_1(its_dev->its));			/* direct_lpi_inv() */
		val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
		val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
		val = d->hwirq;
	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);

	if (gic_rdists->has_direct_lpi &&			/* lpi_update_config() */
	    (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))

	/*
	 * GICv4.1 does away with the per-LPI nonsense, nothing to do
	 * here.
	 */
	if (is_v4_1(its_dev->its))				/* its_vlpi_set_doorbell() */
	if (map->db_enabled == enable)
	map->db_enabled = enable;
	 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI

		return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);	/* its_read_lpi_count() */
	return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);

		atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);	/* its_inc_lpi_count() */
		atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);

		atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);	/* its_dec_lpi_count() */
		atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
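/*
 * The counters read and updated above live in a small per-CPU structure
 * along these lines (a sketch inferred from the accessors):
 */
struct cpu_lpi_count {
	atomic_t	managed;
	atomic_t	unmanaged;
};

static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count);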
	node = its_dev->its->numa_node;				/* its_select_cpu() */
	 * ITS placed next to two NUMA nodes.
	if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144))
		if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
	pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);

		return -EINVAL;					/* its_set_affinity() */
	prev_cpu = its_dev->event_map.col_map[id];
		target_col = &its_dev->its->collections[cpu];
		its_dev->event_map.col_map[id] = cpu;
	return -EINVAL;

	struct its_node *its = its_dev->its;			/* its_irq_get_msi_base() */
	return its->phys_base + GITS_TRANSLATER;

	struct its_node *its;					/* its_irq_compose_msi_msg() */
	its = its_dev->its;
	addr = its->get_msi_base(its_dev);
	msg->address_lo = lower_32_bits(addr);
	msg->address_hi = upper_32_bits(addr);
	msg->data = its_get_event_id(d);

		return -EINVAL;					/* its_irq_set_irqchip_state() */

	if (!its_list_map || gic_rdists->has_rvpeid)		/* gic_requires_eager_mapping() */

static void its_map_vm(struct its_node *its, struct its_vm *vm)
	vm->vlpi_count[its->list_nr]++;
	if (vm->vlpi_count[its->list_nr] == 1) {
		for (i = 0; i < vm->nr_vpes; i++) {
			struct its_vpe *vpe = vm->vpes[i];
			struct irq_data *d = irq_get_irq_data(vpe->irq);
			vpe->col_idx = cpumask_first(cpu_online_mask);
			its_send_vmapp(its, vpe, true);
			its_send_vinvall(its, vpe);
			irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));

static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
	/* Not using the ITS list? Everything is always mapped. */
	if (!--vm->vlpi_count[its->list_nr]) {
		for (i = 0; i < vm->nr_vpes; i++)
			its_send_vmapp(its, vm->vpes[i], false);

	if (!info->map)						/* its_vlpi_map() */
		return -EINVAL;
	raw_spin_lock(&its_dev->event_map.vlpi_lock);
	if (!its_dev->event_map.vm) {
		maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
			ret = -ENOMEM;
		its_dev->event_map.vm = info->map->vm;
		its_dev->event_map.vlpi_maps = maps;
	} else if (its_dev->event_map.vm != info->map->vm) {
		ret = -EINVAL;
	its_dev->event_map.vlpi_maps[event] = *info->map;
	/* Ensure all the VPEs are mapped on this ITS */
	its_map_vm(its_dev->its, info->map->vm);
	lpi_write_config(d, 0xff, info->map->properties);
	its_dev->event_map.nr_vlpis++;
	raw_spin_unlock(&its_dev->event_map.vlpi_lock);

	raw_spin_lock(&its_dev->event_map.vlpi_lock);		/* its_vlpi_get() */
	if (!its_dev->event_map.vm || !map) {
		ret = -EINVAL;
	*info->map = *map;
	raw_spin_unlock(&its_dev->event_map.vlpi_lock);

	raw_spin_lock(&its_dev->event_map.vlpi_lock);		/* its_vlpi_unmap() */
	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
		ret = -EINVAL;
	its_send_mapti(its_dev, d->hwirq, event);
	/* Potentially unmap the VM from this ITS */
	its_unmap_vm(its_dev->its, its_dev->event_map.vm);
	if (!--its_dev->event_map.nr_vlpis) {
		its_dev->event_map.vm = NULL;
		kfree(its_dev->event_map.vlpi_maps);
	raw_spin_unlock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))	/* its_vlpi_prop_update() */
		return -EINVAL;
	if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
		lpi_update_config(d, 0xff, info->config);
		lpi_write_config(d, 0xff, info->config);
	its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));

	/* Need a v4 ITS */
	if (!is_v4(its_dev->its))				/* its_irq_set_vcpu_affinity() */
		return -EINVAL;
	switch (info->cmd_type) {
		return -EINVAL;

	.name			= "ITS",			/* its_irq_chip */

	range->base_id = base;					/* mk_lpi_range() */
	range->span = span;
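/*
 * The range element being initialized above, roughly (a sketch based on
 * the fields and list linkage used by the allocator below):
 */
struct lpi_range {
	struct list_head	entry;
	u32			base_id;
	u32			span;
};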
	int err = -ENOSPC;					/* alloc_lpi_range() */
		if (range->span >= nr_lpis) {
			*base = range->base_id;
			range->base_id += nr_lpis;
			range->span -= nr_lpis;
			if (range->span == 0) {
				list_del(&range->entry);
	pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);

	if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)	/* merge_lpi_ranges() */
	if (a->base_id + a->span != b->base_id)
	b->base_id = a->base_id;
	b->span += a->span;
	list_del(&a->entry);

		return -ENOMEM;					/* free_lpi_range() */
		if (old->base_id < base)
	/*
	 * old is the last element with ->base_id smaller than base;
	 * if there are no elements with ->base_id smaller than base,
	 * &old->entry ends up pointing at the list head, and inserting
	 * there is the right thing to do as well.
	 */
	list_add(&new->entry, &old->entry);
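	/*
	 * In the driver, the insertion above is followed by coalescing the
	 * new range with both of its neighbours, along these lines:
	 */
	merge_lpi_ranges(old, new);
	merge_lpi_ranges(new, list_next_entry(new, entry));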
	u32 lpis = (1UL << id_bits) - 8192;			/* its_lpi_init() */
	numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
		pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
	pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);

	err = -ENOSPC;						/* its_lpi_alloc() */

	/* gic_reset_prop_table(): priority 0xa0, Group-1, disabled */

	addr_end = addr + size - 1;				/* gic_check_reserved_range() */

	if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {	/* its_setup_lpi_prop_table() */
		gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
		gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
		gic_reset_prop_table(gic_rdists->prop_table_va);
			GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
			return -ENOMEM;
		gic_rdists->prop_table_pa = page_to_phys(page);
		gic_rdists->prop_table_va = page_address(page);
		WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
			&gic_rdists->prop_table_pa);

static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
	u32 idx = baser - its->tables;
	return gits_read_baser(its->base + GITS_BASER + (idx << 3));

static void its_write_baser(struct its_node *its, struct its_baser *baser,
	u32 idx = baser - its->tables;
	gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
	baser->val = its_read_baser(its, baser);

static int its_setup_baser(struct its_node *its, struct its_baser *baser,
	u64 val = its_read_baser(its, baser);
	psz = baser->psz;
		pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
			&its->phys_base, its_base_type_string[type],
	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
		return -ENOMEM;
			pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
			return -ENXIO;
		((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
		((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
	its_write_baser(its, baser, val);
	tmp = baser->val;
	 * non-cacheable as well.
		pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
		       &its->phys_base, its_base_type_string[type],
		return -ENXIO;
	baser->order = order;
	baser->base = base;
	baser->psz = psz;
	pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
		&its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),

static bool its_parse_indirect_baser(struct its_node *its,
	u64 tmp = its_read_baser(its, baser);
	u32 psz = baser->psz;
	 * Find out whether hw supports a single or two-level table by
	its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
	indirect = !!(baser->val & GITS_BASER_INDIRECT);
	 * The size of the lvl2 table is equal to ITS page size
	 * which is reported by ITS hardware times lvl1 table
		ids -= ilog2(psz / (int)esz);
	 * range of device IDs that the ITS can grok... The ID
	 * massive waste of memory if two-level device table
		new_order = MAX_ORDER - 1;
		pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
			&its->phys_base, its_base_type_string[type],
			device_ids(its), ids);

static u32 compute_its_aff(struct its_node *its)
	 * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute
	svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
	val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);

	struct its_node *its;					/* find_sibling_its() */
	if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer))
	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4_1(its) || its == cur_its)
		if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
		if (aff != compute_its_aff(its))
		baser = its->tables[2].val;
		return its;

static void its_free_tables(struct its_node *its)
		if (its->tables[i].base) {
			free_pages((unsigned long)its->tables[i].base,
				   its->tables[i].order);
			its->tables[i].base = NULL;

static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser)
	val = its_read_baser(its, baser);
		its_write_baser(its, baser, val);
		if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz)
			return -1;
	baser->psz = psz;

static int its_alloc_tables(struct its_node *its)
	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
		struct its_baser *baser = its->tables + i;
		u64 val = its_read_baser(its, baser);
		if (its_probe_baser_psz(its, baser)) {
			its_free_tables(its);
			return -ENXIO;
		order = get_order(baser->psz);
			indirect = its_parse_indirect_baser(its, baser, &order,
							    device_ids(its));
			if (is_v4_1(its)) {
				if ((sibling = find_sibling_its(its))) {
					*baser = sibling->tables[2];
					its_write_baser(its, baser, baser->val);
			indirect = its_parse_indirect_baser(its, baser, &order,
		err = its_setup_baser(its, baser, cache, shr, order, indirect);
			its_free_tables(its);
		cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
		shr = baser->val & GITS_BASER_SHAREABILITY_MASK;

	struct its_node *its;					/* inherit_vpe_l1_table_from_its() */
	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4_1(its))
		if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
		if (aff != compute_its_aff(its))
		baser = its->tables[2].val;
		gic_data_rdist()->vpe_l1_base = its->tables[2].base;
		val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);

		void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;	/* inherit_vpe_l1_table_from_rd() */
		 * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
		gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
		*mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;

	void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;	/* allocate_vpe_l2_table() */
	if (!gic_rdists->has_rvpeid)
	/* Skip non-present CPUs */
	table = gic_data_rdist_cpu(cpu)->vpe_l1_base;

	if (!gic_rdists->has_rvpeid)				/* allocate_vpe_l1_table() */
	val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask);
	gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC);
	if (!gic_data_rdist()->vpe_table_mask)
		return -ENOMEM;
	val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1);
		return -ENOMEM;
	gic_data_rdist()->vpe_l1_base = page_address(page);
	cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
		cpumask_pr_args(gic_data_rdist()->vpe_table_mask));

static int its_alloc_collections(struct its_node *its)
	its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
	if (!its->collections)
		return -ENOMEM;
		its->collections[i].target_address = ~0ULL;

	/* its_allocate_pending_table(): make sure the GIC will observe the zero-ed page */

	 * flag the RD tables as pre-allocated if the stars do align.
		gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |	/* allocate_lpi_tables() */
			return -ENOMEM;
		gic_data_rdist_cpu(cpu)->pend_page = pend_page;

		count--;					/* read_vpend_dirty_clear() */
		pr_err_ratelimited("ITS virtual pending table not cleaning\n");

	if (gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED)	/* its_cpu_init_lpis() */
	if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
		if (WARN_ON(gic_rdists->prop_table_pa != paddr))
		gic_data_rdist()->flags |= RD_LOCAL_PENDTABLE_PREALLOCATED;
	pend_page = gic_data_rdist()->pend_page;
	val = (gic_rdists->prop_table_pa |
	       ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
		 * The HW reports non-shareable, we must
		gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
		 * The HW reports non-shareable, we must remove the
	if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
		val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
		gic_rdists->has_rvpeid = false;
		gic_rdists->has_vlpis = false;
	gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
	       gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED ?

static void its_cpu_init_collection(struct its_node *its)
	/* avoid cross node collections and its mapping */
	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
		if (its->numa_node != NUMA_NO_NODE &&
		    its->numa_node != of_node_to_nid(cpu_node))
	 * We now have to bind each collection to its target
	if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
		 * This ITS wants the physical address of the
		target = gic_data_rdist()->phys_base;
		/* This ITS wants a linear CPU number. */
	its->collections[cpu].target_address = target;
	its->collections[cpu].col_id = cpu;
	its_send_mapc(its, &its->collections[cpu], 1);
	its_send_invall(its, &its->collections[cpu]);

	struct its_node *its;					/* its_cpu_init_collections() */
	list_for_each_entry(its, &its_nodes, entry)
		its_cpu_init_collection(its);

static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
	raw_spin_lock_irqsave(&its->lock, flags);
	list_for_each_entry(tmp, &its->its_device_list, entry) {
		if (tmp->device_id == dev_id) {
	raw_spin_unlock_irqrestore(&its->lock, flags);

static struct its_baser *its_get_baser(struct its_node *its, u32 type)
		if (GITS_BASER_TYPE(its->tables[i].val) == type)
			return &its->tables[i];

static bool its_alloc_table_entry(struct its_node *its,
	esz = GITS_BASER_ENTRY_SIZE(baser->val);
	if (!(baser->val & GITS_BASER_INDIRECT))
		return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
	idx = id >> ilog2(baser->psz / esz);
	if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
	table = baser->base;
		page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
					get_order(baser->psz));
		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
			gic_flush_dcache_to_poc(page_address(page), baser->psz);
		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
		/* Ensure updated table contents are visible to ITS hardware */

static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
	baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
	/* Don't allow device id that exceeds ITS hardware limit */
		return (ilog2(dev_id) < device_ids(its));
	return its_alloc_table_entry(its, baser, dev_id);

	struct its_node *its;					/* its_alloc_vpe_table() */
	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
		baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
		if (!its_alloc_table_entry(its, baser, vpe_id))
	if (!gic_rdists->has_rvpeid)

static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
	if (!its_alloc_device_table(its, dev_id))
	sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
	itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
	dev->its = its;
	dev->itt = itt;
	dev->nr_ites = nr_ites;
	dev->event_map.lpi_map = lpi_map;
	dev->event_map.col_map = col_map;
	dev->event_map.lpi_base = lpi_base;
	dev->event_map.nr_lpis = nr_lpis;
	raw_spin_lock_init(&dev->event_map.vlpi_lock);
	dev->device_id = dev_id;
	INIT_LIST_HEAD(&dev->entry);
	raw_spin_lock_irqsave(&its->lock, flags);
	list_add(&dev->entry, &its->its_device_list);
	raw_spin_unlock_irqrestore(&its->lock, flags);
	/* Map device to its ITT */

	raw_spin_lock_irqsave(&its_dev->its->lock, flags);	/* its_free_device() */
	list_del(&its_dev->entry);
	raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
	kfree(its_dev->event_map.col_map);
	kfree(its_dev->itt);

	idx = bitmap_find_free_region(dev->event_map.lpi_map,	/* its_alloc_device_irq() */
				      dev->event_map.nr_lpis,
		return -ENOSPC;
	*hwirq = dev->event_map.lpi_base + idx;

	struct its_node *its;					/* its_msi_prepare() */
	 * are built on top of the ITS.
	dev_id = info->scratchpad[0].ul;
	its = msi_info->data;
	if (!gic_rdists->has_direct_lpi &&
	    vpe_proxy.dev->its == its &&
	    dev_id == vpe_proxy.dev->device_id) {
		return -EINVAL;
	mutex_lock(&its->dev_alloc_lock);
	its_dev = its_find_device(its, dev_id);
		its_dev->shared = true;
	its_dev = its_create_device(its, dev_id, nvec, true);
		err = -ENOMEM;
	if (info->flags & MSI_ALLOC_FLAGS_PROXY_DEVICE)
		its_dev->shared = true;
	mutex_unlock(&its->dev_alloc_lock);
	info->scratchpad[0].ptr = its_dev;

	if (irq_domain_get_of_node(domain->parent)) {		/* its_irq_gic_domain_alloc() */
		fwspec.fwnode = domain->parent->fwnode;
	} else if (is_fwnode_irqchip(domain->parent->fwnode)) {
		fwspec.fwnode = domain->parent->fwnode;
		return -EINVAL;

	struct its_device *its_dev = info->scratchpad[0].ptr;	/* its_irq_domain_alloc() */
	struct its_node *its = its_dev->its;
	err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
		(int)(hwirq + i - its_dev->event_map.lpi_base),

		return -EINVAL;					/* its_irq_domain_activate() */
	its_dev->event_map.col_map[event] = cpu;
	its_send_mapti(its_dev, d->hwirq, event);

	its_dec_lpi_count(d, its_dev->event_map.col_map[event]);	/* its_irq_domain_deactivate() */

	struct its_node *its = its_dev->its;			/* its_irq_domain_free() */
	bitmap_release_region(its_dev->event_map.lpi_map,
	mutex_lock(&its->dev_alloc_lock);
	if (!its_dev->shared &&
	    bitmap_empty(its_dev->event_map.lpi_map,
			 its_dev->event_map.nr_lpis)) {
		its_lpi_free(its_dev->event_map.lpi_map,
			     its_dev->event_map.lpi_base,
			     its_dev->event_map.nr_lpis);
	mutex_unlock(&its->dev_alloc_lock);
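/*
 * The vPE doorbell proxy manipulated below is a single ITS device plus a
 * table of victims; roughly (a sketch inferred from usage):
 */
static struct {
	raw_spinlock_t		lock;
	struct its_device	*dev;
	struct its_vpe		**vpes;
	int			next_victim;
} vpe_proxy;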
3687 if (gic_rdists->has_rvpeid) in its_vpe_db_proxy_unmap_locked()
3691 if (vpe->vpe_proxy_event == -1) in its_vpe_db_proxy_unmap_locked()
3694 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event); in its_vpe_db_proxy_unmap_locked()
3695 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; in its_vpe_db_proxy_unmap_locked()
3705 vpe_proxy.next_victim = vpe->vpe_proxy_event; in its_vpe_db_proxy_unmap_locked()
3707 vpe->vpe_proxy_event = -1; in its_vpe_db_proxy_unmap_locked()
3713 if (gic_rdists->has_rvpeid) in its_vpe_db_proxy_unmap()
3716 if (!gic_rdists->has_direct_lpi) { in its_vpe_db_proxy_unmap()
3728 if (gic_rdists->has_rvpeid) in its_vpe_db_proxy_map_locked()
3732 if (vpe->vpe_proxy_event != -1) in its_vpe_db_proxy_map_locked()
3741 vpe->vpe_proxy_event = vpe_proxy.next_victim; in its_vpe_db_proxy_map_locked()
3742 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites; in its_vpe_db_proxy_map_locked()
3744 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx; in its_vpe_db_proxy_map_locked()
3745 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event); in its_vpe_db_proxy_map_locked()
3754 if (gic_rdists->has_rvpeid) in its_vpe_db_proxy_move()
3757 if (gic_rdists->has_direct_lpi) { in its_vpe_db_proxy_move()
3760 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; in its_vpe_db_proxy_move()
3761 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); in its_vpe_db_proxy_move()
3771 target_col = &vpe_proxy.dev->its->collections[to]; in its_vpe_db_proxy_move()
3772 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event); in its_vpe_db_proxy_move()
3773 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to; in its_vpe_db_proxy_move()
3790 * interrupt to its new location. in its_vpe_set_affinity()
3795 * protect us, and that we must ensure nobody samples vpe->col_idx in its_vpe_set_affinity()
3797 * taken on any vLPI handling path that evaluates vpe->col_idx. in its_vpe_set_affinity()
3803 vpe->col_idx = cpu; in its_vpe_set_affinity()
3807 * is sharing its VPE table with the current one. in its_vpe_set_affinity()
3809 if (gic_data_rdist_cpu(cpu)->vpe_table_mask && in its_vpe_set_affinity()
3810 cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask)) in its_vpe_set_affinity()
3828 if (!gic_rdists->has_vpend_valid_dirty) in its_wait_vpt_parse_complete()
3843 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & in its_vpe_schedule()
3845 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; in its_vpe_schedule()
3850 val = virt_to_phys(page_address(vpe->vpt_page)) & in its_vpe_schedule()
3857 * easily. So in the end, vpe->pending_last is only an in its_vpe_schedule()
3860 * would be able to read its coarse map pretty quickly anyway, in its_vpe_schedule()
3864 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0; in its_vpe_schedule()
3876 vpe->idai = !!(val & GICR_VPENDBASER_IDAI); in its_vpe_deschedule()
3877 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); in its_vpe_deschedule()
3882 struct its_node *its; in its_vpe_invall() local
3884 list_for_each_entry(its, &its_nodes, entry) { in its_vpe_invall()
3885 if (!is_v4(its)) in its_vpe_invall()
3888 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) in its_vpe_invall()
3892 * Sending a VINVALL to a single ITS is enough, as all in its_vpe_invall()
3895 its_send_vinvall(its, vpe); in its_vpe_invall()
3905 switch (info->cmd_type) { in its_vpe_set_vcpu_affinity()
3923 return -EINVAL; in its_vpe_set_vcpu_affinity()
3935 cmd(vpe_proxy.dev, vpe->vpe_proxy_event); in its_vpe_send_cmd()
3944 if (gic_rdists->has_direct_lpi) { in its_vpe_send_inv()
3948 raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); in its_vpe_send_inv()
3949 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; in its_vpe_send_inv()
3950 gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR); in its_vpe_send_inv()
3952 raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); in its_vpe_send_inv()
3966 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); in its_vpe_mask_irq()
3973 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); in its_vpe_unmask_irq()
3984 return -EINVAL; in its_vpe_set_irqchip_state()
3986 if (gic_rdists->has_direct_lpi) { in its_vpe_set_irqchip_state()
3989 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; in its_vpe_set_irqchip_state()
3991 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR); in its_vpe_set_irqchip_state()
3993 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); in its_vpe_set_irqchip_state()
4012 .name = "GICv4-vpe",
4024 static struct its_node *its = NULL; in find_4_1_its() local
4026 if (!its) { in find_4_1_its()
4027 list_for_each_entry(its, &its_nodes, entry) { in find_4_1_its()
4028 if (is_v4_1(its)) in find_4_1_its()
4029 return its; in find_4_1_its()
4033 its = NULL; in find_4_1_its()
4036 return its; in find_4_1_its()
4042 struct its_node *its; in its_vpe_4_1_send_inv() local
4047 * it to the first valid ITS, and let the HW do its magic. in its_vpe_4_1_send_inv()
4049 its = find_4_1_its(); in its_vpe_4_1_send_inv()
4050 if (its) in its_vpe_4_1_send_inv()
4051 its_send_invdb(its, vpe); in its_vpe_4_1_send_inv()
4056 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); in its_vpe_4_1_mask_irq()
4062 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); in its_vpe_4_1_unmask_irq()
4074 val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0; in its_vpe_4_1_schedule()
4075 val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0; in its_vpe_4_1_schedule()
4076 val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id); in its_vpe_4_1_schedule()
4087 if (info->req_db) { in its_vpe_4_1_deschedule()
4091 * vPE is going to block: make the vPE non-resident with in its_vpe_4_1_deschedule()
4093 * we read-back PendingLast clear, then a doorbell will be in its_vpe_4_1_deschedule()
4100 raw_spin_lock_irqsave(&vpe->vpe_lock, flags); in its_vpe_4_1_deschedule()
4104 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); in its_vpe_4_1_deschedule()
4105 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); in its_vpe_4_1_deschedule()
4108 * We're not blocking, so just make the vPE non-resident in its_vpe_4_1_deschedule()
4114 vpe->pending_last = true; in its_vpe_4_1_deschedule()
4126 val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id); in its_vpe_4_1_invall()
4130 raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); in its_vpe_4_1_invall()
4131 rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; in its_vpe_4_1_invall()
4135 raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); in its_vpe_4_1_invall()
4144 switch (info->cmd_type) { in its_vpe_4_1_set_vcpu_affinity()
4162 return -EINVAL; in its_vpe_4_1_set_vcpu_affinity()
4167 .name = "GICv4.1-vpe",
4181 desc.its_vsgi_cmd.sgi = d->hwirq; in its_configure_sgi()
4182 desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority; in its_configure_sgi()
4183 desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled; in its_configure_sgi()
4184 desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group; in its_configure_sgi()
4188 * GICv4.1 allows us to send VSGI commands to any ITS as long as the destination vPE is mapped there. Since we map them eagerly at in its_configure_sgi()
4190 * activation time, we're pretty sure the first GICv4.1 ITS will do. in its_configure_sgi()
4199 vpe->sgi_config[d->hwirq].enabled = false; in its_sgi_mask_irq()
4207 vpe->sgi_config[d->hwirq].enabled = true; in its_sgi_unmask_irq()
4229 return -EINVAL; in its_sgi_set_irqchip_state()
4233 struct its_node *its = find_4_1_its(); in its_sgi_set_irqchip_state() local
4236 val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id); in its_sgi_set_irqchip_state()
4237 val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq); in its_sgi_set_irqchip_state()
4238 writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K); in its_sgi_set_irqchip_state()
4257 return -EINVAL; in its_sgi_get_irqchip_state()
4262 * - Concurrent vPE affinity change: we must make sure it cannot happen, or we'll talk to the wrong redistributor. This is protected by the vpe_lock. in its_sgi_get_irqchip_state()
4266 * - Concurrent VSGIPENDR access: As it involves accessing two distinct registers, we must hold the rd_lock. in its_sgi_get_irqchip_state()
4270 raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); in its_sgi_get_irqchip_state()
4271 base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K; in its_sgi_get_irqchip_state()
4272 writel_relaxed(vpe->vpe_id, base + GICR_VSGIR); in its_sgi_get_irqchip_state()
4278 count--; in its_sgi_get_irqchip_state()
4288 raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); in its_sgi_get_irqchip_state()
4292 return -ENXIO; in its_sgi_get_irqchip_state()
4294 *val = !!(status & (1 << d->hwirq)); in its_sgi_get_irqchip_state()
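/*
 * [Sketch, not in the kernel source] The lines elided between the
 * GICR_VSGIR write and the rd_lock release are a bounded poll on
 * GICR_VSGIPENDR; the count-- shown above gives up after roughly one
 * second, which is what the -ENXIO path reports. Presumed loop shape:
 *
 *	do {
 *		status = readl_relaxed(base + GICR_VSGIPENDR);
 *		if (!(status & GICR_VSGIPENDR_BUSY))
 *			break;
 *		count--;
 *		if (!count)
 *			break;	// surfaced as -ENXIO above
 *		cpu_relax();
 *		udelay(1);
 *	} while (count);
 */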
4304 switch (info->cmd_type) { in its_sgi_set_vcpu_affinity()
4306 vpe->sgi_config[d->hwirq].priority = info->priority; in its_sgi_set_vcpu_affinity()
4307 vpe->sgi_config[d->hwirq].group = info->group; in its_sgi_set_vcpu_affinity()
4312 return -EINVAL; in its_sgi_set_vcpu_affinity()
4317 .name = "GICv4.1-sgi",
4337 vpe->sgi_config[i].priority = 0; in its_sgi_irq_domain_alloc()
4338 vpe->sgi_config[i].enabled = false; in its_sgi_irq_domain_alloc()
4339 vpe->sgi_config[i].group = false; in its_sgi_irq_domain_alloc()
4372 * - To change the configuration, CLEAR must be set to false, leaving the pending bit unchanged. in its_sgi_irq_domain_deactivate()
4374 * - To clear the pending bit, CLEAR must be set to true, leaving the configuration unchanged. in its_sgi_irq_domain_deactivate()
4379 vpe->sgi_config[d->hwirq].enabled = false; in its_sgi_irq_domain_deactivate()
4415 return -ENOMEM; in its_vpe_init()
4421 return -ENOMEM; in its_vpe_init()
4424 raw_spin_lock_init(&vpe->vpe_lock); in its_vpe_init()
4425 vpe->vpe_id = vpe_id; in its_vpe_init()
4426 vpe->vpt_page = vpt_page; in its_vpe_init()
4427 if (gic_rdists->has_rvpeid) in its_vpe_init()
4428 atomic_set(&vpe->vmapp_count, 0); in its_vpe_init()
4430 vpe->vpe_proxy_event = -1; in its_vpe_init()
4438 its_vpe_id_free(vpe->vpe_id); in its_vpe_teardown()
4439 its_free_pending_table(vpe->vpt_page); in its_vpe_teardown()
4446 struct its_vm *vm = domain->host_data; in its_vpe_irq_domain_free()
4456 BUG_ON(vm != vpe->its_vm); in its_vpe_irq_domain_free()
4458 clear_bit(data->hwirq, vm->db_bitmap); in its_vpe_irq_domain_free()
4463 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { in its_vpe_irq_domain_free()
4464 its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); in its_vpe_irq_domain_free()
4465 its_free_prop_table(vm->vprop_page); in its_vpe_irq_domain_free()
4482 return -ENOMEM; in its_vpe_irq_domain_alloc()
4486 return -ENOMEM; in its_vpe_irq_domain_alloc()
4492 return -ENOMEM; in its_vpe_irq_domain_alloc()
4495 vm->db_bitmap = bitmap; in its_vpe_irq_domain_alloc()
4496 vm->db_lpi_base = base; in its_vpe_irq_domain_alloc()
4497 vm->nr_db_lpis = nr_ids; in its_vpe_irq_domain_alloc()
4498 vm->vprop_page = vprop_page; in its_vpe_irq_domain_alloc()
4500 if (gic_rdists->has_rvpeid) in its_vpe_irq_domain_alloc()
4504 vm->vpes[i]->vpe_db_lpi = base + i; in its_vpe_irq_domain_alloc()
4505 err = its_vpe_init(vm->vpes[i]); in its_vpe_irq_domain_alloc()
4509 vm->vpes[i]->vpe_db_lpi); in its_vpe_irq_domain_alloc()
4513 irqchip, vm->vpes[i]); in its_vpe_irq_domain_alloc()
4532 struct its_node *its; in its_vpe_irq_domain_activate() local
4543 vpe->col_idx = cpumask_first(cpu_online_mask); in its_vpe_irq_domain_activate()
4545 list_for_each_entry(its, &its_nodes, entry) { in its_vpe_irq_domain_activate()
4546 if (!is_v4(its)) in its_vpe_irq_domain_activate()
4549 its_send_vmapp(its, vpe, true); in its_vpe_irq_domain_activate()
4550 its_send_vinvall(its, vpe); in its_vpe_irq_domain_activate()
4553 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); in its_vpe_irq_domain_activate()
4562 struct its_node *its; in its_vpe_irq_domain_deactivate() local
4571 list_for_each_entry(its, &its_nodes, entry) { in its_vpe_irq_domain_deactivate()
4572 if (!is_v4(its)) in its_vpe_irq_domain_deactivate()
4575 its_send_vmapp(its, vpe, false); in its_vpe_irq_domain_deactivate()
4583 if (find_4_1_its() && !atomic_read(&vpe->vmapp_count)) in its_vpe_irq_domain_deactivate()
4584 gic_flush_dcache_to_poc(page_address(vpe->vpt_page), LPI_PENDBASE_SZ); in its_vpe_irq_domain_deactivate()
4602 * GIC architecture specification requires the ITS to be both disabled and quiescent for writes to GITS_BASER<n> or GITS_CBASER to not have UNPREDICTABLE results. in its_force_quiescent()
4609 /* Disable the generation of all interrupts to this ITS */ in its_force_quiescent()
4613 /* Poll GITS_CTLR and wait until ITS becomes quiescent */ in its_force_quiescent()
4619 count--; in its_force_quiescent()
4621 return -EBUSY; in its_force_quiescent()
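/*
 * [Sketch, not in the kernel source] The quiescence poll above is
 * bounded: in the full source the counter starts at 1000000 with a
 * udelay(1) per iteration, i.e. roughly a one-second budget before the
 * -EBUSY above is returned. Presumed loop shape:
 *
 *	while (1) {
 *		val = readl_relaxed(base + GITS_CTLR);
 *		if (val & GITS_CTLR_QUIESCENT)
 *			return 0;
 *		count--;
 *		if (!count)
 *			return -EBUSY;
 *		cpu_relax();
 *		udelay(1);
 *	}
 */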
4630 struct its_node *its = data; in its_enable_quirk_cavium_22375() local
4633 its->typer &= ~GITS_TYPER_DEVBITS; in its_enable_quirk_cavium_22375()
4634 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1); in its_enable_quirk_cavium_22375()
4635 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; in its_enable_quirk_cavium_22375()
4642 struct its_node *its = data; in its_enable_quirk_cavium_23144() local
4644 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; in its_enable_quirk_cavium_23144()
4651 struct its_node *its = data; in its_enable_quirk_qdf2400_e0065() local
4654 its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE; in its_enable_quirk_qdf2400_e0065()
4655 its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1); in its_enable_quirk_qdf2400_e0065()
4662 struct its_node *its = its_dev->its; in its_irq_get_msi_base_pre_its() local
4665 * The Socionext Synquacer SoC has a so-called 'pre-ITS', in its_irq_get_msi_base_pre_its()
4666 * which maps 32-bit writes targeted at a separate window of size '4 << device_id_bits' onto writes to GITS_TRANSLATER, with the device ID taken from bits [device_id_bits + 1:2] of the window offset. in its_irq_get_msi_base_pre_its()
4671 return its->pre_its_base + (its_dev->device_id << 2); in its_irq_get_msi_base_pre_its()
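/*
 * [Worked example, not in the kernel source] Each device gets a
 * dedicated 32-bit doorbell at a 4-byte stride inside the pre-ITS
 * window. For a hypothetical device_id of 5, the doorbell address is
 * pre_its_base + (5 << 2) = pre_its_base + 0x14, and the pre-ITS
 * recovers the device ID from bits [device_id_bits + 1:2] of the
 * offset of the write within the window.
 */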
4676 struct its_node *its = data; in its_enable_quirk_socionext_synquacer() local
4680 if (!fwnode_property_read_u32_array(its->fwnode_handle, in its_enable_quirk_socionext_synquacer()
4681 "socionext,synquacer-pre-its", in its_enable_quirk_socionext_synquacer()
4685 its->pre_its_base = pre_its_window[0]; in its_enable_quirk_socionext_synquacer()
4686 its->get_msi_base = its_irq_get_msi_base_pre_its; in its_enable_quirk_socionext_synquacer()
4688 ids = ilog2(pre_its_window[1]) - 2; in its_enable_quirk_socionext_synquacer()
4689 if (device_ids(its) > ids) { in its_enable_quirk_socionext_synquacer()
4690 its->typer &= ~GITS_TYPER_DEVBITS; in its_enable_quirk_socionext_synquacer()
4691 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1); in its_enable_quirk_socionext_synquacer()
4694 /* the pre-ITS breaks isolation, so disable MSI remapping */ in its_enable_quirk_socionext_synquacer()
4695 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP; in its_enable_quirk_socionext_synquacer()
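/*
 * [Worked example, not in the kernel source] For a hypothetical 64KB
 * pre-ITS window advertised by "socionext,synquacer-pre-its":
 * ids = ilog2(SZ_64K) - 2 = 16 - 2 = 14 device ID bits, i.e. 16384
 * 4-byte doorbells. If GITS_TYPER advertises more bits than that,
 * DEVBITS is rewritten with ids - 1 = 13, matching the "bits minus one"
 * encoding that device_ids() decodes.
 */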
4703 struct its_node *its = data; in its_enable_quirk_hip07_161600802() local
4709 its->vlpi_redist_offset = SZ_128K; in its_enable_quirk_hip07_161600802()
4716 .desc = "ITS: Cavium errata 22375, 24313",
4724 .desc = "ITS: Cavium erratum 23144",
4732 .desc = "ITS: QDF2400 erratum 0065",
4733 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
4741 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
4742 * implementation, but with a 'pre-ITS' added that requires special handling in software.
4745 .desc = "ITS: Socionext Synquacer pre-ITS",
4753 .desc = "ITS: Hip07 erratum 161600802",
4763 static void its_enable_quirks(struct its_node *its) in its_enable_quirks() argument
4765 u32 iidr = readl_relaxed(its->base + GITS_IIDR); in its_enable_quirks()
4767 gic_enable_quirks(iidr, its_quirks, its); in its_enable_quirks()
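/*
 * [Sketch, not in the kernel source] gic_enable_quirks() lives in
 * irq-gic-common.c; for IIDR-based entries like those in the table
 * above, the match is on the masked GITS_IIDR value, presumably along
 * these lines:
 *
 *	for (; quirks->desc; quirks++) {
 *		if (quirks->iidr != (quirks->mask & iidr))
 *			continue;
 *		if (quirks->init(data))
 *			pr_info("GIC: enabling workaround for %s\n",
 *				quirks->desc);
 *	}
 */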
4772 struct its_node *its; in its_save_disable() local
4776 list_for_each_entry(its, &its_nodes, entry) { in its_save_disable()
4779 base = its->base; in its_save_disable()
4780 its->ctlr_save = readl_relaxed(base + GITS_CTLR); in its_save_disable()
4783 pr_err("ITS@%pa: failed to quiesce: %d\n", in its_save_disable()
4784 &its->phys_base, err); in its_save_disable()
4785 writel_relaxed(its->ctlr_save, base + GITS_CTLR); in its_save_disable()
4789 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER); in its_save_disable()
4794 list_for_each_entry_continue_reverse(its, &its_nodes, entry) { in its_save_disable()
4797 base = its->base; in its_save_disable()
4798 writel_relaxed(its->ctlr_save, base + GITS_CTLR); in its_save_disable()
4808 struct its_node *its; in its_restore_enable() local
4812 list_for_each_entry(its, &its_nodes, entry) { in its_restore_enable()
4816 base = its->base; in its_restore_enable()
4819 * Make sure that the ITS is disabled. If it fails to quiesce, don't restore it, since writing to CBASER or BASER<n> in its_restore_enable()
4821 * registers is undefined according to the GIC v3 ITS Specification. in its_restore_enable()
4824 * Firmware resuming with the ITS enabled is terminally broken. in its_restore_enable()
4829 pr_err("ITS@%pa: failed to quiesce on resume: %d\n", in its_restore_enable()
4830 &its->phys_base, ret); in its_restore_enable()
4834 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER); in its_restore_enable()
4840 its->cmd_write = its->cmd_base; in its_restore_enable()
4845 struct its_baser *baser = &its->tables[i]; in its_restore_enable()
4847 if (!(baser->val & GITS_BASER_VALID)) in its_restore_enable()
4850 its_write_baser(its, baser, baser->val); in its_restore_enable()
4852 writel_relaxed(its->ctlr_save, base + GITS_CTLR); in its_restore_enable()
4855 * Reinit the collection if it's stored in the ITS. This is indicated by the col_id being less than the HCC field of GITS_TYPER (CID < HCC). in its_restore_enable()
4859 if (its->collections[smp_processor_id()].col_id < GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER))) in its_restore_enable()
4861 its_cpu_init_collection(its); in its_restore_enable()
4876 its_base = ioremap(res->start, SZ_64K); in its_map_one()
4878 pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); in its_map_one()
4879 *err = -ENOMEM; in its_map_one()
4885 pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); in its_map_one()
4886 *err = -ENODEV; in its_map_one()
4892 pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); in its_map_one()
4903 static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) in its_init_domain() argument
4910 return -ENOMEM; in its_init_domain()
4912 inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its); in its_init_domain()
4915 return -ENOMEM; in its_init_domain()
4918 inner_domain->parent = its_parent; in its_init_domain()
4920 inner_domain->flags |= its->msi_domain_flags; in its_init_domain()
4921 info->ops = &its_msi_domain_ops; in its_init_domain()
4922 info->data = its; in its_init_domain()
4923 inner_domain->host_data = info; in its_init_domain()
4930 struct its_node *its; in its_init_vpe_domain() local
4934 if (gic_rdists->has_direct_lpi) { in its_init_vpe_domain()
4935 pr_info("ITS: Using DirectLPI for VPE invalidation\n"); in its_init_vpe_domain()
4939 /* Any ITS will do, even if not v4 */ in its_init_vpe_domain()
4940 its = list_first_entry(&its_nodes, struct its_node, entry); in its_init_vpe_domain()
4946 return -ENOMEM; in its_init_vpe_domain()
4949 devid = GENMASK(device_ids(its) - 1, 0); in its_init_vpe_domain()
4950 vpe_proxy.dev = its_create_device(its, devid, entries, false); in its_init_vpe_domain()
4953 pr_err("ITS: Can't allocate GICv4 proxy device\n"); in its_init_vpe_domain()
4954 return -ENOMEM; in its_init_vpe_domain()
4957 BUG_ON(entries > vpe_proxy.dev->nr_ites); in its_init_vpe_domain()
4961 pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n", in its_init_vpe_domain()
4962 devid, vpe_proxy.dev->nr_ites); in its_init_vpe_domain()
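/*
 * [Note, not in the kernel source] GENMASK(device_ids(its) - 1, 0) used
 * above is simply the largest device ID this ITS can address: with, say,
 * 20 device ID bits the proxy device becomes DevID 0xfffff, at the very
 * top of the ID space and therefore unlikely to clash with a real
 * device.
 */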
4975 * guaranteed to be single-threaded, hence no locking. in its_compute_its_list_map()
4981 pr_err("ITS@%pa: No ITSList entry available!\n", in its_compute_its_list_map()
4982 &res->start); in its_compute_its_list_map()
4983 return -EINVAL; in its_compute_its_list_map()
4997 pr_err("ITS@%pa: Duplicate ITSList entry %d\n", in its_compute_its_list_map()
4998 &res->start, its_number); in its_compute_its_list_map()
4999 return -EINVAL; in its_compute_its_list_map()
5008 struct its_node *its; in its_probe_one() local
5019 pr_info("ITS %pR\n", res); in its_probe_one()
5021 its = kzalloc(sizeof(*its), GFP_KERNEL); in its_probe_one()
5022 if (!its) { in its_probe_one()
5023 err = -ENOMEM; in its_probe_one()
5027 raw_spin_lock_init(&its->lock); in its_probe_one()
5028 mutex_init(&its->dev_alloc_lock); in its_probe_one()
5029 INIT_LIST_HEAD(&its->entry); in its_probe_one()
5030 INIT_LIST_HEAD(&its->its_device_list); in its_probe_one()
5032 its->typer = typer; in its_probe_one()
5033 its->base = its_base; in its_probe_one()
5034 its->phys_base = res->start; in its_probe_one()
5035 if (is_v4(its)) { in its_probe_one()
5041 its->list_nr = err; in its_probe_one()
5043 pr_info("ITS@%pa: Using ITS number %d\n", in its_probe_one()
5044 &res->start, err); in its_probe_one()
5046 pr_info("ITS@%pa: Single VMOVP capable\n", &res->start); in its_probe_one()
5049 if (is_v4_1(its)) { in its_probe_one()
5052 its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K); in its_probe_one()
5053 if (!its->sgir_base) { in its_probe_one()
5054 err = -ENOMEM; in its_probe_one()
5058 its->mpidr = readl_relaxed(its_base + GITS_MPIDR); in its_probe_one()
5060 pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n", in its_probe_one()
5061 &res->start, its->mpidr, svpet); in its_probe_one()
5065 its->numa_node = numa_node; in its_probe_one()
5067 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, in its_probe_one()
5070 err = -ENOMEM; in its_probe_one()
5073 its->cmd_base = (void *)page_address(page); in its_probe_one()
5074 its->cmd_write = its->cmd_base; in its_probe_one()
5075 its->fwnode_handle = handle; in its_probe_one()
5076 its->get_msi_base = its_irq_get_msi_base; in its_probe_one()
5077 its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP; in its_probe_one()
5079 its_enable_quirks(its); in its_probe_one()
5081 err = its_alloc_tables(its); in its_probe_one()
5085 err = its_alloc_collections(its); in its_probe_one()
5089 baser = (virt_to_phys(its->cmd_base) | GITS_CBASER_RaWaWb | in its_probe_one()
5092 GITS_CBASER_InnerShareable | (ITS_CMD_QUEUE_SZ / SZ_4K - 1) | GITS_CBASER_VALID); in its_probe_one()
5095 gits_write_cbaser(baser, its->base + GITS_CBASER); in its_probe_one()
5096 tmp = gits_read_cbaser(its->base + GITS_CBASER); in its_probe_one()
5101 * The HW reports non-shareable, we must remove the cacheability attributes as well. in its_probe_one()
5108 gits_write_cbaser(baser, its->base + GITS_CBASER); in its_probe_one()
5110 pr_info("ITS: using cache flushing for cmd queue\n"); in its_probe_one()
5111 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; in its_probe_one()
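/*
 * [Worked example, not in the kernel source] ITS_CMD_QUEUE_SZ is defined
 * as 64KB earlier in the file (elided from this listing), so the size
 * field programmed above is 64K / 4K - 1 = 15, encoding a 16-page
 * command queue. The CBASER value also carries the queue's physical
 * address, the cacheability and shareability attributes, and the VALID
 * bit; when the read-back shows the ITS downgraded the shareability,
 * the attributes are dropped to non-cacheable and the driver flushes
 * the command queue by hand instead.
 */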
5114 gits_write_cwriter(0, its->base + GITS_CWRITER); in its_probe_one()
5115 ctlr = readl_relaxed(its->base + GITS_CTLR); in its_probe_one()
5117 if (is_v4(its)) in its_probe_one()
5119 writel_relaxed(ctlr, its->base + GITS_CTLR); in its_probe_one()
5121 err = its_init_domain(handle, its); in its_probe_one()
5126 list_add(&its->entry, &its_nodes); in its_probe_one()
5132 its_free_tables(its); in its_probe_one()
5134 free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); in its_probe_one()
5136 if (its->sgir_base) in its_probe_one()
5137 iounmap(its->sgir_base); in its_probe_one()
5139 kfree(its); in its_probe_one()
5142 pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err); in its_probe_one()
5159 return -ENXIO; in redist_disable_lpis()
5168 * LPIs before trying to re-enable them. They are already configured and all is well in the world. in redist_disable_lpis()
5173 if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) || in redist_disable_lpis()
5174 (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED)) in redist_disable_lpis()
5200 return -ETIMEDOUT; in redist_disable_lpis()
5203 timeout--; in redist_disable_lpis()
5213 return -EBUSY; in redist_disable_lpis()
5237 cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state); in rdist_memreserve_cpuhp_cleanup_workfn()
5238 gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID; in rdist_memreserve_cpuhp_cleanup_workfn()
5250 if (gic_data_rdist()->flags & RD_LOCAL_MEMRESERVE_DONE) in its_cpu_memreserve_lpi()
5253 pend_page = gic_data_rdist()->pend_page; in its_cpu_memreserve_lpi()
5255 ret = -ENOMEM; in its_cpu_memreserve_lpi()
5259 * If the pending table was pre-programmed, free the memory we allocated for it, as it is not going to be used. in its_cpu_memreserve_lpi()
5263 if (gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED) { in its_cpu_memreserve_lpi()
5265 gic_data_rdist()->pend_page = NULL; in its_cpu_memreserve_lpi()
5277 gic_data_rdist()->flags |= RD_LOCAL_MEMRESERVE_DONE; in its_cpu_memreserve_lpi()
5299 { .compatible = "arm,gic-v3-its", },
5309 * Make sure *all* the ITS are reset before we probe any, as in its_of_probe()
5310 * they may be sharing memory. If any of the ITS fails to reset, don't even try to go any further, as this could result in something even worse. in its_of_probe()
5319 !of_property_read_bool(np, "msi-controller") || in its_of_probe()
5332 if (!of_property_read_bool(np, "msi-controller")) { in its_of_probe()
5333 pr_warn("%pOF: no msi-controller property, ITS ignored\n", in its_of_probe()
5343 its_probe_one(&res, &np->fwnode, of_node_to_nid(np)); in its_of_probe()
5356 /* GIC ITS ID */
5388 return -EINVAL; in gic_acpi_parse_srat_its()
5390 if (its_affinity->header.length < sizeof(*its_affinity)) { in gic_acpi_parse_srat_its()
5391 pr_err("SRAT: Invalid header length %d in ITS affinity\n", in gic_acpi_parse_srat_its()
5392 its_affinity->header.length); in gic_acpi_parse_srat_its()
5393 return -EINVAL; in gic_acpi_parse_srat_its()
5401 node = pxm_to_node(its_affinity->proximity_domain); in gic_acpi_parse_srat_its()
5404 pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node); in gic_acpi_parse_srat_its()
5409 its_srat_maps[its_in_srat].its_id = its_affinity->its_id; in gic_acpi_parse_srat_its()
5411 pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n", in gic_acpi_parse_srat_its()
5412 its_affinity->proximity_domain, its_affinity->its_id, node); in gic_acpi_parse_srat_its()
5439 /* free the its_srat_maps after ITS probing */
5460 res.start = its_entry->base_address; in gic_acpi_parse_madt_its()
5461 res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1; in gic_acpi_parse_madt_its()
5466 pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n", in gic_acpi_parse_madt_its()
5468 return -ENOMEM; in gic_acpi_parse_madt_its()
5471 err = iort_register_domain_token(its_entry->translation_id, res.start, in gic_acpi_parse_madt_its()
5474 pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n", in gic_acpi_parse_madt_its()
5475 &res.start, its_entry->translation_id); in gic_acpi_parse_madt_its()
5480 acpi_get_its_numa_node(its_entry->translation_id)); in gic_acpi_parse_madt_its()
5484 iort_deregister_domain_token(its_entry->translation_id); in gic_acpi_parse_madt_its()
5498 .start = its_entry->base_address, in its_acpi_reset()
5499 .end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1, in its_acpi_reset()
5510 * Make sure *all* the ITS are reset before we probe any, as in its_acpi_probe()
5511 * they may be sharing memory. If any of the ITS fails to reset, don't even try to go any further, as this could result in something even worse. in its_acpi_probe()
5535 gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID; in its_lpi_memreserve_init()
5543 gic_rdists->cpuhp_memreserve_state = state; in its_lpi_memreserve_init()
5552 struct its_node *its; in its_init() local
5567 pr_warn("ITS: No ITS available, not enabling LPIs\n"); in its_init()
5568 return -ENXIO; in its_init()
5575 list_for_each_entry(its, &its_nodes, entry) { in its_init()
5576 has_v4 |= is_v4(its); in its_init()
5577 has_v4_1 |= is_v4_1(its); in its_init()
5581 if (WARN_ON(!has_v4_1 && rdists->has_rvpeid)) in its_init()
5582 rdists->has_rvpeid = false; in its_init()
5584 if (has_v4 && rdists->has_vlpis) { in its_init()
5594 rdists->has_vlpis = false; in its_init()
5595 pr_err("ITS: Disabling GICv4 support\n"); in its_init()
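/*
 * [Note, not in the kernel source] The lines elided between the
 * condition and this error path initialize the GICv4 infrastructure
 * (the vPE proxy device set up in its_init_vpe_domain() above and the
 * v4 irq domains); only if that initialization fails is has_vlpis
 * cleared and GICv4 support disabled.
 */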