Lines Matching +full:iommu +full:- +full:map +full:- +full:mask
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2006-2008 Intel Corporation
14 * These routines are used by both DMA-remapping and Interrupt-remapping
28 #include <linux/iommu.h>
33 #include "iommu.h"
49 * 1) The hotplug framework guarantees that a DMAR unit will be hot-added
51 * 2) The hotplug framework guarantees that a DMAR unit will be hot-removed
67 static void free_iommu(struct intel_iommu *iommu);
75 if (drhd->include_all) in dmar_register_drhd_unit()
76 list_add_tail_rcu(&drhd->list, &dmar_drhd_units); in dmar_register_drhd_unit()
78 list_add_rcu(&drhd->list, &dmar_drhd_units); in dmar_register_drhd_unit()
88 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE || in dmar_alloc_dev_scope()
89 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT || in dmar_alloc_dev_scope()
90 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) in dmar_alloc_dev_scope()
92 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC && in dmar_alloc_dev_scope()
93 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) { in dmar_alloc_dev_scope()
96 start += scope->length; in dmar_alloc_dev_scope()
134 if (pci_domain_nr(dev->bus) > U16_MAX) in dmar_alloc_pci_notify_info()
139 for (tmp = dev; tmp; tmp = tmp->bus->self) in dmar_alloc_pci_notify_info()
149 dmar_dev_scope_status = -ENOMEM; in dmar_alloc_pci_notify_info()
154 info->event = event; in dmar_alloc_pci_notify_info()
155 info->dev = dev; in dmar_alloc_pci_notify_info()
156 info->seg = pci_domain_nr(dev->bus); in dmar_alloc_pci_notify_info()
157 info->level = level; in dmar_alloc_pci_notify_info()
159 for (tmp = dev; tmp; tmp = tmp->bus->self) { in dmar_alloc_pci_notify_info()
160 level--; in dmar_alloc_pci_notify_info()
161 info->path[level].bus = tmp->bus->number; in dmar_alloc_pci_notify_info()
162 info->path[level].device = PCI_SLOT(tmp->devfn); in dmar_alloc_pci_notify_info()
163 info->path[level].function = PCI_FUNC(tmp->devfn); in dmar_alloc_pci_notify_info()
164 if (pci_is_root_bus(tmp->bus)) in dmar_alloc_pci_notify_info()
165 info->bus = tmp->bus->number; in dmar_alloc_pci_notify_info()
183 if (info->bus != bus) in dmar_match_pci_path()
185 if (info->level != count) in dmar_match_pci_path()
189 if (path[i].device != info->path[i].device || in dmar_match_pci_path()
190 path[i].function != info->path[i].function) in dmar_match_pci_path()
201 i = info->level - 1; in dmar_match_pci_path()
202 if (bus == info->path[i].bus && in dmar_match_pci_path()
203 path[0].device == info->path[i].device && in dmar_match_pci_path()
204 path[0].function == info->path[i].function) { in dmar_match_pci_path()
205 pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n", in dmar_match_pci_path()
220 struct device *tmp, *dev = &info->dev->dev; in dmar_insert_dev_scope()
224 if (segment != info->seg) in dmar_insert_dev_scope()
227 for (; start < end; start += scope->length) { in dmar_insert_dev_scope()
229 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT && in dmar_insert_dev_scope()
230 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE) in dmar_insert_dev_scope()
234 level = (scope->length - sizeof(*scope)) / sizeof(*path); in dmar_insert_dev_scope()
235 if (!dmar_match_pci_path(info, scope->bus, path, level)) in dmar_insert_dev_scope()
244 * "BRIDGE_OTHER" (0680h) - we don't declare a socpe mismatch in dmar_insert_dev_scope()
247 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && in dmar_insert_dev_scope()
248 info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) || in dmar_insert_dev_scope()
249 (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE && in dmar_insert_dev_scope()
250 (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL && in dmar_insert_dev_scope()
251 info->dev->class >> 16 != PCI_BASE_CLASS_BRIDGE))) { in dmar_insert_dev_scope()
253 pci_name(info->dev)); in dmar_insert_dev_scope()
254 return -EINVAL; in dmar_insert_dev_scope()
259 devices[i].bus = info->dev->bus->number; in dmar_insert_dev_scope()
260 devices[i].devfn = info->dev->devfn; in dmar_insert_dev_scope()
266 return -EINVAL; in dmar_insert_dev_scope()
278 if (info->seg != segment) in dmar_remove_dev_scope()
282 if (tmp == &info->dev->dev) { in dmar_remove_dev_scope()
299 if (dmaru->include_all) in dmar_pci_bus_add_dev()
302 drhd = container_of(dmaru->hdr, in dmar_pci_bus_add_dev()
305 ((void *)drhd) + drhd->header.length, in dmar_pci_bus_add_dev()
306 dmaru->segment, in dmar_pci_bus_add_dev()
307 dmaru->devices, dmaru->devices_cnt); in dmar_pci_bus_add_dev()
327 if (dmar_remove_dev_scope(info, dmaru->segment, in dmar_pci_bus_del_dev()
328 dmaru->devices, dmaru->devices_cnt)) in dmar_pci_bus_del_dev()
337 dev_set_msi_domain(&pdev->dev, dev_get_msi_domain(&physfn->dev)); in vf_inherit_msi_domain()
349 if (pdev->is_virtfn) { in dmar_pci_bus_notifier()
395 if (dmaru->segment == drhd->segment && in dmar_find_dmaru()
396 dmaru->reg_base_addr == drhd->address) in dmar_find_dmaru()
403 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
418 dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL); in dmar_parse_one_drhd()
420 return -ENOMEM; in dmar_parse_one_drhd()
426 dmaru->hdr = (void *)(dmaru + 1); in dmar_parse_one_drhd()
427 memcpy(dmaru->hdr, header, header->length); in dmar_parse_one_drhd()
428 dmaru->reg_base_addr = drhd->address; in dmar_parse_one_drhd()
429 dmaru->segment = drhd->segment; in dmar_parse_one_drhd()
431 dmaru->reg_size = 1UL << (drhd->size + 12); in dmar_parse_one_drhd()
432 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */ in dmar_parse_one_drhd()
433 dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1), in dmar_parse_one_drhd()
434 ((void *)drhd) + drhd->header.length, in dmar_parse_one_drhd()
435 &dmaru->devices_cnt); in dmar_parse_one_drhd()
436 if (dmaru->devices_cnt && dmaru->devices == NULL) { in dmar_parse_one_drhd()
438 return -ENOMEM; in dmar_parse_one_drhd()
443 dmar_free_dev_scope(&dmaru->devices, in dmar_parse_one_drhd()
444 &dmaru->devices_cnt); in dmar_parse_one_drhd()
459 if (dmaru->devices && dmaru->devices_cnt) in dmar_free_drhd()
460 dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt); in dmar_free_drhd()
461 if (dmaru->iommu) in dmar_free_drhd()
462 free_iommu(dmaru->iommu); in dmar_free_drhd()
472 if (strnlen(andd->device_name, header->length - 8) == header->length - 8) { in dmar_parse_one_andd()
474 "Your BIOS is broken; ANDD object name is not NUL-terminated\n" in dmar_parse_one_andd()
480 return -EINVAL; in dmar_parse_one_andd()
482 pr_info("ANDD device: %x name: %s\n", andd->device_number, in dmar_parse_one_andd()
483 andd->device_name); in dmar_parse_one_andd()
496 if (drhd->reg_base_addr == rhsa->base_address) { in dmar_parse_one_rhsa()
497 int node = pxm_to_node(rhsa->proximity_domain); in dmar_parse_one_rhsa()
501 drhd->iommu->node = node; in dmar_parse_one_rhsa()
506 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n" in dmar_parse_one_rhsa()
508 rhsa->base_address, in dmar_parse_one_rhsa()
529 switch (header->type) { in dmar_table_print_dmar_entry()
534 (unsigned long long)drhd->address, drhd->flags); in dmar_table_print_dmar_entry()
540 (unsigned long long)rmrr->base_address, in dmar_table_print_dmar_entry()
541 (unsigned long long)rmrr->end_address); in dmar_table_print_dmar_entry()
545 pr_info("ATSR flags: %#x\n", atsr->flags); in dmar_table_print_dmar_entry()
550 (unsigned long long)rhsa->base_address, in dmar_table_print_dmar_entry()
551 rhsa->proximity_domain); in dmar_table_print_dmar_entry()
554 /* We don't print this here because we need to sanity-check in dmar_table_print_dmar_entry()
559 pr_info("SATC flags: 0x%x\n", satc->flags); in dmar_table_print_dmar_entry()
565 * dmar_table_detect - checks to see if the platform supports DMAR devices
575 pr_warn("Unable to map DMAR\n"); in dmar_table_detect()
579 return ACPI_SUCCESS(status) ? 0 : -ENOENT; in dmar_table_detect()
589 next = (void *)iter + iter->length; in dmar_walk_remapping_entries()
590 if (iter->length == 0) { in dmar_walk_remapping_entries()
592 pr_debug(FW_BUG "Invalid 0-length structure\n"); in dmar_walk_remapping_entries()
597 return -EINVAL; in dmar_walk_remapping_entries()
600 if (cb->print_entry) in dmar_walk_remapping_entries()
603 if (iter->type >= ACPI_DMAR_TYPE_RESERVED) { in dmar_walk_remapping_entries()
606 iter->type); in dmar_walk_remapping_entries()
607 } else if (cb->cb[iter->type]) { in dmar_walk_remapping_entries()
610 ret = cb->cb[iter->type](iter, cb->arg[iter->type]); in dmar_walk_remapping_entries()
613 } else if (!cb->ignore_unhandled) { in dmar_walk_remapping_entries()
615 iter->type); in dmar_walk_remapping_entries()
616 return -EINVAL; in dmar_walk_remapping_entries()
627 dmar->header.length - sizeof(*dmar), cb); in dmar_walk_dmar_table()
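A quick aside on the walk pattern used by dmar_walk_remapping_entries()/dmar_walk_dmar_table() above: the DMAR table is a sequence of variable-length sub-tables, each carrying its own type and length; zero-length records abort the walk, and known types are dispatched through a callback array indexed by type. Below is a minimal standalone sketch of that pattern with a hypothetical record layout (not the real ACPI structures):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical sub-table header, standing in for struct acpi_dmar_header. */
struct rec_hdr {
        uint16_t type;
        uint16_t length;        /* total record length, header included */
};

typedef int (*rec_cb)(const struct rec_hdr *rec);

static int on_type0(const struct rec_hdr *rec)
{
        printf("type 0 record, %u bytes\n", rec->length);
        return 0;
}

static int walk(const void *start, size_t len, const rec_cb *cbs, size_t ncbs)
{
        const char *pos = start, *end = pos + len;

        while (pos < end) {
                const struct rec_hdr *rec = (const struct rec_hdr *)pos;

                if (rec->length == 0 || pos + rec->length > end)
                        return -1;              /* malformed table */
                if (rec->type < ncbs && cbs[rec->type]) {
                        int ret = cbs[rec->type](rec);

                        if (ret)
                                return ret;
                }                               /* unknown types are skipped */
                pos += rec->length;
        }
        return 0;
}

int main(void)
{
        struct {
                struct rec_hdr hdr;
                uint32_t payload;
        } rec0 = { .hdr = { .type = 0, .length = sizeof(rec0) } };
        const rec_cb cbs[] = { on_type0 };

        return walk(&rec0, sizeof(rec0), cbs, 1);
}

The driver's version additionally prints each entry and treats unhandled types as an error unless ignore_unhandled is set, as the listed lines show.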
631 * parse_dmar_table - parses the DMA reporting table
653 * fixed map. in parse_dmar_table()
665 return -ENODEV; in parse_dmar_table()
667 if (dmar->width < PAGE_SHIFT - 1) { in parse_dmar_table()
669 return -EINVAL; in parse_dmar_table()
672 pr_info("Host address width %d\n", dmar->width + 1); in parse_dmar_table()
692 dev = dev->bus->self; in dmar_pci_device_match()
708 drhd = container_of(dmaru->hdr, in dmar_find_matched_drhd_unit()
712 if (dmaru->include_all && in dmar_find_matched_drhd_unit()
713 drhd->segment == pci_domain_nr(dev->bus)) in dmar_find_matched_drhd_unit()
716 if (dmar_pci_device_match(dmaru->devices, in dmar_find_matched_drhd_unit()
717 dmaru->devices_cnt, dev)) in dmar_find_matched_drhd_unit()
738 drhd = container_of(dmaru->hdr, in dmar_acpi_insert_dev_scope()
743 (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length; in dmar_acpi_insert_dev_scope()
744 scope = ((void *)scope) + scope->length) { in dmar_acpi_insert_dev_scope()
745 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE) in dmar_acpi_insert_dev_scope()
747 if (scope->enumeration_id != device_number) in dmar_acpi_insert_dev_scope()
752 dev_name(&adev->dev), dmaru->reg_base_addr, in dmar_acpi_insert_dev_scope()
753 scope->bus, path->device, path->function); in dmar_acpi_insert_dev_scope()
754 for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp) in dmar_acpi_insert_dev_scope()
756 dmaru->devices[i].bus = scope->bus; in dmar_acpi_insert_dev_scope()
757 dmaru->devices[i].devfn = PCI_DEVFN(path->device, in dmar_acpi_insert_dev_scope()
758 path->function); in dmar_acpi_insert_dev_scope()
759 rcu_assign_pointer(dmaru->devices[i].dev, in dmar_acpi_insert_dev_scope()
760 get_device(&adev->dev)); in dmar_acpi_insert_dev_scope()
763 BUG_ON(i >= dmaru->devices_cnt); in dmar_acpi_insert_dev_scope()
766 pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n", in dmar_acpi_insert_dev_scope()
767 device_number, dev_name(&adev->dev)); in dmar_acpi_insert_dev_scope()
775 return -ENODEV; in dmar_acpi_dev_scope_init()
778 ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length; in dmar_acpi_dev_scope_init()
779 andd = ((void *)andd) + andd->header.length) { in dmar_acpi_dev_scope_init()
780 if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) { in dmar_acpi_dev_scope_init()
785 andd->device_name, in dmar_acpi_dev_scope_init()
788 andd->device_name); in dmar_acpi_dev_scope_init()
794 andd->device_name); in dmar_acpi_dev_scope_init()
797 dmar_acpi_insert_dev_scope(andd->device_number, adev); in dmar_acpi_dev_scope_init()
812 dmar_dev_scope_status = -ENODEV; in dmar_dev_scope_init()
819 if (dev->is_virtfn) in dmar_dev_scope_init()
851 if (ret != -ENODEV) in dmar_table_init()
855 ret = -ENODEV; in dmar_table_init()
887 if (!drhd->address) { in dmar_validate_one_drhd()
889 return -EINVAL; in dmar_validate_one_drhd()
893 addr = ioremap(drhd->address, VTD_PAGE_SIZE); in dmar_validate_one_drhd()
895 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE); in dmar_validate_one_drhd()
897 pr_warn("Can't validate DRHD address: %llx\n", drhd->address); in dmar_validate_one_drhd()
898 return -EINVAL; in dmar_validate_one_drhd()
909 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) { in dmar_validate_one_drhd()
910 warn_invalid_dmar(drhd->address, " returns all ones"); in dmar_validate_one_drhd()
911 return -EINVAL; in dmar_validate_one_drhd()
939 x86_init.iommu.iommu_init = intel_iommu_init; in detect_intel_iommu()
952 static void unmap_iommu(struct intel_iommu *iommu) in unmap_iommu() argument
954 iounmap(iommu->reg); in unmap_iommu()
955 release_mem_region(iommu->reg_phys, iommu->reg_size); in unmap_iommu()
959 * map_iommu: map the iommu's registers
960 * @iommu: the iommu to map
963 * Memory map the iommu's registers. Start w/ a single page, and
966 static int map_iommu(struct intel_iommu *iommu, struct dmar_drhd_unit *drhd) in map_iommu() argument
968 u64 phys_addr = drhd->reg_base_addr; in map_iommu()
971 iommu->reg_phys = phys_addr; in map_iommu()
972 iommu->reg_size = drhd->reg_size; in map_iommu()
974 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) { in map_iommu()
976 err = -EBUSY; in map_iommu()
980 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size); in map_iommu()
981 if (!iommu->reg) { in map_iommu()
982 pr_err("Can't map the region\n"); in map_iommu()
983 err = -ENOMEM; in map_iommu()
987 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG); in map_iommu()
988 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); in map_iommu()
990 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) { in map_iommu()
991 err = -EINVAL; in map_iommu()
997 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), in map_iommu()
998 cap_max_fault_reg_offset(iommu->cap)); in map_iommu()
1000 if (map_size > iommu->reg_size) { in map_iommu()
1001 iounmap(iommu->reg); in map_iommu()
1002 release_mem_region(iommu->reg_phys, iommu->reg_size); in map_iommu()
1003 iommu->reg_size = map_size; in map_iommu()
1004 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, in map_iommu()
1005 iommu->name)) { in map_iommu()
1007 err = -EBUSY; in map_iommu()
1010 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size); in map_iommu()
1011 if (!iommu->reg) { in map_iommu()
1012 pr_err("Can't map the region\n"); in map_iommu()
1013 err = -ENOMEM; in map_iommu()
1018 if (cap_ecmds(iommu->cap)) { in map_iommu()
1022 iommu->ecmdcap[i] = dmar_readq(iommu->reg + DMAR_ECCAP_REG + in map_iommu()
1031 iounmap(iommu->reg); in map_iommu()
1033 release_mem_region(iommu->reg_phys, iommu->reg_size); in map_iommu()
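A note on the two-stage mapping in map_iommu() above, inferred from the listed lines rather than from any extra kernel documentation:

        /*
         * The CAP/ECAP registers fit in the initial drhd->reg_size mapping,
         * but cap_max_fault_reg_offset() and ecap_max_iotlb_offset() may
         * report fault-recording/invalidation registers that lie beyond it;
         * when map_size exceeds reg_size, the region is released and then
         * re-requested and remapped at the larger size before those
         * registers are touched.
         */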
1040 struct intel_iommu *iommu; in alloc_iommu() local
1042 int agaw = -1; in alloc_iommu()
1043 int msagaw = -1; in alloc_iommu()
1046 if (!drhd->reg_base_addr) { in alloc_iommu()
1048 return -EINVAL; in alloc_iommu()
1051 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); in alloc_iommu()
1052 if (!iommu) in alloc_iommu()
1053 return -ENOMEM; in alloc_iommu()
1055 iommu->seq_id = ida_alloc_range(&dmar_seq_ids, 0, in alloc_iommu()
1056 DMAR_UNITS_SUPPORTED - 1, GFP_KERNEL); in alloc_iommu()
1057 if (iommu->seq_id < 0) { in alloc_iommu()
1059 err = iommu->seq_id; in alloc_iommu()
1062 sprintf(iommu->name, "dmar%d", iommu->seq_id); in alloc_iommu()
1064 err = map_iommu(iommu, drhd); in alloc_iommu()
1066 pr_err("Failed to map %s\n", iommu->name); in alloc_iommu()
1070 err = -EINVAL; in alloc_iommu()
1071 if (!cap_sagaw(iommu->cap) && in alloc_iommu()
1072 (!ecap_smts(iommu->ecap) || ecap_slts(iommu->ecap))) { in alloc_iommu()
1074 iommu->name); in alloc_iommu()
1075 drhd->ignored = 1; in alloc_iommu()
1078 if (!drhd->ignored) { in alloc_iommu()
1079 agaw = iommu_calculate_agaw(iommu); in alloc_iommu()
1081 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n", in alloc_iommu()
1082 iommu->seq_id); in alloc_iommu()
1083 drhd->ignored = 1; in alloc_iommu()
1086 if (!drhd->ignored) { in alloc_iommu()
1087 msagaw = iommu_calculate_max_sagaw(iommu); in alloc_iommu()
1089 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n", in alloc_iommu()
1090 iommu->seq_id); in alloc_iommu()
1091 drhd->ignored = 1; in alloc_iommu()
1092 agaw = -1; in alloc_iommu()
1095 iommu->agaw = agaw; in alloc_iommu()
1096 iommu->msagaw = msagaw; in alloc_iommu()
1097 iommu->segment = drhd->segment; in alloc_iommu()
1099 iommu->node = NUMA_NO_NODE; in alloc_iommu()
1101 ver = readl(iommu->reg + DMAR_VER_REG); in alloc_iommu()
1103 iommu->name, in alloc_iommu()
1104 (unsigned long long)drhd->reg_base_addr, in alloc_iommu()
1106 (unsigned long long)iommu->cap, in alloc_iommu()
1107 (unsigned long long)iommu->ecap); in alloc_iommu()
1110 sts = readl(iommu->reg + DMAR_GSTS_REG); in alloc_iommu()
1112 iommu->gcmd |= DMA_GCMD_IRE; in alloc_iommu()
1114 iommu->gcmd |= DMA_GCMD_TE; in alloc_iommu()
1116 iommu->gcmd |= DMA_GCMD_QIE; in alloc_iommu()
1118 if (alloc_iommu_pmu(iommu)) in alloc_iommu()
1119 pr_debug("Cannot alloc PMU for iommu (seq_id = %d)\n", iommu->seq_id); in alloc_iommu()
1121 raw_spin_lock_init(&iommu->register_lock); in alloc_iommu()
1127 if (pasid_supported(iommu)) in alloc_iommu()
1128 iommu->iommu.max_pasids = 2UL << ecap_pss(iommu->ecap); in alloc_iommu()
1135 if (intel_iommu_enabled && !drhd->ignored) { in alloc_iommu()
1136 err = iommu_device_sysfs_add(&iommu->iommu, NULL, in alloc_iommu()
1138 "%s", iommu->name); in alloc_iommu()
1142 err = iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL); in alloc_iommu()
1146 iommu_pmu_register(iommu); in alloc_iommu()
1149 drhd->iommu = iommu; in alloc_iommu()
1150 iommu->drhd = drhd; in alloc_iommu()
1155 iommu_device_sysfs_remove(&iommu->iommu); in alloc_iommu()
1157 free_iommu_pmu(iommu); in alloc_iommu()
1158 unmap_iommu(iommu); in alloc_iommu()
1160 ida_free(&dmar_seq_ids, iommu->seq_id); in alloc_iommu()
1162 kfree(iommu); in alloc_iommu()
1166 static void free_iommu(struct intel_iommu *iommu) in free_iommu() argument
1168 if (intel_iommu_enabled && !iommu->drhd->ignored) { in free_iommu()
1169 iommu_pmu_unregister(iommu); in free_iommu()
1170 iommu_device_unregister(&iommu->iommu); in free_iommu()
1171 iommu_device_sysfs_remove(&iommu->iommu); in free_iommu()
1174 free_iommu_pmu(iommu); in free_iommu()
1176 if (iommu->irq) { in free_iommu()
1177 if (iommu->pr_irq) { in free_iommu()
1178 free_irq(iommu->pr_irq, iommu); in free_iommu()
1179 dmar_free_hwirq(iommu->pr_irq); in free_iommu()
1180 iommu->pr_irq = 0; in free_iommu()
1182 free_irq(iommu->irq, iommu); in free_iommu()
1183 dmar_free_hwirq(iommu->irq); in free_iommu()
1184 iommu->irq = 0; in free_iommu()
1187 if (iommu->qi) { in free_iommu()
1188 free_page((unsigned long)iommu->qi->desc); in free_iommu()
1189 kfree(iommu->qi->desc_status); in free_iommu()
1190 kfree(iommu->qi); in free_iommu()
1193 if (iommu->reg) in free_iommu()
1194 unmap_iommu(iommu); in free_iommu()
1196 ida_free(&dmar_seq_ids, iommu->seq_id); in free_iommu()
1197 kfree(iommu); in free_iommu()
1205 while (qi->desc_status[qi->free_tail] == QI_DONE || in reclaim_free_desc()
1206 qi->desc_status[qi->free_tail] == QI_ABORT) { in reclaim_free_desc()
1207 qi->desc_status[qi->free_tail] = QI_FREE; in reclaim_free_desc()
1208 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH; in reclaim_free_desc()
1209 qi->free_cnt++; in reclaim_free_desc()
1217 return "Context-cache Invalidation"; in qi_type_string()
1221 return "Device-TLB Invalidation"; in qi_type_string()
1227 return "PASID-based IOTLB Invalidation"; in qi_type_string()
1229 return "PASID-cache Invalidation"; in qi_type_string()
1231 return "PASID-based Device-TLB Invalidation"; in qi_type_string()
1239 static void qi_dump_fault(struct intel_iommu *iommu, u32 fault) in qi_dump_fault() argument
1241 unsigned int head = dmar_readl(iommu->reg + DMAR_IQH_REG); in qi_dump_fault()
1242 u64 iqe_err = dmar_readq(iommu->reg + DMAR_IQER_REG); in qi_dump_fault()
1243 struct qi_desc *desc = iommu->qi->desc + head; in qi_dump_fault()
1246 pr_err("VT-d detected Invalidation Queue Error: Reason %llx", in qi_dump_fault()
1249 pr_err("VT-d detected Invalidation Time-out Error: SID %llx", in qi_dump_fault()
1252 pr_err("VT-d detected Invalidation Completion Error: SID %llx", in qi_dump_fault()
1256 qi_type_string(desc->qw0 & 0xf), in qi_dump_fault()
1257 (unsigned long long)desc->qw0, in qi_dump_fault()
1258 (unsigned long long)desc->qw1); in qi_dump_fault()
1260 head = ((head >> qi_shift(iommu)) + QI_LENGTH - 1) % QI_LENGTH; in qi_dump_fault()
1261 head <<= qi_shift(iommu); in qi_dump_fault()
1262 desc = iommu->qi->desc + head; in qi_dump_fault()
1265 qi_type_string(desc->qw0 & 0xf), in qi_dump_fault()
1266 (unsigned long long)desc->qw0, in qi_dump_fault()
1267 (unsigned long long)desc->qw1); in qi_dump_fault()
1270 static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index) in qi_check_fault() argument
1274 struct q_inval *qi = iommu->qi; in qi_check_fault()
1275 int shift = qi_shift(iommu); in qi_check_fault()
1277 if (qi->desc_status[wait_index] == QI_ABORT) in qi_check_fault()
1278 return -EAGAIN; in qi_check_fault()
1280 fault = readl(iommu->reg + DMAR_FSTS_REG); in qi_check_fault()
1282 qi_dump_fault(iommu, fault); in qi_check_fault()
1290 head = readl(iommu->reg + DMAR_IQH_REG); in qi_check_fault()
1292 struct qi_desc *desc = qi->desc + head; in qi_check_fault()
1295 * desc->qw2 and desc->qw3 are either reserved or in qi_check_fault()
1299 memcpy(desc, qi->desc + (wait_index << shift), in qi_check_fault()
1301 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG); in qi_check_fault()
1303 return -EINVAL; in qi_check_fault()
1312 head = readl(iommu->reg + DMAR_IQH_REG); in qi_check_fault()
1313 head = ((head >> shift) - 1 + QI_LENGTH) % QI_LENGTH; in qi_check_fault()
1315 tail = readl(iommu->reg + DMAR_IQT_REG); in qi_check_fault()
1316 tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH; in qi_check_fault()
1318 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG); in qi_check_fault()
1319 pr_info("Invalidation Time-out Error (ITE) cleared\n"); in qi_check_fault()
1322 if (qi->desc_status[head] == QI_IN_USE) in qi_check_fault()
1323 qi->desc_status[head] = QI_ABORT; in qi_check_fault()
1324 head = (head - 2 + QI_LENGTH) % QI_LENGTH; in qi_check_fault()
1327 if (qi->desc_status[wait_index] == QI_ABORT) in qi_check_fault()
1328 return -EAGAIN; in qi_check_fault()
1332 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG); in qi_check_fault()
1346 int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc, in qi_submit_sync() argument
1349 struct q_inval *qi = iommu->qi; in qi_submit_sync()
1363 type = desc->qw0 & GENMASK_ULL(3, 0); in qi_submit_sync()
1366 dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IOTLB)) in qi_submit_sync()
1370 dmar_latency_enabled(iommu, DMAR_LATENCY_INV_DEVTLB)) in qi_submit_sync()
1374 dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IEC)) in qi_submit_sync()
1380 raw_spin_lock_irqsave(&qi->q_lock, flags); in qi_submit_sync()
1386 while (qi->free_cnt < count + 2) { in qi_submit_sync()
1387 raw_spin_unlock_irqrestore(&qi->q_lock, flags); in qi_submit_sync()
1389 raw_spin_lock_irqsave(&qi->q_lock, flags); in qi_submit_sync()
1392 index = qi->free_head; in qi_submit_sync()
1394 shift = qi_shift(iommu); in qi_submit_sync()
1398 memcpy(qi->desc + offset, &desc[i], 1 << shift); in qi_submit_sync()
1399 qi->desc_status[(index + i) % QI_LENGTH] = QI_IN_USE; in qi_submit_sync()
1400 trace_qi_submit(iommu, desc[i].qw0, desc[i].qw1, in qi_submit_sync()
1403 qi->desc_status[wait_index] = QI_IN_USE; in qi_submit_sync()
1409 wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]); in qi_submit_sync()
1414 memcpy(qi->desc + offset, &wait_desc, 1 << shift); in qi_submit_sync()
1416 qi->free_head = (qi->free_head + count + 1) % QI_LENGTH; in qi_submit_sync()
1417 qi->free_cnt -= count + 1; in qi_submit_sync()
1423 writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG); in qi_submit_sync()
1425 while (qi->desc_status[wait_index] != QI_DONE) { in qi_submit_sync()
1433 rc = qi_check_fault(iommu, index, wait_index); in qi_submit_sync()
1437 raw_spin_unlock(&qi->q_lock); in qi_submit_sync()
1439 raw_spin_lock(&qi->q_lock); in qi_submit_sync()
1443 qi->desc_status[(index + i) % QI_LENGTH] = QI_DONE; in qi_submit_sync()
1446 raw_spin_unlock_irqrestore(&qi->q_lock, flags); in qi_submit_sync()
1448 if (rc == -EAGAIN) in qi_submit_sync()
1452 dmar_latency_update(iommu, DMAR_LATENCY_INV_IOTLB, in qi_submit_sync()
1453 ktime_to_ns(ktime_get()) - iotlb_start_ktime); in qi_submit_sync()
1456 dmar_latency_update(iommu, DMAR_LATENCY_INV_DEVTLB, in qi_submit_sync()
1457 ktime_to_ns(ktime_get()) - devtlb_start_ktime); in qi_submit_sync()
1460 dmar_latency_update(iommu, DMAR_LATENCY_INV_IEC, in qi_submit_sync()
1461 ktime_to_ns(ktime_get()) - iec_start_ktime); in qi_submit_sync()
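For context on the lines above: qi_submit_sync() reserves count + 1 ring slots (the descriptor batch plus one trailing wait descriptor whose completion status lives in qi->desc_status[wait_index]), requires at least count + 2 free slots so the ring never fills completely, advances free_head modulo QI_LENGTH, and then spins on the status word while qi_check_fault() watches for queue errors; reclaim_free_desc() later retires finished slots from free_tail. A standalone sketch of just this ring bookkeeping (no hardware; the 256-entry size is illustrative):

#include <stdio.h>

#define QI_LENGTH 256

enum { QI_FREE, QI_IN_USE, QI_DONE, QI_ABORT };

static int status[QI_LENGTH];
static int free_head, free_tail, free_cnt = QI_LENGTH;

/* Reserve 'count' descriptors plus one wait descriptor, as
 * qi_submit_sync() does; returns the index of the first slot. */
static int reserve(int count)
{
        int index = free_head;
        int i;

        if (free_cnt < count + 2)
                return -1;      /* caller would drop the lock and retry */

        for (i = 0; i <= count; i++)
                status[(index + i) % QI_LENGTH] = QI_IN_USE;

        free_head = (free_head + count + 1) % QI_LENGTH;
        free_cnt -= count + 1;
        return index;
}

/* Mirror of reclaim_free_desc(): retire completed slots from the tail. */
static void reclaim(void)
{
        while (status[free_tail] == QI_DONE || status[free_tail] == QI_ABORT) {
                status[free_tail] = QI_FREE;
                free_tail = (free_tail + 1) % QI_LENGTH;
                free_cnt++;
        }
}

int main(void)
{
        int idx = reserve(2);   /* two descriptors + one wait descriptor */
        int i;

        for (i = 0; i <= 2; i++)        /* pretend hardware completed them */
                status[(idx + i) % QI_LENGTH] = QI_DONE;
        reclaim();
        printf("free_cnt back to %d\n", free_cnt);
        return 0;
}

Compared with the real code, the sketch omits the lock, the wait-descriptor contents and the QI_ABORT retry path.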
1469 void qi_global_iec(struct intel_iommu *iommu) in qi_global_iec() argument
1479 qi_submit_sync(iommu, &desc, 1, 0); in qi_global_iec()
1482 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, in qi_flush_context() argument
1493 qi_submit_sync(iommu, &desc, 1, 0); in qi_flush_context()
1496 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, in qi_flush_iotlb() argument
1504 if (cap_write_drain(iommu->cap)) in qi_flush_iotlb()
1507 if (cap_read_drain(iommu->cap)) in qi_flush_iotlb()
1517 qi_submit_sync(iommu, &desc, 1, 0); in qi_flush_iotlb()
1520 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, in qi_flush_dev_iotlb() argument
1521 u16 qdep, u64 addr, unsigned mask) in qi_flush_dev_iotlb() argument
1525 if (mask) { in qi_flush_dev_iotlb()
1526 addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1; in qi_flush_dev_iotlb()
1539 qi_submit_sync(iommu, &desc, 1, 0); in qi_flush_dev_iotlb()
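The mask handling above uses the device-TLB "address mask" encoding: setting every bit below bit (VTD_PAGE_SHIFT + mask - 1) leaves, for a suitably aligned base, the lowest clear bit at a position that tells the hardware 2^mask pages are covered (together with a size flag in the descriptor, which is not among the matched lines). A small worked example, assuming VTD_PAGE_SHIFT == 12 as in the driver:

#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT 12

int main(void)
{
        uint64_t addr = 0x123450000ULL; /* hypothetical page-aligned base */
        unsigned int mask = 4;          /* invalidate 2^4 = 16 pages */

        /* Same transform as in qi_flush_dev_iotlb(): set every bit below
         * bit (VTD_PAGE_SHIFT + mask - 1); bit 15 stays clear here, which
         * encodes a 16-page (64 KiB) invalidation range. */
        addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;

        printf("encoded address: %#llx\n", (unsigned long long)addr);
        return 0;
}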
1542 /* PASID-based IOTLB invalidation */
1543 void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr, in qi_flush_piotlb() argument
1549 * npages == -1 means a PASID-selective invalidation, otherwise, in qi_flush_piotlb()
1550 * a positive value for Page-selective-within-PASID invalidation. in qi_flush_piotlb()
1558 if (npages == -1) { in qi_flush_piotlb()
1565 int mask = ilog2(__roundup_pow_of_two(npages)); in qi_flush_piotlb() local
1566 unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask)); in qi_flush_piotlb()
1577 QI_EIOTLB_AM(mask); in qi_flush_piotlb()
1580 qi_submit_sync(iommu, &desc, 1, 0); in qi_flush_piotlb()
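Here the page count is rounded up to a power of two and its exponent becomes the address-mask (AM) field via QI_EIOTLB_AM(mask); the align value computed above is the granule the base address must honour for such an invalidation (the alignment check itself is not among the matched lines). A small worked example with a hypothetical helper standing in for ilog2(__roundup_pow_of_two()):

#include <stdio.h>

/* Hypothetical stand-in for ilog2(__roundup_pow_of_two(npages)). */
static unsigned int am_for_pages(unsigned long npages)
{
        unsigned int am = 0;

        while ((1UL << am) < npages)
                am++;
        return am;
}

int main(void)
{
        unsigned long npages = 9;                       /* example count */
        unsigned int am = am_for_pages(npages);         /* -> 4 (16 pages) */
        unsigned long align = 1UL << (12 + am);         /* VTD_PAGE_SHIFT == 12 */

        printf("npages=%lu -> AM=%u, base must be %lu KiB aligned\n",
               npages, am, align >> 10);
        return 0;
}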
1583 /* PASID-based device IOTLB Invalidate */
1584 void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid, in qi_flush_dev_iotlb_pasid() argument
1587 unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1); in qi_flush_dev_iotlb_pasid() local
1597 * range. VT-d spec 6.5.2.6. in qi_flush_dev_iotlb_pasid()
1604 pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n", in qi_flush_dev_iotlb_pasid()
1616 desc.qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1, in qi_flush_dev_iotlb_pasid()
1619 desc.qw1 &= ~mask; in qi_flush_dev_iotlb_pasid()
1624 qi_submit_sync(iommu, &desc, 1, 0); in qi_flush_dev_iotlb_pasid()
1627 void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, in qi_flush_pasid_cache() argument
1634 qi_submit_sync(iommu, &desc, 1, 0); in qi_flush_pasid_cache()
1640 void dmar_disable_qi(struct intel_iommu *iommu) in dmar_disable_qi() argument
1646 if (!ecap_qis(iommu->ecap)) in dmar_disable_qi()
1649 raw_spin_lock_irqsave(&iommu->register_lock, flags); in dmar_disable_qi()
1651 sts = readl(iommu->reg + DMAR_GSTS_REG); in dmar_disable_qi()
1658 while ((readl(iommu->reg + DMAR_IQT_REG) != in dmar_disable_qi()
1659 readl(iommu->reg + DMAR_IQH_REG)) && in dmar_disable_qi()
1660 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time))) in dmar_disable_qi()
1663 iommu->gcmd &= ~DMA_GCMD_QIE; in dmar_disable_qi()
1664 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in dmar_disable_qi()
1666 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, in dmar_disable_qi()
1669 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in dmar_disable_qi()
1675 static void __dmar_enable_qi(struct intel_iommu *iommu) in __dmar_enable_qi() argument
1679 struct q_inval *qi = iommu->qi; in __dmar_enable_qi()
1680 u64 val = virt_to_phys(qi->desc); in __dmar_enable_qi()
1682 qi->free_head = qi->free_tail = 0; in __dmar_enable_qi()
1683 qi->free_cnt = QI_LENGTH; in __dmar_enable_qi()
1689 if (ecap_smts(iommu->ecap)) in __dmar_enable_qi()
1692 raw_spin_lock_irqsave(&iommu->register_lock, flags); in __dmar_enable_qi()
1695 writel(0, iommu->reg + DMAR_IQT_REG); in __dmar_enable_qi()
1697 dmar_writeq(iommu->reg + DMAR_IQA_REG, val); in __dmar_enable_qi()
1699 iommu->gcmd |= DMA_GCMD_QIE; in __dmar_enable_qi()
1700 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in __dmar_enable_qi()
1703 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts); in __dmar_enable_qi()
1705 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in __dmar_enable_qi()
1710 * interrupt-remapping. Also used by DMA-remapping, which replaces
1713 int dmar_enable_qi(struct intel_iommu *iommu) in dmar_enable_qi() argument
1718 if (!ecap_qis(iommu->ecap)) in dmar_enable_qi()
1719 return -ENOENT; in dmar_enable_qi()
1724 if (iommu->qi) in dmar_enable_qi()
1727 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC); in dmar_enable_qi()
1728 if (!iommu->qi) in dmar_enable_qi()
1729 return -ENOMEM; in dmar_enable_qi()
1731 qi = iommu->qi; in dmar_enable_qi()
1737 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, in dmar_enable_qi()
1738 !!ecap_smts(iommu->ecap)); in dmar_enable_qi()
1741 iommu->qi = NULL; in dmar_enable_qi()
1742 return -ENOMEM; in dmar_enable_qi()
1745 qi->desc = page_address(desc_page); in dmar_enable_qi()
1747 qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC); in dmar_enable_qi()
1748 if (!qi->desc_status) { in dmar_enable_qi()
1749 free_page((unsigned long) qi->desc); in dmar_enable_qi()
1751 iommu->qi = NULL; in dmar_enable_qi()
1752 return -ENOMEM; in dmar_enable_qi()
1755 raw_spin_lock_init(&qi->q_lock); in dmar_enable_qi()
1757 __dmar_enable_qi(iommu); in dmar_enable_qi()
1762 /* iommu interrupt handling. Most stuff are MSI-like. */
1782 "non-zero reserved fields in RTP",
1783 "non-zero reserved fields in CTP",
1784 "non-zero reserved fields in PTE",
1792 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x33-0x37 */
1795 "SM: Non-zero reserved field set in Root Entry",
1796 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x3B-0x3F */
1799 "SM: Non-zero reserved field set in the Context Entry",
1804 "SM: PRE field in Context-Entry is clear",
1805 "SM: RID_PASID field error in Context-Entry",
1806 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x49-0x4F */
1809 "SM: Non-zero reserved field set in PASID Directory Entry",
1810 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x53-0x57 */
1813 "SM: Non-zero reserved field set in PASID Table Entry",
1814 "SM: Invalid Scalable-Mode PASID Table Entry",
1817 "Unknown", "Unknown",/* 0x5E-0x5F */
1818 …"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x60-0x…
1819 …"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x68-0x…
1820 "SM: Error attempting to access first-level paging entry",
1821 "SM: Present bit in first-level paging entry is clear",
1822 "SM: Non-zero reserved field set in first-level paging entry",
1823 "SM: Error attempting to access FL-PML4 entry",
1824 "SM: First-level entry address beyond MGAW in Nested translation",
1825 "SM: Read permission error in FL-PML4 entry in Nested translation",
1826 "SM: Read permission error in first-level paging entry in Nested translation",
1827 "SM: Write permission error in first-level paging entry in Nested translation",
1828 "SM: Error attempting to access second-level paging entry",
1829 "SM: Read/Write permission error in second-level paging entry",
1830 "SM: Non-zero reserved field set in second-level paging entry",
1831 "SM: Invalid second-level page table pointer",
1832 "SM: A/D bit update needed in second-level entry when set up in no snoop",
1833 "Unknown", "Unknown", "Unknown", /* 0x7D-0x7F */
1834 "SM: Address in first-level translation is not canonical",
1835 "SM: U/S set 0 for first-level translation with user privilege",
1838 "SM: Second-level entry address beyond the max",
1841 "SM: Invalid address-interrupt address",
1842 …"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x88-0x…
1843 "SM: A/D bit update needed in first-level entry when set up in no snoop",
1848 "Detected reserved fields in the decoded interrupt-remapped request",
1849 "Interrupt index exceeded the interrupt-remapping table size",
1851 "Error accessing interrupt-remapping table pointed by IRTA_REG",
1854 "Blocked an interrupt request due to source-id verification failure",
1859 if (fault_reason >= 0x20 && (fault_reason - 0x20 < in dmar_get_fault_reason()
1862 return irq_remap_fault_reasons[fault_reason - 0x20]; in dmar_get_fault_reason()
1863 } else if (fault_reason >= 0x30 && (fault_reason - 0x30 < in dmar_get_fault_reason()
1866 return dma_remap_sm_fault_reasons[fault_reason - 0x30]; in dmar_get_fault_reason()
1877 static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq) in dmar_msi_reg() argument
1879 if (iommu->irq == irq) in dmar_msi_reg()
1881 else if (iommu->pr_irq == irq) in dmar_msi_reg()
1883 else if (iommu->perf_irq == irq) in dmar_msi_reg()
1891 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); in dmar_msi_unmask() local
1892 int reg = dmar_msi_reg(iommu, data->irq); in dmar_msi_unmask()
1896 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_msi_unmask()
1897 writel(0, iommu->reg + reg); in dmar_msi_unmask()
1899 readl(iommu->reg + reg); in dmar_msi_unmask()
1900 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_msi_unmask()
1905 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); in dmar_msi_mask() local
1906 int reg = dmar_msi_reg(iommu, data->irq); in dmar_msi_mask()
1909 /* mask it */ in dmar_msi_mask()
1910 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_msi_mask()
1911 writel(DMA_FECTL_IM, iommu->reg + reg); in dmar_msi_mask()
1913 readl(iommu->reg + reg); in dmar_msi_mask()
1914 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_msi_mask()
1919 struct intel_iommu *iommu = irq_get_handler_data(irq); in dmar_msi_write() local
1920 int reg = dmar_msi_reg(iommu, irq); in dmar_msi_write()
1923 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_msi_write()
1924 writel(msg->data, iommu->reg + reg + 4); in dmar_msi_write()
1925 writel(msg->address_lo, iommu->reg + reg + 8); in dmar_msi_write()
1926 writel(msg->address_hi, iommu->reg + reg + 12); in dmar_msi_write()
1927 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_msi_write()
1932 struct intel_iommu *iommu = irq_get_handler_data(irq); in dmar_msi_read() local
1933 int reg = dmar_msi_reg(iommu, irq); in dmar_msi_read()
1936 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_msi_read()
1937 msg->data = readl(iommu->reg + reg + 4); in dmar_msi_read()
1938 msg->address_lo = readl(iommu->reg + reg + 8); in dmar_msi_read()
1939 msg->address_hi = readl(iommu->reg + reg + 12); in dmar_msi_read()
1940 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_msi_read()
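A short note on the register arithmetic in dmar_msi_write()/dmar_msi_read() above, derived only from the listed offsets:

        /*
         * dmar_msi_reg() returns the base of a per-event register block
         * (fault, page-request or perfmon event); within it the accessors
         * use fixed offsets: +0 control (the DMA_FECTL_IM mask bit written
         * by dmar_msi_mask()), +4 MSI data, +8 MSI address low and
         * +12 MSI address high.
         */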
1943 static int dmar_fault_do_one(struct intel_iommu *iommu, int type, in dmar_fault_do_one() argument
1953 pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index 0x%llx [fault reason 0x%02x] %s\n", in dmar_fault_do_one()
1974 dmar_fault_dump_ptes(iommu, source_id, addr, pasid); in dmar_fault_do_one()
1982 struct intel_iommu *iommu = dev_id; in dmar_fault() local
1990 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_fault()
1991 fault_status = readl(iommu->reg + DMAR_FSTS_REG); in dmar_fault()
2000 reg = cap_fault_reg_offset(iommu->cap); in dmar_fault()
2013 data = readl(iommu->reg + reg + in dmar_fault()
2023 data = readl(iommu->reg + reg + in dmar_fault()
2028 guest_addr = dmar_readq(iommu->reg + reg + in dmar_fault()
2034 writel(DMA_FRCD_F, iommu->reg + reg + in dmar_fault()
2037 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_fault()
2040 /* Using pasid -1 if pasid is not present */ in dmar_fault()
2041 dmar_fault_do_one(iommu, type, fault_reason, in dmar_fault()
2046 if (fault_index >= cap_num_fault_regs(iommu->cap)) in dmar_fault()
2048 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_fault()
2052 iommu->reg + DMAR_FSTS_REG); in dmar_fault()
2055 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_fault()
2059 int dmar_set_interrupt(struct intel_iommu *iommu) in dmar_set_interrupt() argument
2066 if (iommu->irq) in dmar_set_interrupt()
2069 irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu); in dmar_set_interrupt()
2071 iommu->irq = irq; in dmar_set_interrupt()
2074 return -EINVAL; in dmar_set_interrupt()
2077 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu); in dmar_set_interrupt()
2086 struct intel_iommu *iommu; in enable_drhd_fault_handling() local
2091 for_each_iommu(iommu, drhd) { in enable_drhd_fault_handling()
2093 int ret = dmar_set_interrupt(iommu); in enable_drhd_fault_handling()
2097 (unsigned long long)drhd->reg_base_addr, ret); in enable_drhd_fault_handling()
2098 return -1; in enable_drhd_fault_handling()
2104 dmar_fault(iommu->irq, iommu); in enable_drhd_fault_handling()
2105 fault_status = readl(iommu->reg + DMAR_FSTS_REG); in enable_drhd_fault_handling()
2106 writel(fault_status, iommu->reg + DMAR_FSTS_REG); in enable_drhd_fault_handling()
2113 * Re-enable Queued Invalidation interface.
2115 int dmar_reenable_qi(struct intel_iommu *iommu) in dmar_reenable_qi() argument
2117 if (!ecap_qis(iommu->ecap)) in dmar_reenable_qi()
2118 return -ENOENT; in dmar_reenable_qi()
2120 if (!iommu->qi) in dmar_reenable_qi()
2121 return -ENOENT; in dmar_reenable_qi()
2126 dmar_disable_qi(iommu); in dmar_reenable_qi()
2129 * invalidation requests now, it's safe to re-enable queued in dmar_reenable_qi()
2132 __dmar_enable_qi(iommu); in dmar_reenable_qi()
2146 return dmar->flags & 0x1; in dmar_ir_support()
2167 list_del(&dmaru->list); in dmar_free_unused_resources()
2180 * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
2205 int ret = -ENODEV; in dmar_walk_dsm_resource()
2222 return -ENODEV; in dmar_walk_dsm_resource()
2227 start = (struct acpi_dmar_header *)obj->buffer.pointer; in dmar_walk_dsm_resource()
2228 ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback); in dmar_walk_dsm_resource()
2242 return -ENODEV; in dmar_hp_add_drhd()
2264 if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) { in dmar_hp_remove_drhd()
2265 for_each_active_dev_scope(dmaru->devices, in dmar_hp_remove_drhd()
2266 dmaru->devices_cnt, i, dev) in dmar_hp_remove_drhd()
2267 return -EBUSY; in dmar_hp_remove_drhd()
2283 list_del_rcu(&dmaru->list); in dmar_hp_release_drhd()
2392 return -ENXIO; in dmar_device_hotplug()
2419 * dmar_platform_optin - Is %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in DMAR table
2436 ret = !!(dmar->flags & DMAR_PLATFORM_OPT_IN); in dmar_platform_optin()