Lines Matching +full:smmu-v3
1 // SPDX-License-Identifier: GPL-2.0-only
21 #include <linux/dma-map-ops.h>
44 * iort_set_fwnode() - Create iort_fwnode and use it to register
61 return -ENOMEM; in iort_set_fwnode()
63 INIT_LIST_HEAD(&np->list); in iort_set_fwnode()
64 np->iort_node = iort_node; in iort_set_fwnode()
65 np->fwnode = fwnode; in iort_set_fwnode()
68 list_add_tail(&np->list, &iort_fwnode_list); in iort_set_fwnode()
75 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
77 * @node: IORT table node to be looked up
89 if (curr->iort_node == node) { in iort_get_fwnode()
90 fwnode = curr->fwnode; in iort_get_fwnode()
100 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
110 if (curr->iort_node == node) { in iort_delete_fwnode()
111 list_del(&curr->list); in iort_delete_fwnode()
120 * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
122 * @fwnode: fwnode associated with device to be looked up
134 if (curr->fwnode == fwnode) { in iort_get_iort_node()
135 iort_node = curr->iort_node; in iort_get_iort_node()
154 * iort_register_domain_token() - register domain token along with related
160 * Returns: 0 on success, -ENOMEM if allocating the list element fails
169 return -ENOMEM; in iort_register_domain_token()
171 its_msi_chip->fw_node = fw_node; in iort_register_domain_token()
172 its_msi_chip->translation_id = trans_id; in iort_register_domain_token()
173 its_msi_chip->base_addr = base; in iort_register_domain_token()
176 list_add(&its_msi_chip->list, &iort_msi_chip_list); in iort_register_domain_token()
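iort_register_domain_token() and its find/deregister counterparts above implement a small registry: a lock-protected list mapping an ITS translation ID to the fwnode token (and ITS base address) that the MSI layer looks up later. A minimal userspace model of the same pattern; the names (its_chip, registry, register_token, find_token) are hypothetical, not the kernel's:

    #include <stdint.h>
    #include <stdlib.h>

    /* Models one iort_its_msi_chip entry. */
    struct its_chip {
        struct its_chip *next;
        uint32_t translation_id;    /* ITS ID from the IORT ITS group */
        uint64_t base_addr;         /* ITS register frame base */
        void *fw_node;              /* token handed to the MSI layer */
    };

    static struct its_chip *registry;   /* kernel: list_head + spinlock */

    static int register_token(uint32_t trans_id, uint64_t base, void *fw_node)
    {
        struct its_chip *c = malloc(sizeof(*c));

        if (!c)
            return -1;              /* kernel returns -ENOMEM */
        c->translation_id = trans_id;
        c->base_addr = base;
        c->fw_node = fw_node;
        c->next = registry;         /* kernel: list_add() under the lock */
        registry = c;
        return 0;
    }

    static void *find_token(uint32_t trans_id)
    {
        for (struct its_chip *c = registry; c; c = c->next)
            if (c->translation_id == trans_id)
                return c->fw_node;
        return NULL;                /* no registered domain token */
    }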
183 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
194 if (its_msi_chip->translation_id == trans_id) { in iort_deregister_domain_token()
195 list_del(&its_msi_chip->list); in iort_deregister_domain_token()
204 * iort_find_domain_token() - Find domain token based on given ITS ID
216 if (its_msi_chip->translation_id == trans_id) { in iort_find_domain_token()
217 fw_node = its_msi_chip->fw_node; in iort_find_domain_token()
240 iort->node_offset); in iort_scan_node()
242 iort_table->length); in iort_scan_node()
244 for (i = 0; i < iort->node_count; i++) { in iort_scan_node()
249 if (iort_node->type == type && in iort_scan_node()
254 iort_node->length); in iort_scan_node()
266 if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) { in iort_match_node_callback()
284 nc_dev = nc_dev->parent; in iort_match_node_callback()
290 status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf); in iort_match_node_callback()
296 ncomp = (struct acpi_iort_named_component *)node->node_data; in iort_match_node_callback()
297 status = !strcmp(ncomp->device_name, buf.pointer) ? in iort_match_node_callback()
300 } else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) { in iort_match_node_callback()
305 pci_rc = (struct acpi_iort_root_complex *)node->node_data; in iort_match_node_callback()
308 * It is assumed that PCI segment numbers map one-to-one in iort_match_node_callback()
312 status = pci_rc->pci_segment_number == pci_domain_nr(bus) ? in iort_match_node_callback()
323 if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { in iort_id_map()
326 *rid_out = map->output_base; in iort_id_map()
332 return -ENXIO; in iort_id_map()
335 if (rid_in < map->input_base || in iort_id_map()
336 (rid_in > map->input_base + map->id_count)) in iort_id_map()
337 return -ENXIO; in iort_id_map()
343 * region, we assume the prior match was due to the off-by-1 in iort_id_map()
350 if (rid_in != map->input_base) in iort_id_map()
351 return -ENXIO; in iort_id_map()
356 *rid_out = map->output_base + (rid_in - map->input_base); in iort_id_map()
364 if (map->id_count > 0 && rid_in == map->input_base + map->id_count) in iort_id_map()
365 return -EAGAIN; in iort_id_map()
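The bounds check above encodes an IORT quirk: id_count is the number of IDs in the range minus one, so a mapping covers [input_base, input_base + id_count] inclusive, and an input that lands exactly on the upper bound is only a tentative match (-EAGAIN) so that a later range starting at that ID can claim it instead. A standalone sketch of the arithmetic, with a hypothetical struct standing in for acpi_iort_id_mapping:

    #include <stdint.h>

    struct id_map {
        uint32_t input_base;
        uint32_t id_count;      /* number of IDs minus one (spec quirk) */
        uint32_t output_base;
    };

    /* 0: firm match, 1: tentative match on the upper bound, -1: no match */
    static int map_rid(const struct id_map *m, uint32_t rid_in,
                       uint32_t *rid_out)
    {
        if (rid_in < m->input_base || rid_in > m->input_base + m->id_count)
            return -1;

        *rid_out = m->output_base + (rid_in - m->input_base);

        /* An exact hit on the inclusive end may belong to the next range. */
        if (m->id_count > 0 && rid_in == m->input_base + m->id_count)
            return 1;
        return 0;
    }

With {input_base = 0x100, id_count = 0xff, output_base = 0x1000}, RID 0x180 maps firmly to 0x1080, while RID 0x1ff maps to 0x10ff only tentatively.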
375 if (!node->mapping_offset || !node->mapping_count || in iort_node_get_id()
376 index >= node->mapping_count) in iort_node_get_id()
380 node->mapping_offset + index * sizeof(*map)); in iort_node_get_id()
383 if (!map->output_reference) { in iort_node_get_id()
385 node, node->type); in iort_node_get_id()
390 map->output_reference); in iort_node_get_id()
392 if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { in iort_node_get_id()
393 if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT || in iort_node_get_id()
394 node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX || in iort_node_get_id()
395 node->type == ACPI_IORT_NODE_SMMU_V3 || in iort_node_get_id()
396 node->type == ACPI_IORT_NODE_PMCG) { in iort_node_get_id()
397 *id_out = map->output_base; in iort_node_get_id()
407 struct acpi_iort_smmu_v3 *smmu; in iort_get_id_mapping_index() local
410 switch (node->type) { in iort_get_id_mapping_index()
416 if (node->revision < 1) in iort_get_id_mapping_index()
417 return -EINVAL; in iort_get_id_mapping_index()
419 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in iort_get_id_mapping_index()
424 if (smmu->event_gsiv && smmu->pri_gsiv && smmu->gerr_gsiv in iort_get_id_mapping_index()
425 && smmu->sync_gsiv) in iort_get_id_mapping_index()
426 return -EINVAL; in iort_get_id_mapping_index()
428 if (smmu->id_mapping_index >= node->mapping_count) { in iort_get_id_mapping_index()
430 node, node->type); in iort_get_id_mapping_index()
431 return -EINVAL; in iort_get_id_mapping_index()
434 return smmu->id_mapping_index; in iort_get_id_mapping_index()
436 pmcg = (struct acpi_iort_pmcg *)node->node_data; in iort_get_id_mapping_index()
437 if (pmcg->overflow_gsiv || node->mapping_count == 0) in iort_get_id_mapping_index()
438 return -EINVAL; in iort_get_id_mapping_index()
442 return -EINVAL; in iort_get_id_mapping_index()
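For an SMMUv3 node the logic above decides whether one entry of the mapping array is reserved for the SMMU's own DeviceID (needed when its control interrupts are MSIs): the field only exists from IORT revision 1 on, is ignored when all four interrupts are wired GSIVs, and must index a valid mapping; a PMCG uses its first mapping the same way when it has no wired overflow interrupt. Restated as a small decision helper with a hypothetical signature:

    /* Returns the mapping slot carrying the device's own DeviceID, or -1. */
    static int msi_mapping_index(int revision, int all_irqs_wired,
                                 uint32_t id_mapping_index,
                                 uint32_t mapping_count)
    {
        if (revision < 1)
            return -1;          /* field not defined before IORT rev 1 */
        if (all_irqs_wired)
            return -1;          /* no MSIs, so no DeviceID needed */
        if (id_mapping_index >= mapping_count)
            return -1;          /* firmware bug, warned about above */
        return (int)id_mapping_index;
    }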
458 if (IORT_TYPE_MASK(node->type) & type_mask) { in iort_node_map_id()
464 if (!node->mapping_offset || !node->mapping_count) in iort_node_map_id()
468 node->mapping_offset); in iort_node_map_id()
471 if (!map->output_reference) { in iort_node_map_id()
473 node, node->type); in iort_node_map_id()
479 * associated ID map to prevent erroneous multi-stage in iort_node_map_id()
485 for (i = 0; i < node->mapping_count; i++, map++) { in iort_node_map_id()
490 rc = iort_id_map(map, node->type, map_id, &id, out_ref); in iort_node_map_id()
493 if (rc == -EAGAIN) in iort_node_map_id()
494 out_ref = map->output_reference; in iort_node_map_id()
497 if (i == node->mapping_count && !out_ref) in iort_node_map_id()
501 rc ? out_ref : map->output_reference); in iort_node_map_id()
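The loop above is the consumer of that tentative-match convention: the first firm match ends the walk, an -EAGAIN only records the candidate output_reference, and translation fails only when the array is exhausted with neither. Continuing the earlier sketch (the id-mapping-index filtering is omitted here):

    /* Returns the index of the matched mapping, or -1 if none applies. */
    static int walk_maps(const struct id_map *maps, int count,
                         uint32_t rid_in, uint32_t *rid_out)
    {
        int tentative = -1;

        for (int i = 0; i < count; i++) {
            int rc = map_rid(&maps[i], rid_in, rid_out);

            if (rc == 0)
                return i;       /* firm match wins immediately */
            if (rc == 1)
                tentative = i;  /* remember it, keep scanning */
        }
        return tentative;       /* -1 if nothing matched at all */
    }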
527 * as NC (named component) -> SMMU -> ITS. If the type is matched, in iort_node_map_platform_id()
530 if (!(IORT_TYPE_MASK(parent->type) & type_mask)) in iort_node_map_platform_id()
547 * device (such as SMMU or PMCG); its iort node was already cached in iort_find_dev_node()
551 node = iort_get_iort_node(dev->fwnode); in iort_find_dev_node()
562 pbus = to_pci_dev(dev)->bus; in iort_find_dev_node()
565 iort_match_node_callback, &pbus->dev); in iort_find_dev_node()
569 * iort_msi_map_id() - Map an MSI input ID for a device
589 * iort_pmsi_get_dev_id() - Get the device id for a device
593 * Returns: 0 when a dev id is successfully found, -ENODEV on error
602 return -ENODEV; in iort_pmsi_get_dev_id()
610 for (i = 0; i < node->mapping_count; i++) { in iort_pmsi_get_dev_id()
617 return -ENODEV; in iort_pmsi_get_dev_id()
623 int ret = -ENODEV; in iort_find_its_base()
627 if (its_msi_chip->translation_id == its_id) { in iort_find_its_base()
628 *base = its_msi_chip->base_addr; in iort_find_its_base()
639 * iort_dev_find_its_id() - Find the ITS identifier for a device
655 return -ENXIO; in iort_dev_find_its_id()
659 return -ENXIO; in iort_dev_find_its_id()
662 its = (struct acpi_iort_its_group *)node->node_data; in iort_dev_find_its_id()
663 if (idx >= its->its_count) { in iort_dev_find_its_id()
665 idx, its->its_count); in iort_dev_find_its_id()
666 return -ENXIO; in iort_dev_find_its_id()
669 *its_id = its->identifiers[idx]; in iort_dev_find_its_id()
674 * iort_get_device_domain() - Find MSI domain related to a device
712 node->mapping_offset + index * sizeof(*map)); in iort_set_device_domain()
715 if (!map->output_reference || in iort_set_device_domain()
716 !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) { in iort_set_device_domain()
718 node, node->type); in iort_set_device_domain()
723 map->output_reference); in iort_set_device_domain()
725 if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP) in iort_set_device_domain()
729 its = (struct acpi_iort_its_group *)msi_parent->node_data; in iort_set_device_domain()
731 iort_fwnode = iort_find_domain_token(its->identifiers[0]); in iort_set_device_domain()
741 * iort_get_platform_device_domain() - Find MSI domain related to a
761 for (i = 0; i < node->mapping_count; i++) { in iort_get_platform_device_domain()
772 its = (struct acpi_iort_its_group *)msi_parent->node_data; in iort_get_platform_device_domain()
774 iort_fwnode = iort_find_domain_token(its->identifiers[0]); in iort_get_platform_device_domain()
796 iommu = iort_get_iort_node(fwspec->iommu_fwnode); in iort_get_msi_resv_iommu()
798 if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) { in iort_get_msi_resv_iommu()
799 struct acpi_iort_smmu_v3 *smmu; in iort_get_msi_resv_iommu() local
801 smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data; in iort_get_msi_resv_iommu()
802 if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X) in iort_get_msi_resv_iommu()
810 * iort_iommu_msi_get_resv_regions() - Reserved region driver helper
840 for (i = 0; i < fwspec->num_ids; i++) { in iort_iommu_msi_get_resv_regions()
842 fwspec->ids[i], in iort_iommu_msi_get_resv_regions()
852 its = (struct acpi_iort_its_group *)its_node->node_data; in iort_iommu_msi_get_resv_regions()
854 for (i = 0; i < its->its_count; i++) { in iort_iommu_msi_get_resv_regions()
857 if (!iort_find_its_base(its->identifiers[i], &base)) { in iort_iommu_msi_get_resv_regions()
864 list_add_tail(&region->list, head); in iort_iommu_msi_get_resv_regions()
870 return (resv == its->its_count) ? resv : -ENODEV; in iort_iommu_msi_get_resv_regions()
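The elided body of the if above turns each registered ITS base address into an IOMMU_RESV_MSI region, so the IOVA allocator keeps clear of the doorbell window and the MSI layer can map it; mainline reserves the 64K page at base + 64K, which holds GITS_TRANSLATER. A hedged reconstruction, assuming the older four-argument iommu_alloc_resv_region() (newer kernels add a gfp_t argument):

    int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
    struct iommu_resv_region *region;

    region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
                                     prot, IOMMU_RESV_MSI);
    if (region) {
        list_add_tail(&region->list, head);     /* the line matched above */
        resv++;
    }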
881 pr_warn("IORT node type %u does not describe an SMMU\n", type); in iort_iommu_driver_enabled()
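Only the default branch of iort_iommu_driver_enabled() matches the search; the elided arms report whether the corresponding SMMU driver is available, roughly as follows (IS_BUILTIN in older kernels, IS_ENABLED once the drivers became modular):

    static inline bool iort_iommu_driver_enabled(u8 type)
    {
        switch (type) {
        case ACPI_IORT_NODE_SMMU_V3:
            return IS_ENABLED(CONFIG_ARM_SMMU_V3);
        case ACPI_IORT_NODE_SMMU:
            return IS_ENABLED(CONFIG_ARM_SMMU);
        default:
            pr_warn("IORT node type %u does not describe an SMMU\n", type);
            return false;
        }
    }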
890 pci_rc = (struct acpi_iort_root_complex *)node->node_data; in iort_pci_rc_supports_ats()
891 return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED; in iort_pci_rc_supports_ats()
901 return -ENODEV; in iort_iommu_xlate()
905 return -ENODEV; in iort_iommu_xlate()
908 * If the ops look-up fails, this means that either in iort_iommu_xlate()
909 * the SMMU drivers have not been probed yet or that in iort_iommu_xlate()
910 * the SMMU drivers are not built into the kernel; in iort_iommu_xlate()
911 * Depending on whether the SMMU drivers are built-in in iort_iommu_xlate()
917 return iort_iommu_driver_enabled(node->type) ? in iort_iommu_xlate()
918 -EPROBE_DEFER : -ENODEV; in iort_iommu_xlate()
934 parent = iort_node_map_id(info->node, alias, &streamid, in iort_pci_iommu_init()
936 return iort_iommu_xlate(info->dev, parent, streamid); in iort_pci_iommu_init()
945 nc = (struct acpi_iort_named_component *)node->node_data; in iort_named_component_init()
946 props[0] = PROPERTY_ENTRY_U32("pasid-num-bits", in iort_named_component_init()
948 nc->node_flags)); in iort_named_component_init()
949 if (nc->node_flags & ACPI_IORT_NC_STALL_SUPPORTED) in iort_named_component_init()
950 props[1] = PROPERTY_ENTRY_BOOL("dma-can-stall"); in iort_named_component_init()
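The properties built above (the PASID width decoded from node_flags, plus dma-can-stall when stall is supported) are then attached to the device; the tail is elided by the match filter, but in mainline it looks roughly like this hedged reconstruction:

    /* elided tail, hedged reconstruction from mainline: */
    if (device_create_managed_software_node(dev, props, NULL))
        dev_warn(dev, "Could not add device properties\n");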
959 int err = -ENODEV, i = 0; in iort_nc_iommu_map()
986 return -ENODEV; in iort_nc_iommu_map_id()
991 * iort_iommu_configure_id() - Set up IOMMU configuration for a device.
1001 int err = -ENODEV; in iort_iommu_configure_id()
1005 struct pci_bus *bus = to_pci_dev(dev)->bus; in iort_iommu_configure_id()
1009 iort_match_node_callback, &bus->dev); in iort_iommu_configure_id()
1011 return -ENODEV; in iort_iommu_configure_id()
1019 fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS; in iort_iommu_configure_id()
1024 return -ENODEV; in iort_iommu_configure_id()
1040 { return -ENODEV; } in iort_iommu_configure_id()
1051 return -ENODEV; in nc_dma_get_range()
1053 ncomp = (struct acpi_iort_named_component *)node->node_data; in nc_dma_get_range()
1055 if (!ncomp->memory_address_limit) { in nc_dma_get_range()
1057 return -EINVAL; in nc_dma_get_range()
1060 *size = ncomp->memory_address_limit >= 64 ? U64_MAX : in nc_dma_get_range()
1061 1ULL<<ncomp->memory_address_limit; in nc_dma_get_range()
1070 struct pci_bus *pbus = to_pci_dev(dev)->bus; in rc_dma_get_range()
1073 iort_match_node_callback, &pbus->dev); in rc_dma_get_range()
1074 if (!node || node->revision < 1) in rc_dma_get_range()
1075 return -ENODEV; in rc_dma_get_range()
1077 rc = (struct acpi_iort_root_complex *)node->node_data; in rc_dma_get_range()
1079 if (!rc->memory_address_limit) { in rc_dma_get_range()
1081 return -EINVAL; in rc_dma_get_range()
1084 *size = rc->memory_address_limit >= 64 ? U64_MAX : in rc_dma_get_range()
1085 1ULL<<rc->memory_address_limit; in rc_dma_get_range()
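Both helpers convert memory_address_limit, a count of usable address bits, into a byte size, clamping at 64 because 1ULL << 64 is undefined in C. For instance a limit of 32 yields 0x100000000 (4 GiB). A standalone check:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t dma_size(uint8_t limit_bits)
    {
        /* shifting a 64-bit value by >= 64 bits is UB, hence the clamp */
        return limit_bits >= 64 ? UINT64_MAX : 1ULL << limit_bits;
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)dma_size(32)); /* 0x100000000 */
        printf("%#llx\n", (unsigned long long)dma_size(64)); /* all ones   */
        return 0;
    }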
1091 * iort_dma_get_ranges() - Look up DMA addressing limit for the device
1118 res->start = irq; in acpi_iort_register_irq()
1119 res->end = irq; in acpi_iort_register_irq()
1120 res->flags = IORESOURCE_IRQ; in acpi_iort_register_irq()
1121 res->name = name; in acpi_iort_register_irq()
1126 struct acpi_iort_smmu_v3 *smmu; in arm_smmu_v3_count_resources() local
1131 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in arm_smmu_v3_count_resources()
1133 if (smmu->event_gsiv) in arm_smmu_v3_count_resources()
1136 if (smmu->pri_gsiv) in arm_smmu_v3_count_resources()
1139 if (smmu->gerr_gsiv) in arm_smmu_v3_count_resources()
1142 if (smmu->sync_gsiv) in arm_smmu_v3_count_resources()
1148 static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu) in arm_smmu_v3_is_combined_irq() argument
1154 if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX) in arm_smmu_v3_is_combined_irq()
1158 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking in arm_smmu_v3_is_combined_irq()
1161 return smmu->event_gsiv == smmu->pri_gsiv && in arm_smmu_v3_is_combined_irq()
1162 smmu->event_gsiv == smmu->gerr_gsiv && in arm_smmu_v3_is_combined_irq()
1163 smmu->event_gsiv == smmu->sync_gsiv; in arm_smmu_v3_is_combined_irq()
1166 static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu) in arm_smmu_v3_resource_size() argument
1170 * which doesn't support the page 1 SMMU register space. in arm_smmu_v3_resource_size()
1172 if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX) in arm_smmu_v3_resource_size()
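Only the CN99xx check survives the match filter; given the comment, the elided remainder of arm_smmu_v3_resource_size() presumably reads:

    if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
        return SZ_64K;      /* page 0 only */

    return SZ_128K;         /* pages 0 and 1, 64K each */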
1181 struct acpi_iort_smmu_v3 *smmu; in arm_smmu_v3_init_resources() local
1185 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in arm_smmu_v3_init_resources()
1187 res[num_res].start = smmu->base_address; in arm_smmu_v3_init_resources()
1188 res[num_res].end = smmu->base_address + in arm_smmu_v3_init_resources()
1189 arm_smmu_v3_resource_size(smmu) - 1; in arm_smmu_v3_init_resources()
1193 if (arm_smmu_v3_is_combined_irq(smmu)) { in arm_smmu_v3_init_resources()
1194 if (smmu->event_gsiv) in arm_smmu_v3_init_resources()
1195 acpi_iort_register_irq(smmu->event_gsiv, "combined", in arm_smmu_v3_init_resources()
1200 if (smmu->event_gsiv) in arm_smmu_v3_init_resources()
1201 acpi_iort_register_irq(smmu->event_gsiv, "eventq", in arm_smmu_v3_init_resources()
1205 if (smmu->pri_gsiv) in arm_smmu_v3_init_resources()
1206 acpi_iort_register_irq(smmu->pri_gsiv, "priq", in arm_smmu_v3_init_resources()
1210 if (smmu->gerr_gsiv) in arm_smmu_v3_init_resources()
1211 acpi_iort_register_irq(smmu->gerr_gsiv, "gerror", in arm_smmu_v3_init_resources()
1215 if (smmu->sync_gsiv) in arm_smmu_v3_init_resources()
1216 acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync", in arm_smmu_v3_init_resources()
1225 struct acpi_iort_smmu_v3 *smmu; in arm_smmu_v3_dma_configure() local
1229 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in arm_smmu_v3_dma_configure()
1231 attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ? in arm_smmu_v3_dma_configure()
1234 /* We expect the dma masks to be equivalent for all SMMUv3 set-ups */ in arm_smmu_v3_dma_configure()
1235 dev->dma_mask = &dev->coherent_dma_mask; in arm_smmu_v3_dma_configure()
1248 struct acpi_iort_smmu_v3 *smmu; in arm_smmu_v3_set_proximity() local
1250 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in arm_smmu_v3_set_proximity()
1251 if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) { in arm_smmu_v3_set_proximity()
1252 int dev_node = pxm_to_node(smmu->pxm); in arm_smmu_v3_set_proximity()
1255 return -EINVAL; in arm_smmu_v3_set_proximity()
1258 pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n", in arm_smmu_v3_set_proximity()
1259 smmu->base_address, in arm_smmu_v3_set_proximity()
1260 smmu->pxm); in arm_smmu_v3_set_proximity()
1270 struct acpi_iort_smmu *smmu; in arm_smmu_count_resources() local
1272 /* Retrieve SMMU specific data */ in arm_smmu_count_resources()
1273 smmu = (struct acpi_iort_smmu *)node->node_data; in arm_smmu_count_resources()
1283 return smmu->context_interrupt_count + 2; in arm_smmu_count_resources()
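The +2 covers the two resources every SMMUv1/v2 node carries besides its context interrupts: the MMIO register span and the global fault interrupt (both filled in by arm_smmu_init_resources() below). An SMMU with four context interrupts thus needs 4 + 2 = 6 resource slots.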
1289 struct acpi_iort_smmu *smmu; in arm_smmu_init_resources() local
1293 /* Retrieve SMMU specific data */ in arm_smmu_init_resources()
1294 smmu = (struct acpi_iort_smmu *)node->node_data; in arm_smmu_init_resources()
1296 res[num_res].start = smmu->base_address; in arm_smmu_init_resources()
1297 res[num_res].end = smmu->base_address + smmu->span - 1; in arm_smmu_init_resources()
1301 glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset); in arm_smmu_init_resources()
1306 acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger, in arm_smmu_init_resources()
1310 ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset); in arm_smmu_init_resources()
1311 for (i = 0; i < smmu->context_interrupt_count; i++) { in arm_smmu_init_resources()
1315 acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger, in arm_smmu_init_resources()
1323 struct acpi_iort_smmu *smmu; in arm_smmu_dma_configure() local
1326 /* Retrieve SMMU specific data */ in arm_smmu_dma_configure()
1327 smmu = (struct acpi_iort_smmu *)node->node_data; in arm_smmu_dma_configure()
1329 attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ? in arm_smmu_dma_configure()
1332 /* We expect the dma masks to be equivalent for SMMU set-ups */ in arm_smmu_dma_configure()
1333 dev->dma_mask = &dev->coherent_dma_mask; in arm_smmu_dma_configure()
1344 pmcg = (struct acpi_iort_pmcg *)node->node_data; in arm_smmu_v3_pmcg_count_resources()
1350 return pmcg->overflow_gsiv ? 3 : 2; in arm_smmu_v3_pmcg_count_resources()
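A PMCG always exposes two register pages (page 0 and page 1, each sized SZ_4K below) plus, optionally, a wired overflow interrupt: three resources when overflow_gsiv is set, two otherwise.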
1359 pmcg = (struct acpi_iort_pmcg *)node->node_data; in arm_smmu_v3_pmcg_init_resources()
1361 res[0].start = pmcg->page0_base_address; in arm_smmu_v3_pmcg_init_resources()
1362 res[0].end = pmcg->page0_base_address + SZ_4K - 1; in arm_smmu_v3_pmcg_init_resources()
1364 res[1].start = pmcg->page1_base_address; in arm_smmu_v3_pmcg_init_resources()
1365 res[1].end = pmcg->page1_base_address + SZ_4K - 1; in arm_smmu_v3_pmcg_init_resources()
1368 if (pmcg->overflow_gsiv) in arm_smmu_v3_pmcg_init_resources()
1369 acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow", in arm_smmu_v3_pmcg_init_resources()
1408 .name = "arm-smmu-v3",
1416 .name = "arm-smmu",
1423 .name = "arm-smmu-v3-pmcg",
1432 switch (node->type) { in iort_get_dev_cfg()
1445 * iort_add_platform_device() - Allocate a platform device for IORT node
1459 pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO); in iort_add_platform_device()
1461 return -ENOMEM; in iort_add_platform_device()
1463 if (ops->dev_set_proximity) { in iort_add_platform_device()
1464 ret = ops->dev_set_proximity(&pdev->dev, node); in iort_add_platform_device()
1469 count = ops->dev_count_resources(node); in iort_add_platform_device()
1473 ret = -ENOMEM; in iort_add_platform_device()
1477 ops->dev_init_resources(r, node); in iort_add_platform_device()
1495 if (ops->dev_add_platdata) in iort_add_platform_device()
1496 ret = ops->dev_add_platdata(pdev); in iort_add_platform_device()
1506 ret = -ENODEV; in iort_add_platform_device()
1510 pdev->dev.fwnode = fwnode; in iort_add_platform_device()
1512 if (ops->dev_dma_configure) in iort_add_platform_device()
1513 ops->dev_dma_configure(&pdev->dev, node); in iort_add_platform_device()
1515 iort_set_device_domain(&pdev->dev, node); in iort_add_platform_device()
1524 arch_teardown_dma_ops(&pdev->dev); in iort_add_platform_device()
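iort_add_platform_device() drives the whole sequence through the per-node-type ops table quoted earlier (arm-smmu-v3, arm-smmu, arm-smmu-v3-pmcg): set proximity, count resources, allocate and initialize them, attach platform data, wire up DMA and the MSI domain, then add the device. A condensed standalone model of that function-pointer dispatch; all names here are hypothetical:

    #include <stdlib.h>

    struct node;                        /* stands in for acpi_iort_node */

    struct dev_cfg {                    /* models iort_dev_config */
        const char *name;
        int  (*count_resources)(struct node *n);
        void (*init_resources)(long *res, struct node *n);
    };

    static int add_platform_device(struct node *n, const struct dev_cfg *ops)
    {
        int count = ops->count_resources(n);
        long *res = calloc(count, sizeof(*res));

        if (!res)
            return -1;                  /* kernel: -ENOMEM */
        ops->init_resources(res, n);    /* fill MMIO + IRQ slots */
        /* kernel continues: platform_device_add_resources(), optional
         * dev_add_platdata(), dev_dma_configure(), device_add() */
        free(res);
        return 0;
    }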
1539 if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) { in iort_enable_acs()
1545 iort_node->mapping_offset); in iort_enable_acs()
1547 for (i = 0; i < iort_node->mapping_count; i++, map++) { in iort_enable_acs()
1548 if (!map->output_reference) in iort_enable_acs()
1552 iort_table, map->output_reference); in iort_enable_acs()
1554 * If we detect an RC->SMMU mapping, make sure in iort_enable_acs()
1557 if ((parent->type == ACPI_IORT_NODE_SMMU) || in iort_enable_acs()
1558 (parent->type == ACPI_IORT_NODE_SMMU_V3)) { in iort_enable_acs()
1586 iort->node_offset); in iort_init_platform_devices()
1588 iort_table->length); in iort_init_platform_devices()
1590 for (i = 0; i < iort->node_count; i++) { in iort_init_platform_devices()
1615 iort_node->length); in iort_init_platform_devices()
1662 node = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->node_offset); in acpi_iort_dma_get_max_cpu_address()
1663 end = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->header.length); in acpi_iort_dma_get_max_cpu_address()
1665 for (i = 0; i < iort->node_count; i++) { in acpi_iort_dma_get_max_cpu_address()
1669 switch (node->type) { in acpi_iort_dma_get_max_cpu_address()
1675 ncomp = (struct acpi_iort_named_component *)node->node_data; in acpi_iort_dma_get_max_cpu_address()
1676 local_limit = DMA_BIT_MASK(ncomp->memory_address_limit); in acpi_iort_dma_get_max_cpu_address()
1681 if (node->revision < 1) in acpi_iort_dma_get_max_cpu_address()
1684 rc = (struct acpi_iort_root_complex *)node->node_data; in acpi_iort_dma_get_max_cpu_address()
1685 local_limit = DMA_BIT_MASK(rc->memory_address_limit); in acpi_iort_dma_get_max_cpu_address()
1689 node = ACPI_ADD_PTR(struct acpi_iort_node, node, node->length); in acpi_iort_dma_get_max_cpu_address()
1691 acpi_put_table(&iort->header); in acpi_iort_dma_get_max_cpu_address()