Lines Matching +full:iommu +full:- +full:map

1 // SPDX-License-Identifier: GPL-2.0-only
15 #include <linux/iommu.h>
21 #include <linux/dma-map-ops.h>
45 * iort_set_fwnode() - Create iort_fwnode and use it to register
46 * iommu data in the iort_fwnode_list
48 * @iort_node: IORT table node associated with the IOMMU
62 return -ENOMEM; in iort_set_fwnode()
64 INIT_LIST_HEAD(&np->list); in iort_set_fwnode()
65 np->iort_node = iort_node; in iort_set_fwnode()
66 np->fwnode = fwnode; in iort_set_fwnode()
69 list_add_tail(&np->list, &iort_fwnode_list); in iort_set_fwnode()
76 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
78 * @node: IORT table node to be looked-up
90 if (curr->iort_node == node) { in iort_get_fwnode()
91 fwnode = curr->fwnode; in iort_get_fwnode()
101 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
111 if (curr->iort_node == node) { in iort_delete_fwnode()
112 list_del(&curr->list); in iort_delete_fwnode()
121 * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
123 * @fwnode: fwnode associated with device to be looked-up
135 if (curr->fwnode == fwnode) { in iort_get_iort_node()
136 iort_node = curr->iort_node; in iort_get_iort_node()
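iort_set_fwnode(), iort_get_fwnode(), iort_delete_fwnode() and iort_get_iort_node() all operate on the same global iort_fwnode_list, pairing each IORT table node with the fwnode handle created for it. A minimal userspace sketch of that registry pattern, with illustrative struct names and without the spinlock the kernel takes around each list walk:

	#include <stdlib.h>

	/* Illustrative stand-ins for struct acpi_iort_node and struct fwnode_handle. */
	struct node;
	struct handle;

	struct pair {
		struct node *iort_node;
		struct handle *fwnode;
		struct pair *next;
	};

	static struct pair *registry;

	/* Mirror of iort_set_fwnode(): record the node<->fwnode association. */
	static int registry_add(struct node *n, struct handle *h)
	{
		struct pair *p = calloc(1, sizeof(*p));

		if (!p)
			return -1;
		p->iort_node = n;
		p->fwnode = h;
		p->next = registry;
		registry = p;
		return 0;
	}

	/* Mirror of iort_get_fwnode(): node -> fwnode lookup. */
	static struct handle *registry_find_fwnode(struct node *n)
	{
		for (struct pair *p = registry; p; p = p->next)
			if (p->iort_node == n)
				return p->fwnode;
		return NULL;
	}

	/* Mirror of iort_get_iort_node(): fwnode -> node lookup. */
	static struct node *registry_find_node(struct handle *h)
	{
		for (struct pair *p = registry; p; p = p->next)
			if (p->fwnode == h)
				return p->iort_node;
		return NULL;
	}

Deletion (iort_delete_fwnode) is the obvious inverse of registry_add() and is omitted from the sketch.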
155 * iort_register_domain_token() - register domain token along with related
161 * Returns: 0 on success, -ENOMEM if the list element allocation fails
170 return -ENOMEM; in iort_register_domain_token()
172 its_msi_chip->fw_node = fw_node; in iort_register_domain_token()
173 its_msi_chip->translation_id = trans_id; in iort_register_domain_token()
174 its_msi_chip->base_addr = base; in iort_register_domain_token()
177 list_add(&its_msi_chip->list, &iort_msi_chip_list); in iort_register_domain_token()
184 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
195 if (its_msi_chip->translation_id == trans_id) { in iort_deregister_domain_token()
196 list_del(&its_msi_chip->list); in iort_deregister_domain_token()
205 * iort_find_domain_token() - Find domain token based on given ITS ID
217 if (its_msi_chip->translation_id == trans_id) { in iort_find_domain_token()
218 fw_node = its_msi_chip->fw_node; in iort_find_domain_token()
241 iort->node_offset); in iort_scan_node()
243 iort_table->length); in iort_scan_node()
245 for (i = 0; i < iort->node_count; i++) { in iort_scan_node()
250 if (iort_node->type == type && in iort_scan_node()
255 iort_node->length); in iort_scan_node()
267 if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) { in iort_match_node_callback()
285 nc_dev = nc_dev->parent; in iort_match_node_callback()
291 status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf); in iort_match_node_callback()
297 ncomp = (struct acpi_iort_named_component *)node->node_data; in iort_match_node_callback()
298 status = !strcmp(ncomp->device_name, buf.pointer) ? in iort_match_node_callback()
301 } else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) { in iort_match_node_callback()
306 pci_rc = (struct acpi_iort_root_complex *)node->node_data; in iort_match_node_callback()
309 * It is assumed that PCI segment numbers map one-to-one in iort_match_node_callback()
313 status = pci_rc->pci_segment_number == pci_domain_nr(bus) ? in iort_match_node_callback()
320 static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in, in iort_id_map() argument
324 if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { in iort_id_map()
327 *rid_out = map->output_base; in iort_id_map()
331 pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n", in iort_id_map()
332 map, type); in iort_id_map()
333 return -ENXIO; in iort_id_map()
336 if (rid_in < map->input_base || in iort_id_map()
337 (rid_in > map->input_base + map->id_count)) in iort_id_map()
338 return -ENXIO; in iort_id_map()
344 * region, we assume the prior match was due to the off-by-1 in iort_id_map()
349 pr_err(FW_BUG "[map %p] conflicting mapping for input ID 0x%x\n", in iort_id_map()
350 map, rid_in); in iort_id_map()
351 if (rid_in != map->input_base) in iort_id_map()
352 return -ENXIO; in iort_id_map()
357 *rid_out = map->output_base + (rid_in - map->input_base); in iort_id_map()
365 if (map->id_count > 0 && rid_in == map->input_base + map->id_count) in iort_id_map()
366 return -EAGAIN; in iort_id_map()
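An IORT ID mapping entry translates a contiguous window of input IDs to output IDs by a constant offset; a SINGLE_MAPPING entry ignores the input and always yields output_base. A simplified sketch of that arithmetic (the off-by-one tolerance for buggy firmware handled above is omitted, and the struct is an illustrative subset):

	#include <stdint.h>

	/* Illustrative subset of struct acpi_iort_id_mapping. */
	struct id_mapping {
		uint32_t input_base;
		uint32_t id_count;
		uint32_t output_base;
		int single;		/* ACPI_IORT_ID_SINGLE_MAPPING */
	};

	/*
	 * Simplified window translation: returns 0 and fills *out when in_id
	 * falls inside [input_base, input_base + id_count], non-zero otherwise.
	 */
	static int map_id(const struct id_mapping *m, uint32_t in_id, uint32_t *out)
	{
		if (m->single) {
			*out = m->output_base;
			return 0;
		}
		if (in_id < m->input_base || in_id > m->input_base + m->id_count)
			return -1;
		*out = m->output_base + (in_id - m->input_base);
		return 0;
	}

	/* Example: window {input_base=0x100, id_count=0xff, output_base=0x8000}
	 * maps requester ID 0x104 to output ID 0x8004. */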
374 struct acpi_iort_id_mapping *map; in iort_node_get_id() local
376 if (!node->mapping_offset || !node->mapping_count || in iort_node_get_id()
377 index >= node->mapping_count) in iort_node_get_id()
380 map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node, in iort_node_get_id()
381 node->mapping_offset + index * sizeof(*map)); in iort_node_get_id()
384 if (!map->output_reference) { in iort_node_get_id()
385 pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n", in iort_node_get_id()
386 node, node->type); in iort_node_get_id()
391 map->output_reference); in iort_node_get_id()
393 if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { in iort_node_get_id()
394 if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT || in iort_node_get_id()
395 node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX || in iort_node_get_id()
396 node->type == ACPI_IORT_NODE_SMMU_V3 || in iort_node_get_id()
397 node->type == ACPI_IORT_NODE_PMCG) { in iort_node_get_id()
398 *id_out = map->output_base; in iort_node_get_id()
415 switch (node->type) { in iort_get_id_mapping_index()
421 if (node->revision < 1) in iort_get_id_mapping_index()
422 return -EINVAL; in iort_get_id_mapping_index()
424 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in iort_get_id_mapping_index()
427 * defined to be valid unless all interrupts are GSIV-based. in iort_get_id_mapping_index()
429 if (node->revision < 5) { in iort_get_id_mapping_index()
430 if (smmu->event_gsiv && smmu->pri_gsiv && in iort_get_id_mapping_index()
431 smmu->gerr_gsiv && smmu->sync_gsiv) in iort_get_id_mapping_index()
432 return -EINVAL; in iort_get_id_mapping_index()
433 } else if (!(smmu->flags & ACPI_IORT_SMMU_V3_DEVICEID_VALID)) { in iort_get_id_mapping_index()
434 return -EINVAL; in iort_get_id_mapping_index()
437 if (smmu->id_mapping_index >= node->mapping_count) { in iort_get_id_mapping_index()
439 node, node->type); in iort_get_id_mapping_index()
440 return -EINVAL; in iort_get_id_mapping_index()
443 return smmu->id_mapping_index; in iort_get_id_mapping_index()
445 pmcg = (struct acpi_iort_pmcg *)node->node_data; in iort_get_id_mapping_index()
446 if (pmcg->overflow_gsiv || node->mapping_count == 0) in iort_get_id_mapping_index()
447 return -EINVAL; in iort_get_id_mapping_index()
451 return -EINVAL; in iort_get_id_mapping_index()
463 struct acpi_iort_id_mapping *map; in iort_node_map_id() local
467 if (IORT_TYPE_MASK(node->type) & type_mask) { in iort_node_map_id()
473 if (!node->mapping_offset || !node->mapping_count) in iort_node_map_id()
476 map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node, in iort_node_map_id()
477 node->mapping_offset); in iort_node_map_id()
480 if (!map->output_reference) { in iort_node_map_id()
481 pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n", in iort_node_map_id()
482 node, node->type); in iort_node_map_id()
488 * associated ID map to prevent erroneous multi-stage in iort_node_map_id()
494 for (i = 0; i < node->mapping_count; i++, map++) { in iort_node_map_id()
499 rc = iort_id_map(map, node->type, map_id, &id, out_ref); in iort_node_map_id()
502 if (rc == -EAGAIN) in iort_node_map_id()
503 out_ref = map->output_reference; in iort_node_map_id()
506 if (i == node->mapping_count && !out_ref) in iort_node_map_id()
510 rc ? out_ref : map->output_reference); in iort_node_map_id()
514 /* Map input ID to output ID unchanged on mapping failure */ in iort_node_map_id()
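iort_node_map_id() repeats that window translation across parent references until it reaches a node of the requested type, e.g. named component -> SMMU -> ITS. A toy sketch of the walk, with a single mapping window per node and node types modelled as mask bits (the real code iterates over mapping_count entries per node):

	#include <stdint.h>
	#include <stddef.h>

	/* Illustrative node: one mapping window per node, one parent link. */
	struct toy_node {
		int type;			/* e.g. NC, SMMU, ITS, as mask bits */
		struct toy_node *parent;	/* output_reference target */
		uint32_t input_base, id_count, output_base;
	};

	/*
	 * Walk the mapping chain until a node whose type is in type_mask is
	 * reached, translating the ID at each hop.
	 */
	static struct toy_node *map_to_type(struct toy_node *node, uint32_t id,
					    int type_mask, uint32_t *id_out)
	{
		while (node) {
			if (node->type & type_mask) {
				*id_out = id;
				return node;
			}
			if (id < node->input_base ||
			    id > node->input_base + node->id_count)
				return NULL;	/* no mapping covers this ID */
			id = node->output_base + (id - node->input_base);
			node = node->parent;
		}
		return NULL;
	}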
534 * optional step 2: if the parent of the initial dev ID is not of in iort_node_map_platform_id()
535 * the target type we want, map the ID again; this covers cases such in iort_node_map_platform_id()
536 * as NC (named component) -> SMMU -> ITS. If the type already matches, in iort_node_map_platform_id()
539 if (!(IORT_TYPE_MASK(parent->type) & type_mask)) in iort_node_map_platform_id()
560 node = iort_get_iort_node(dev->fwnode); in iort_find_dev_node()
571 pbus = to_pci_dev(dev)->bus; in iort_find_dev_node()
574 iort_match_node_callback, &pbus->dev); in iort_find_dev_node()
578 * iort_msi_map_id() - Map a MSI input ID for a device
598 * iort_pmsi_get_dev_id() - Get the device id for a device
602 * Returns: 0 when a dev ID is successfully found, -ENODEV on error
611 return -ENODEV; in iort_pmsi_get_dev_id()
619 for (i = 0; i < node->mapping_count; i++) { in iort_pmsi_get_dev_id()
626 return -ENODEV; in iort_pmsi_get_dev_id()
632 int ret = -ENODEV; in iort_find_its_base()
636 if (its_msi_chip->translation_id == its_id) { in iort_find_its_base()
637 *base = its_msi_chip->base_addr; in iort_find_its_base()
648 * iort_dev_find_its_id() - Find the ITS identifier for a device
664 return -ENXIO; in iort_dev_find_its_id()
668 return -ENXIO; in iort_dev_find_its_id()
671 its = (struct acpi_iort_its_group *)node->node_data; in iort_dev_find_its_id()
672 if (idx >= its->its_count) { in iort_dev_find_its_id()
674 idx, its->its_count); in iort_dev_find_its_id()
675 return -ENXIO; in iort_dev_find_its_id()
678 *its_id = its->identifiers[idx]; in iort_dev_find_its_id()
683 * iort_get_device_domain() - Find MSI domain related to a device
711 struct acpi_iort_id_mapping *map; in iort_set_device_domain() local
720 map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node, in iort_set_device_domain()
721 node->mapping_offset + index * sizeof(*map)); in iort_set_device_domain()
724 if (!map->output_reference || in iort_set_device_domain()
725 !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) { in iort_set_device_domain()
727 node, node->type); in iort_set_device_domain()
732 map->output_reference); in iort_set_device_domain()
734 if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP) in iort_set_device_domain()
738 its = (struct acpi_iort_its_group *)msi_parent->node_data; in iort_set_device_domain()
740 iort_fwnode = iort_find_domain_token(its->identifiers[0]); in iort_set_device_domain()
750 * iort_get_platform_device_domain() - Find MSI domain related to a
770 for (i = 0; i < node->mapping_count; i++) { in iort_get_platform_device_domain()
781 its = (struct acpi_iort_its_group *)msi_parent->node_data; in iort_get_platform_device_domain()
783 iort_fwnode = iort_find_domain_token(its->identifiers[0]); in iort_get_platform_device_domain()
806 kfree(rmr_data->sids); in iort_rmr_free()
818 u64 addr = rmr_desc->base_address, size = rmr_desc->length; in iort_rmr_alloc()
830 rmr_data->sids = sids_copy; in iort_rmr_alloc()
831 rmr_data->num_sids = num_sids; in iort_rmr_alloc()
836 size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address)); in iort_rmr_alloc()
838 pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n", in iort_rmr_alloc()
839 rmr_desc->base_address, in iort_rmr_alloc()
840 rmr_desc->base_address + rmr_desc->length - 1, in iort_rmr_alloc()
841 addr, addr + size - 1); in iort_rmr_alloc()
844 region = &rmr_data->rr; in iort_rmr_alloc()
845 INIT_LIST_HEAD(&region->list); in iort_rmr_alloc()
846 region->start = addr; in iort_rmr_alloc()
847 region->length = size; in iort_rmr_alloc()
848 region->prot = prot; in iort_rmr_alloc()
849 region->type = type; in iort_rmr_alloc()
850 region->free = iort_rmr_free; in iort_rmr_alloc()
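When a descriptor is not 64K aligned, the code above page-aligns the base downwards and rounds the size up so the original range stays covered, then reports the adjustment as a firmware bug. A standalone sketch of that fix-up with a worked example:

	#include <stdint.h>

	#define PAGE_SIZE	4096ULL
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define SZ_64K		0x10000ULL

	/*
	 * Page-align the base downwards and grow the size so that the
	 * original [base, base + length) range stays covered.
	 */
	static void rmr_align(uint64_t base, uint64_t length,
			      uint64_t *addr, uint64_t *size)
	{
		*addr = base;
		*size = length;
		if ((*addr | *size) & (SZ_64K - 1)) {
			*addr = base & PAGE_MASK;
			*size = (length + (base & ~PAGE_MASK) + PAGE_SIZE - 1) & PAGE_MASK;
		}
	}

	/* Example: base 0x10000800, length 0x1000 -> addr 0x10000000, size 0x2000. */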
869 end = start + length - 1; in iort_rmr_desc_check_overlap()
874 u64 e_end = e_start + desc[j].length - 1; in iort_rmr_desc_check_overlap()
877 pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] overlaps, continue anyway\n", in iort_rmr_desc_check_overlap()
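The overlap check above compares each RMR descriptor against the ones that follow it. A symmetric form of the underlying range test, using the same inclusive end = start + length - 1 convention:

	#include <stdint.h>

	/*
	 * Two inclusive ranges [s1, e1] and [s2, e2] overlap exactly when
	 * each starts no later than the other ends.
	 */
	static int ranges_overlap(uint64_t s1, uint64_t e1, uint64_t s2, uint64_t e2)
	{
		return s1 <= e2 && s2 <= e1;
	}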
892 struct acpi_iort_rmr *rmr = (struct acpi_iort_rmr *)node->node_data; in iort_get_rmrs()
897 rmr->rmr_offset); in iort_get_rmrs()
899 iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count); in iort_get_rmrs()
901 for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) { in iort_get_rmrs()
906 if (rmr->flags & ACPI_IORT_RMR_REMAP_PERMITTED) in iort_get_rmrs()
911 if (rmr->flags & ACPI_IORT_RMR_ACCESS_PRIVILEGE) in iort_get_rmrs()
914 /* Attributes 0x00 - 0x03 represent device memory */ in iort_get_rmrs()
915 if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) <= in iort_get_rmrs()
918 else if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) == in iort_get_rmrs()
927 list_add_tail(&rmr_data->rr.list, head); in iort_get_rmrs()
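The loop above turns each descriptor's flags into a reserved-region type and a set of IOMMU prot bits. A hedged userspace sketch of that decision tree; the flag masks, field position and prot values below are illustrative placeholders, not the ACPICA or IOMMU definitions:

	#include <stdint.h>

	/* Illustrative placeholders for the firmware flag bits. */
	#define RMR_REMAP_PERMITTED	(1u << 0)
	#define RMR_ACCESS_PRIVILEGE	(1u << 1)
	#define RMR_ATTR(flags)		(((flags) >> 2) & 0xff)	/* assumed field position */
	#define ATTR_DEVICE_GRE		0x03	/* assumed encoding */
	#define ATTR_NORMAL_IWB_OWB	0x05	/* assumed encoding */

	enum resv_type { RESV_DIRECT, RESV_DIRECT_RELAXABLE };

	/* Illustrative prot bits, standing in for IOMMU_READ and friends. */
	#define PROT_READ	(1u << 0)
	#define PROT_WRITE	(1u << 1)
	#define PROT_PRIV	(1u << 2)
	#define PROT_MMIO	(1u << 3)
	#define PROT_CACHE	(1u << 4)

	static void decode_rmr_flags(uint32_t flags, enum resv_type *type, uint32_t *prot)
	{
		*prot = PROT_READ | PROT_WRITE;

		/* Remappable regions may be moved by the OS later. */
		*type = (flags & RMR_REMAP_PERMITTED) ? RESV_DIRECT_RELAXABLE
						      : RESV_DIRECT;

		if (flags & RMR_ACCESS_PRIVILEGE)
			*prot |= PROT_PRIV;

		/* Attributes 0x00 - 0x03 describe device memory; IWB-OWB is cacheable. */
		if (RMR_ATTR(flags) <= ATTR_DEVICE_GRE)
			*prot |= PROT_MMIO;
		else if (RMR_ATTR(flags) == ATTR_NORMAL_IWB_OWB)
			*prot |= PROT_CACHE;
	}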
962 struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus); in iort_rmr_has_dev()
964 if (!host->preserve_config) in iort_rmr_has_dev()
968 for (i = 0; i < fwspec->num_ids; i++) { in iort_rmr_has_dev()
969 if (fwspec->ids[i] >= id_start && in iort_rmr_has_dev()
970 fwspec->ids[i] <= id_start + id_count) in iort_rmr_has_dev()
978 struct acpi_iort_node *iommu, in iort_node_get_rmr_info() argument
983 struct acpi_iort_id_mapping *map; in iort_node_get_rmr_info() local
988 if (!node->mapping_offset || !node->mapping_count) { in iort_node_get_rmr_info()
994 rmr = (struct acpi_iort_rmr *)node->node_data; in iort_node_get_rmr_info()
995 if (!rmr->rmr_offset || !rmr->rmr_count) in iort_node_get_rmr_info()
998 map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node, in iort_node_get_rmr_info()
999 node->mapping_offset); in iort_node_get_rmr_info()
1007 for (i = 0; i < node->mapping_count; i++, map++) { in iort_node_get_rmr_info()
1011 map->output_reference); in iort_node_get_rmr_info()
1012 if (parent != iommu) in iort_node_get_rmr_info()
1016 if (dev && !iort_rmr_has_dev(dev, map->output_base, in iort_node_get_rmr_info()
1017 map->id_count)) in iort_node_get_rmr_info()
1021 sids = iort_rmr_alloc_sids(sids, num_sids, map->output_base, in iort_node_get_rmr_info()
1022 map->id_count + 1); in iort_node_get_rmr_info()
1026 num_sids += map->id_count + 1; in iort_node_get_rmr_info()
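The stream-ID array is grown once per matching ID mapping, appending the window that mapping produces; the + 1 reflects that the mapping's id_count field encodes the window length minus one. A userspace sketch of that append step (the kernel version uses krealloc):

	#include <stdint.h>
	#include <stdlib.h>

	/*
	 * Grow the stream-ID array and append the window [base, base + count).
	 * Returns the (possibly moved) array, or NULL on allocation failure.
	 */
	static uint32_t *sids_append(uint32_t *sids, uint32_t num, uint32_t base,
				     uint32_t count)
	{
		uint32_t *grown = realloc(sids, (num + count) * sizeof(*sids));

		if (!grown)
			return NULL;
		for (uint32_t i = 0; i < count; i++)
			grown[num + i] = base + i;
		return grown;
	}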
1036 static void iort_find_rmrs(struct acpi_iort_node *iommu, struct device *dev, in iort_find_rmrs() argument
1044 if (iort_table->revision < 5) in iort_find_rmrs()
1050 iort->node_offset); in iort_find_rmrs()
1052 iort_table->length); in iort_find_rmrs()
1054 for (i = 0; i < iort->node_count; i++) { in iort_find_rmrs()
1059 if (iort_node->type == ACPI_IORT_NODE_RMR) in iort_find_rmrs()
1060 iort_node_get_rmr_info(iort_node, iommu, dev, head); in iort_find_rmrs()
1063 iort_node->length); in iort_find_rmrs()
1068 * Populate the RMR list associated with a given IOMMU and dev (if provided).
1070 * given IOMMU.
1076 struct acpi_iort_node *iommu; in iort_iommu_rmr_get_resv_regions() local
1078 iommu = iort_get_iort_node(iommu_fwnode); in iort_iommu_rmr_get_resv_regions()
1079 if (!iommu) in iort_iommu_rmr_get_resv_regions()
1082 iort_find_rmrs(iommu, dev, head); in iort_iommu_rmr_get_resv_regions()
1087 struct acpi_iort_node *iommu; in iort_get_msi_resv_iommu() local
1090 iommu = iort_get_iort_node(fwspec->iommu_fwnode); in iort_get_msi_resv_iommu()
1092 if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) { in iort_get_msi_resv_iommu()
1095 smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data; in iort_get_msi_resv_iommu()
1096 if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X) in iort_get_msi_resv_iommu()
1097 return iommu; in iort_get_msi_resv_iommu()
1123 * ITS group; if a PCI or named component can map its IDs to in iort_iommu_msi_get_resv_regions()
1126 * a given PCI or named component may map IDs to. in iort_iommu_msi_get_resv_regions()
1129 for (i = 0; i < fwspec->num_ids; i++) { in iort_iommu_msi_get_resv_regions()
1131 fwspec->ids[i], in iort_iommu_msi_get_resv_regions()
1141 its = (struct acpi_iort_its_group *)its_node->node_data; in iort_iommu_msi_get_resv_regions()
1143 for (i = 0; i < its->its_count; i++) { in iort_iommu_msi_get_resv_regions()
1146 if (!iort_find_its_base(its->identifiers[i], &base)) { in iort_iommu_msi_get_resv_regions()
1154 list_add_tail(&region->list, head); in iort_iommu_msi_get_resv_regions()
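For each ITS the device can reach, the loop above reserves a 64K software-MSI window starting one 64K frame above the ITS base, which is where the GITS_TRANSLATER doorbell frame sits on GICv3, so the IOMMU never remaps it. A minimal sketch of that window computation:

	#include <stdint.h>

	#define SZ_64K	0x10000ULL

	struct msi_window { uint64_t start; uint64_t size; };

	/* The translation (doorbell) frame sits in the second 64K page of the ITS. */
	static struct msi_window its_msi_window(uint64_t its_base)
	{
		return (struct msi_window){ .start = its_base + SZ_64K, .size = SZ_64K };
	}

	/* Example: an ITS at 0x20080000 yields a reserved MSI window
	 * [0x20090000, 0x200a0000). */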
1160 * iort_iommu_get_resv_regions - Generic helper to retrieve reserved regions.
1169 iort_iommu_rmr_get_resv_regions(fwspec->iommu_fwnode, dev, head); in iort_iommu_get_resv_regions()
1173 * iort_get_rmr_sids - Retrieve IORT RMR node reserved regions with
1175 * @iommu_fwnode: fwnode associated with IOMMU
1186 * iort_put_rmr_sids - Free memory allocated for RMR reserved regions.
1187 * @iommu_fwnode: fwnode associated with IOMMU
1196 entry->free(NULL, entry); in iort_put_rmr_sids()
1217 pci_rc = (struct acpi_iort_root_complex *)node->node_data; in iort_pci_rc_supports_ats()
1218 return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED; in iort_pci_rc_supports_ats()
1228 return -ENODEV; in iort_iommu_xlate()
1232 return -ENODEV; in iort_iommu_xlate()
1235 * If the ops look-up fails, this means that either in iort_iommu_xlate()
1238 * Depending on whether the SMMU drivers are built-in in iort_iommu_xlate()
1239 * in the kernel or not, defer the IOMMU configuration in iort_iommu_xlate()
1244 return iort_iommu_driver_enabled(node->type) ? in iort_iommu_xlate()
1245 -EPROBE_DEFER : -ENODEV; in iort_iommu_xlate()
1261 parent = iort_node_map_id(info->node, alias, &streamid, in iort_pci_iommu_init()
1263 return iort_iommu_xlate(info->dev, parent, streamid); in iort_pci_iommu_init()
1272 nc = (struct acpi_iort_named_component *)node->node_data; in iort_named_component_init()
1273 props[0] = PROPERTY_ENTRY_U32("pasid-num-bits", in iort_named_component_init()
1275 nc->node_flags)); in iort_named_component_init()
1276 if (nc->node_flags & ACPI_IORT_NC_STALL_SUPPORTED) in iort_named_component_init()
1277 props[1] = PROPERTY_ENTRY_BOOL("dma-can-stall"); in iort_named_component_init()
1286 int err = -ENODEV, i = 0; in iort_nc_iommu_map()
1313 return -ENODEV; in iort_nc_iommu_map_id()
1318 * iort_iommu_configure_id - Set-up IOMMU configuration for a device.
1328 int err = -ENODEV; in iort_iommu_configure_id()
1332 struct pci_bus *bus = to_pci_dev(dev)->bus; in iort_iommu_configure_id()
1336 iort_match_node_callback, &bus->dev); in iort_iommu_configure_id()
1338 return -ENODEV; in iort_iommu_configure_id()
1346 fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS; in iort_iommu_configure_id()
1351 return -ENODEV; in iort_iommu_configure_id()
1367 { return -ENODEV; } in iort_iommu_configure_id()
1378 return -ENODEV; in nc_dma_get_range()
1380 ncomp = (struct acpi_iort_named_component *)node->node_data; in nc_dma_get_range()
1382 if (!ncomp->memory_address_limit) { in nc_dma_get_range()
1384 return -EINVAL; in nc_dma_get_range()
1387 *size = ncomp->memory_address_limit >= 64 ? U64_MAX : in nc_dma_get_range()
1388 1ULL<<ncomp->memory_address_limit; in nc_dma_get_range()
1397 struct pci_bus *pbus = to_pci_dev(dev)->bus; in rc_dma_get_range()
1400 iort_match_node_callback, &pbus->dev); in rc_dma_get_range()
1401 if (!node || node->revision < 1) in rc_dma_get_range()
1402 return -ENODEV; in rc_dma_get_range()
1404 rc = (struct acpi_iort_root_complex *)node->node_data; in rc_dma_get_range()
1406 if (!rc->memory_address_limit) { in rc_dma_get_range()
1408 return -EINVAL; in rc_dma_get_range()
1411 *size = rc->memory_address_limit >= 64 ? U64_MAX : in rc_dma_get_range()
1412 1ULL<<rc->memory_address_limit; in rc_dma_get_range()
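Both nc_dma_get_range() and rc_dma_get_range() derive the DMA addressable size from the IORT memory address size limit, which is a bit width rather than a byte count. A small sketch with worked values:

	#include <stdint.h>

	/*
	 * A limit of 32 means the device can address 1ULL << 32 = 4 GiB;
	 * 64 or more saturates to the full 64-bit space.
	 */
	static uint64_t limit_to_size(uint8_t memory_address_limit)
	{
		return memory_address_limit >= 64 ? UINT64_MAX
						  : 1ULL << memory_address_limit;
	}

	/* Example: limit_to_size(32) == 0x100000000 (4 GiB),
	 *          limit_to_size(48) == 0x1000000000000. */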
1418 * iort_dma_get_ranges() - Look up DMA addressing limit for the device
1445 res->start = irq; in acpi_iort_register_irq()
1446 res->end = irq; in acpi_iort_register_irq()
1447 res->flags = IORESOURCE_IRQ; in acpi_iort_register_irq()
1448 res->name = name; in acpi_iort_register_irq()
1458 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in arm_smmu_v3_count_resources()
1460 if (smmu->event_gsiv) in arm_smmu_v3_count_resources()
1463 if (smmu->pri_gsiv) in arm_smmu_v3_count_resources()
1466 if (smmu->gerr_gsiv) in arm_smmu_v3_count_resources()
1469 if (smmu->sync_gsiv) in arm_smmu_v3_count_resources()
1481 if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX) in arm_smmu_v3_is_combined_irq()
1488 return smmu->event_gsiv == smmu->pri_gsiv && in arm_smmu_v3_is_combined_irq()
1489 smmu->event_gsiv == smmu->gerr_gsiv && in arm_smmu_v3_is_combined_irq()
1490 smmu->event_gsiv == smmu->sync_gsiv; in arm_smmu_v3_is_combined_irq()
1499 if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX) in arm_smmu_v3_resource_size()
1512 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in arm_smmu_v3_init_resources()
1514 res[num_res].start = smmu->base_address; in arm_smmu_v3_init_resources()
1515 res[num_res].end = smmu->base_address + in arm_smmu_v3_init_resources()
1516 arm_smmu_v3_resource_size(smmu) - 1; in arm_smmu_v3_init_resources()
1521 if (smmu->event_gsiv) in arm_smmu_v3_init_resources()
1522 acpi_iort_register_irq(smmu->event_gsiv, "combined", in arm_smmu_v3_init_resources()
1527 if (smmu->event_gsiv) in arm_smmu_v3_init_resources()
1528 acpi_iort_register_irq(smmu->event_gsiv, "eventq", in arm_smmu_v3_init_resources()
1532 if (smmu->pri_gsiv) in arm_smmu_v3_init_resources()
1533 acpi_iort_register_irq(smmu->pri_gsiv, "priq", in arm_smmu_v3_init_resources()
1537 if (smmu->gerr_gsiv) in arm_smmu_v3_init_resources()
1538 acpi_iort_register_irq(smmu->gerr_gsiv, "gerror", in arm_smmu_v3_init_resources()
1542 if (smmu->sync_gsiv) in arm_smmu_v3_init_resources()
1543 acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync", in arm_smmu_v3_init_resources()
1556 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in arm_smmu_v3_dma_configure()
1558 attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ? in arm_smmu_v3_dma_configure()
1561 /* We expect the dma masks to be equivalent for all SMMUv3 set-ups */ in arm_smmu_v3_dma_configure()
1562 dev->dma_mask = &dev->coherent_dma_mask; in arm_smmu_v3_dma_configure()
1577 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in arm_smmu_v3_set_proximity()
1578 if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) { in arm_smmu_v3_set_proximity()
1579 int dev_node = pxm_to_node(smmu->pxm); in arm_smmu_v3_set_proximity()
1582 return -EINVAL; in arm_smmu_v3_set_proximity()
1585 pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n", in arm_smmu_v3_set_proximity()
1586 smmu->base_address, in arm_smmu_v3_set_proximity()
1587 smmu->pxm); in arm_smmu_v3_set_proximity()
1600 smmu = (struct acpi_iort_smmu *)node->node_data; in arm_smmu_count_resources()
1610 return smmu->context_interrupt_count + 2; in arm_smmu_count_resources()
1621 smmu = (struct acpi_iort_smmu *)node->node_data; in arm_smmu_init_resources()
1623 res[num_res].start = smmu->base_address; in arm_smmu_init_resources()
1624 res[num_res].end = smmu->base_address + smmu->span - 1; in arm_smmu_init_resources()
1628 glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset); in arm_smmu_init_resources()
1633 acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger, in arm_smmu_init_resources()
1637 ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset); in arm_smmu_init_resources()
1638 for (i = 0; i < smmu->context_interrupt_count; i++) { in arm_smmu_init_resources()
1642 acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger, in arm_smmu_init_resources()
1654 smmu = (struct acpi_iort_smmu *)node->node_data; in arm_smmu_dma_configure()
1656 attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ? in arm_smmu_dma_configure()
1659 /* We expect the dma masks to be equivalent for SMMU set-ups */ in arm_smmu_dma_configure()
1660 dev->dma_mask = &dev->coherent_dma_mask; in arm_smmu_dma_configure()
1671 pmcg = (struct acpi_iort_pmcg *)node->node_data; in arm_smmu_v3_pmcg_count_resources()
1677 return pmcg->overflow_gsiv ? 3 : 2; in arm_smmu_v3_pmcg_count_resources()
1686 pmcg = (struct acpi_iort_pmcg *)node->node_data; in arm_smmu_v3_pmcg_init_resources()
1688 res[0].start = pmcg->page0_base_address; in arm_smmu_v3_pmcg_init_resources()
1689 res[0].end = pmcg->page0_base_address + SZ_4K - 1; in arm_smmu_v3_pmcg_init_resources()
1697 if (node->revision > 0) { in arm_smmu_v3_pmcg_init_resources()
1698 res[1].start = pmcg->page1_base_address; in arm_smmu_v3_pmcg_init_resources()
1699 res[1].end = pmcg->page1_base_address + SZ_4K - 1; in arm_smmu_v3_pmcg_init_resources()
1703 if (pmcg->overflow_gsiv) in arm_smmu_v3_pmcg_init_resources()
1704 acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow", in arm_smmu_v3_pmcg_init_resources()
1746 .name = "arm-smmu-v3",
1754 .name = "arm-smmu",
1761 .name = "arm-smmu-v3-pmcg",
1770 switch (node->type) { in iort_get_dev_cfg()
1783 * iort_add_platform_device() - Allocate a platform device for IORT node
1797 pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO); in iort_add_platform_device()
1799 return -ENOMEM; in iort_add_platform_device()
1801 if (ops->dev_set_proximity) { in iort_add_platform_device()
1802 ret = ops->dev_set_proximity(&pdev->dev, node); in iort_add_platform_device()
1807 count = ops->dev_count_resources(node); in iort_add_platform_device()
1811 ret = -ENOMEM; in iort_add_platform_device()
1815 ops->dev_init_resources(r, node); in iort_add_platform_device()
1833 if (ops->dev_add_platdata) in iort_add_platform_device()
1834 ret = ops->dev_add_platdata(pdev); in iort_add_platform_device()
1844 ret = -ENODEV; in iort_add_platform_device()
1848 pdev->dev.fwnode = fwnode; in iort_add_platform_device()
1850 if (ops->dev_dma_configure) in iort_add_platform_device()
1851 ops->dev_dma_configure(&pdev->dev, node); in iort_add_platform_device()
1853 iort_set_device_domain(&pdev->dev, node); in iort_add_platform_device()
1862 arch_teardown_dma_ops(&pdev->dev); in iort_add_platform_device()
1877 if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) { in iort_enable_acs()
1879 struct acpi_iort_id_mapping *map; in iort_enable_acs() local
1882 map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node, in iort_enable_acs()
1883 iort_node->mapping_offset); in iort_enable_acs()
1885 for (i = 0; i < iort_node->mapping_count; i++, map++) { in iort_enable_acs()
1886 if (!map->output_reference) in iort_enable_acs()
1890 iort_table, map->output_reference); in iort_enable_acs()
1892 * If we detect an RC->SMMU mapping, make sure in iort_enable_acs()
1895 if ((parent->type == ACPI_IORT_NODE_SMMU) || in iort_enable_acs()
1896 (parent->type == ACPI_IORT_NODE_SMMU_V3)) { in iort_enable_acs()
1924 iort->node_offset); in iort_init_platform_devices()
1926 iort_table->length); in iort_init_platform_devices()
1928 for (i = 0; i < iort->node_count; i++) { in iort_init_platform_devices()
1953 iort_node->length); in iort_init_platform_devices()
2000 node = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->node_offset); in acpi_iort_dma_get_max_cpu_address()
2001 end = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->header.length); in acpi_iort_dma_get_max_cpu_address()
2003 for (i = 0; i < iort->node_count; i++) { in acpi_iort_dma_get_max_cpu_address()
2007 switch (node->type) { in acpi_iort_dma_get_max_cpu_address()
2013 ncomp = (struct acpi_iort_named_component *)node->node_data; in acpi_iort_dma_get_max_cpu_address()
2014 local_limit = DMA_BIT_MASK(ncomp->memory_address_limit); in acpi_iort_dma_get_max_cpu_address()
2019 if (node->revision < 1) in acpi_iort_dma_get_max_cpu_address()
2022 rc = (struct acpi_iort_root_complex *)node->node_data; in acpi_iort_dma_get_max_cpu_address()
2023 local_limit = DMA_BIT_MASK(rc->memory_address_limit); in acpi_iort_dma_get_max_cpu_address()
2027 node = ACPI_ADD_PTR(struct acpi_iort_node, node, node->length); in acpi_iort_dma_get_max_cpu_address()
2029 acpi_put_table(&iort->header); in acpi_iort_dma_get_max_cpu_address()