Lines Matching +full:iommu +full:- +full:v1
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
8 #define pr_fmt(fmt) "AMD-Vi: " fmt
20 #include <linux/amd-iommu.h>
25 #include <asm/pci-direct.h>
26 #include <asm/iommu.h>
99 * structure describing one IOMMU in the ACPI table. Typically followed by one
119 * A device entry describing which devices a specific IOMMU translates and
137 * An AMD IOMMU memory definition structure. It defines things like exclusion
183 /* IOMMUs have a non-present cache? */
196 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
236 bool translation_pre_enabled(struct amd_iommu *iommu) in translation_pre_enabled() argument
238 return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
241 static void clear_translation_pre_enabled(struct amd_iommu *iommu) in clear_translation_pre_enabled() argument
243 iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; in clear_translation_pre_enabled()
246 static void init_translation_status(struct amd_iommu *iommu) in init_translation_status() argument
250 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); in init_translation_status()
252 iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; in init_translation_status()
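/*
 * Translation counts as "pre-enabled" when the IOMMU_EN bit is already set
 * in the control register read above - typically because firmware or a
 * previous kernel (kdump case) left the IOMMU running. The flag gates
 * whether the old device table is copied later instead of starting clean.
 */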
274 struct amd_iommu *iommu; in get_global_efr() local
276 for_each_iommu(iommu) { in get_global_efr()
277 u64 tmp = iommu->features; in get_global_efr()
278 u64 tmp2 = iommu->features2; in get_global_efr()
280 if (list_is_first(&iommu->list, &amd_iommu_list)) { in get_global_efr()
291 …"Found inconsistent EFR/EFR2 %#llx,%#llx (global %#llx,%#llx) on iommu%d (%04x:%02x:%02x.%01x).\n", in get_global_efr()
293 iommu->index, iommu->pci_seg->id, in get_global_efr()
294 PCI_BUS_NUM(iommu->devid), PCI_SLOT(iommu->devid), in get_global_efr()
295 PCI_FUNC(iommu->devid)); in get_global_efr()
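/*
 * The first IOMMU in the list seeds the global EFR/EFR2 values; any later
 * IOMMU whose registers disagree triggers the inconsistency message above,
 * and the global values are then reduced (bitwise AND, elided in this
 * excerpt) to the feature subset all IOMMUs actually share.
 */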
314 static void __init early_iommu_features_init(struct amd_iommu *iommu, in early_iommu_features_init() argument
318 iommu->features = h->efr_reg; in early_iommu_features_init()
319 iommu->features2 = h->efr_reg2; in early_iommu_features_init()
327 static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) in iommu_read_l1() argument
331 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); in iommu_read_l1()
332 pci_read_config_dword(iommu->dev, 0xfc, &val); in iommu_read_l1()
336 static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val) in iommu_write_l1() argument
338 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31)); in iommu_write_l1()
339 pci_write_config_dword(iommu->dev, 0xfc, val); in iommu_write_l1()
340 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); in iommu_write_l1()
343 static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address) in iommu_read_l2() argument
347 pci_write_config_dword(iommu->dev, 0xf0, address); in iommu_read_l2()
348 pci_read_config_dword(iommu->dev, 0xf4, &val); in iommu_read_l2()
352 static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val) in iommu_write_l2() argument
354 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8)); in iommu_write_l2()
355 pci_write_config_dword(iommu->dev, 0xf4, val); in iommu_write_l2()
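/*
 * The four helpers above use indirect access windows in PCI config space:
 * 0xf8 and 0xf0 act as index registers (with bit 31 resp. bit 8 as the
 * write-enable), while 0xfc and 0xf4 are the data registers for the L1
 * and L2 register banks.
 */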
360 * AMD IOMMU MMIO register space handling functions
362 * These functions are used to program the IOMMU device registers in
368 * This function sets the exclusion range in the IOMMU. DMA accesses to the
371 static void iommu_set_exclusion_range(struct amd_iommu *iommu) in iommu_set_exclusion_range() argument
373 u64 start = iommu->exclusion_start & PAGE_MASK; in iommu_set_exclusion_range()
374 u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK; in iommu_set_exclusion_range()
377 if (!iommu->exclusion_start) in iommu_set_exclusion_range()
381 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, in iommu_set_exclusion_range()
385 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, in iommu_set_exclusion_range()
389 static void iommu_set_cwwb_range(struct amd_iommu *iommu) in iommu_set_cwwb_range() argument
391 u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem); in iommu_set_cwwb_range()
398 * Re-purpose Exclusion base/limit registers for Completion wait in iommu_set_cwwb_range()
399 * write-back base/limit. in iommu_set_cwwb_range()
401 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, in iommu_set_cwwb_range()
408 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, in iommu_set_cwwb_range()
412 /* Programs the physical address of the device table into the IOMMU hardware */
413 static void iommu_set_device_table(struct amd_iommu *iommu) in iommu_set_device_table() argument
416 u32 dev_table_size = iommu->pci_seg->dev_table_size; in iommu_set_device_table()
417 void *dev_table = (void *)get_dev_table(iommu); in iommu_set_device_table()
419 BUG_ON(iommu->mmio_base == NULL); in iommu_set_device_table()
422 entry |= (dev_table_size >> 12) - 1; in iommu_set_device_table()
423 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET, in iommu_set_device_table()
427 /* Generic functions to enable/disable certain features of the IOMMU. */
428 static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) in iommu_feature_enable() argument
432 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_feature_enable()
434 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_feature_enable()
437 static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) in iommu_feature_disable() argument
441 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_feature_disable()
443 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_feature_disable()
446 static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout) in iommu_set_inv_tlb_timeout() argument
450 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_set_inv_tlb_timeout()
453 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_set_inv_tlb_timeout()
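/*
 * All three helpers above do a read-modify-write of the 64-bit control
 * register. A minimal sketch of the enable path, assuming the CONTROL_*
 * constants name bit positions within that register:
 *
 *	ctrl  = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
 *	ctrl |= BIT_ULL(bit);	(the disable path clears the bit instead)
 *	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
 */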
457 static void iommu_enable(struct amd_iommu *iommu) in iommu_enable() argument
459 iommu_feature_enable(iommu, CONTROL_IOMMU_EN); in iommu_enable()
462 static void iommu_disable(struct amd_iommu *iommu) in iommu_disable() argument
464 if (!iommu->mmio_base) in iommu_disable()
468 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); in iommu_disable()
471 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN); in iommu_disable()
472 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); in iommu_disable()
474 /* Disable IOMMU GA_LOG */ in iommu_disable()
475 iommu_feature_disable(iommu, CONTROL_GALOG_EN); in iommu_disable()
476 iommu_feature_disable(iommu, CONTROL_GAINT_EN); in iommu_disable()
478 /* Disable IOMMU hardware itself */ in iommu_disable()
479 iommu_feature_disable(iommu, CONTROL_IOMMU_EN); in iommu_disable()
483 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
489 pr_err("Cannot reserve memory region %llx-%llx for mmio\n", in iommu_map_mmio_space()
498 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu) in iommu_unmap_mmio_space() argument
500 if (iommu->mmio_base) in iommu_unmap_mmio_space()
501 iounmap(iommu->mmio_base); in iommu_unmap_mmio_space()
502 release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end); in iommu_unmap_mmio_space()
509 switch (h->type) { in get_ivhd_header_size()
523 * The functions below belong to the first pass of AMD IOMMU ACPI table
535 u32 type = ((struct ivhd_entry *)ivhd)->type; in ivhd_entry_length()
547 * After reading the highest device id from the IOMMU PCI capability header
554 int last_devid = -EINVAL; in find_last_devid_from_ivhd()
559 pr_err("Unsupported IVHD type %#x\n", h->type); in find_last_devid_from_ivhd()
560 return -EINVAL; in find_last_devid_from_ivhd()
564 end += h->length; in find_last_devid_from_ivhd()
568 switch (dev->type) { in find_last_devid_from_ivhd()
577 if (dev->devid > last_devid) in find_last_devid_from_ivhd()
578 last_devid = dev->devid; in find_last_devid_from_ivhd()
596 for (i = 0; i < table->length; ++i) in check_ivrs_checksum()
601 return -ENODEV; in check_ivrs_checksum()
620 end += table->length; in find_last_devid_acpi()
623 if (h->pci_seg == pci_seg && in find_last_devid_acpi()
624 h->type == amd_iommu_target_ivhd_type) { in find_last_devid_acpi()
628 return -EINVAL; in find_last_devid_acpi()
632 p += h->length; in find_last_devid_acpi()
642 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
651 pci_seg->dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, in alloc_dev_table()
652 get_order(pci_seg->dev_table_size)); in alloc_dev_table()
653 if (!pci_seg->dev_table) in alloc_dev_table()
654 return -ENOMEM; in alloc_dev_table()
661 free_pages((unsigned long)pci_seg->dev_table, in free_dev_table()
662 get_order(pci_seg->dev_table_size)); in free_dev_table()
663 pci_seg->dev_table = NULL; in free_dev_table()
666 /* Allocate per PCI segment IOMMU rlookup table. */
669 pci_seg->rlookup_table = (void *)__get_free_pages( in alloc_rlookup_table()
671 get_order(pci_seg->rlookup_table_size)); in alloc_rlookup_table()
672 if (pci_seg->rlookup_table == NULL) in alloc_rlookup_table()
673 return -ENOMEM; in alloc_rlookup_table()
680 free_pages((unsigned long)pci_seg->rlookup_table, in free_rlookup_table()
681 get_order(pci_seg->rlookup_table_size)); in free_rlookup_table()
682 pci_seg->rlookup_table = NULL; in free_rlookup_table()
687 pci_seg->irq_lookup_table = (void *)__get_free_pages( in alloc_irq_lookup_table()
689 get_order(pci_seg->rlookup_table_size)); in alloc_irq_lookup_table()
690 kmemleak_alloc(pci_seg->irq_lookup_table, in alloc_irq_lookup_table()
691 pci_seg->rlookup_table_size, 1, GFP_KERNEL); in alloc_irq_lookup_table()
692 if (pci_seg->irq_lookup_table == NULL) in alloc_irq_lookup_table()
693 return -ENOMEM; in alloc_irq_lookup_table()
700 kmemleak_free(pci_seg->irq_lookup_table); in free_irq_lookup_table()
701 free_pages((unsigned long)pci_seg->irq_lookup_table, in free_irq_lookup_table()
702 get_order(pci_seg->rlookup_table_size)); in free_irq_lookup_table()
703 pci_seg->irq_lookup_table = NULL; in free_irq_lookup_table()
710 pci_seg->alias_table = (void *)__get_free_pages(GFP_KERNEL, in alloc_alias_table()
711 get_order(pci_seg->alias_table_size)); in alloc_alias_table()
712 if (!pci_seg->alias_table) in alloc_alias_table()
713 return -ENOMEM; in alloc_alias_table()
718 for (i = 0; i <= pci_seg->last_bdf; ++i) in alloc_alias_table()
719 pci_seg->alias_table[i] = i; in alloc_alias_table()
726 free_pages((unsigned long)pci_seg->alias_table, in free_alias_table()
727 get_order(pci_seg->alias_table_size)); in free_alias_table()
728 pci_seg->alias_table = NULL; in free_alias_table()
732 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
733 * write commands to that buffer later and the IOMMU will execute them
736 static int __init alloc_command_buffer(struct amd_iommu *iommu) in alloc_command_buffer() argument
738 iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, in alloc_command_buffer()
741 return iommu->cmd_buf ? 0 : -ENOMEM; in alloc_command_buffer()
745 * This function restarts event logging in case the IOMMU experienced
748 void amd_iommu_restart_event_logging(struct amd_iommu *iommu) in amd_iommu_restart_event_logging() argument
750 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); in amd_iommu_restart_event_logging()
751 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); in amd_iommu_restart_event_logging()
755 * This function resets the command buffer if the IOMMU stopped fetching
758 static void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) in amd_iommu_reset_cmd_buffer() argument
760 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); in amd_iommu_reset_cmd_buffer()
762 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); in amd_iommu_reset_cmd_buffer()
763 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in amd_iommu_reset_cmd_buffer()
764 iommu->cmd_buf_head = 0; in amd_iommu_reset_cmd_buffer()
765 iommu->cmd_buf_tail = 0; in amd_iommu_reset_cmd_buffer()
767 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); in amd_iommu_reset_cmd_buffer()
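/*
 * The command buffer is a ring: the MMIO head/tail registers zeroed here
 * mirror the driver-side cmd_buf_head/cmd_buf_tail indices, so clearing
 * all four while CMDBUF_EN is off discards any commands the IOMMU had not
 * yet fetched.
 */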
774 static void iommu_enable_command_buffer(struct amd_iommu *iommu) in iommu_enable_command_buffer() argument
778 BUG_ON(iommu->cmd_buf == NULL); in iommu_enable_command_buffer()
780 entry = iommu_virt_to_phys(iommu->cmd_buf); in iommu_enable_command_buffer()
783 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, in iommu_enable_command_buffer()
786 amd_iommu_reset_cmd_buffer(iommu); in iommu_enable_command_buffer()
792 static void iommu_disable_command_buffer(struct amd_iommu *iommu) in iommu_disable_command_buffer() argument
794 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); in iommu_disable_command_buffer()
797 static void __init free_command_buffer(struct amd_iommu *iommu) in free_command_buffer() argument
799 free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE)); in free_command_buffer()
802 static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, in iommu_alloc_4k_pages() argument
818 /* allocates the memory where the IOMMU will log its events to */
819 static int __init alloc_event_buffer(struct amd_iommu *iommu) in alloc_event_buffer() argument
821 iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, in alloc_event_buffer()
824 return iommu->evt_buf ? 0 : -ENOMEM; in alloc_event_buffer()
827 static void iommu_enable_event_buffer(struct amd_iommu *iommu) in iommu_enable_event_buffer() argument
831 BUG_ON(iommu->evt_buf == NULL); in iommu_enable_event_buffer()
833 entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK; in iommu_enable_event_buffer()
835 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET, in iommu_enable_event_buffer()
839 writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_enable_event_buffer()
840 writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); in iommu_enable_event_buffer()
842 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); in iommu_enable_event_buffer()
848 static void iommu_disable_event_buffer(struct amd_iommu *iommu) in iommu_disable_event_buffer() argument
850 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); in iommu_disable_event_buffer()
853 static void __init free_event_buffer(struct amd_iommu *iommu) in free_event_buffer() argument
855 free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE)); in free_event_buffer()
858 /* allocates the memory where the IOMMU will log PPR requests to */
859 static int __init alloc_ppr_log(struct amd_iommu *iommu) in alloc_ppr_log() argument
861 iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, in alloc_ppr_log()
864 return iommu->ppr_log ? 0 : -ENOMEM; in alloc_ppr_log()
867 static void iommu_enable_ppr_log(struct amd_iommu *iommu) in iommu_enable_ppr_log() argument
871 if (iommu->ppr_log == NULL) in iommu_enable_ppr_log()
874 entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512; in iommu_enable_ppr_log()
876 memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET, in iommu_enable_ppr_log()
880 writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_enable_ppr_log()
881 writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_enable_ppr_log()
883 iommu_feature_enable(iommu, CONTROL_PPRLOG_EN); in iommu_enable_ppr_log()
884 iommu_feature_enable(iommu, CONTROL_PPR_EN); in iommu_enable_ppr_log()
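/*
 * CONTROL_PPRLOG_EN makes the IOMMU write Peripheral Page Request (PPR)
 * entries into the log programmed above, while CONTROL_PPR_EN enables PPR
 * handling as such; the early return above skips both when no PPR log was
 * allocated for this IOMMU.
 */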
887 static void __init free_ppr_log(struct amd_iommu *iommu) in free_ppr_log() argument
889 free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE)); in free_ppr_log()
892 static void free_ga_log(struct amd_iommu *iommu) in free_ga_log() argument
895 free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE)); in free_ga_log()
896 free_pages((unsigned long)iommu->ga_log_tail, get_order(8)); in free_ga_log()
901 static int iommu_ga_log_enable(struct amd_iommu *iommu) in iommu_ga_log_enable() argument
906 if (!iommu->ga_log) in iommu_ga_log_enable()
907 return -EINVAL; in iommu_ga_log_enable()
909 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512; in iommu_ga_log_enable()
910 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, in iommu_ga_log_enable()
912 entry = (iommu_virt_to_phys(iommu->ga_log_tail) & in iommu_ga_log_enable()
913 (BIT_ULL(52)-1)) & ~7ULL; in iommu_ga_log_enable()
914 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, in iommu_ga_log_enable()
916 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_ga_log_enable()
917 writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET); in iommu_ga_log_enable()
920 iommu_feature_enable(iommu, CONTROL_GAINT_EN); in iommu_ga_log_enable()
921 iommu_feature_enable(iommu, CONTROL_GALOG_EN); in iommu_ga_log_enable()
924 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in iommu_ga_log_enable()
931 return -EINVAL; in iommu_ga_log_enable()
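/*
 * The loop around the status read above (elided in this excerpt) polls,
 * with a bounded timeout, for the GA-log run bit to appear; the -EINVAL
 * return fires only if the hardware never starts consuming the log.
 */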
936 static int iommu_init_ga_log(struct amd_iommu *iommu) in iommu_init_ga_log() argument
941 iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, in iommu_init_ga_log()
943 if (!iommu->ga_log) in iommu_init_ga_log()
946 iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, in iommu_init_ga_log()
948 if (!iommu->ga_log_tail) in iommu_init_ga_log()
953 free_ga_log(iommu); in iommu_init_ga_log()
954 return -EINVAL; in iommu_init_ga_log()
958 static int __init alloc_cwwb_sem(struct amd_iommu *iommu) in alloc_cwwb_sem() argument
960 iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1); in alloc_cwwb_sem()
962 return iommu->cmd_sem ? 0 : -ENOMEM; in alloc_cwwb_sem()
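/*
 * cmd_sem is a single 4K page used as the completion-wait write-back
 * semaphore; iommu_set_cwwb_range() points the repurposed exclusion
 * base/limit registers at it so COMPLETION_WAIT commands have a fixed,
 * known-good area to write their completion data to.
 */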
965 static void __init free_cwwb_sem(struct amd_iommu *iommu) in free_cwwb_sem() argument
967 if (iommu->cmd_sem) in free_cwwb_sem()
968 free_page((unsigned long)iommu->cmd_sem); in free_cwwb_sem()
971 static void iommu_enable_xt(struct amd_iommu *iommu) in iommu_enable_xt() argument
975 * XT mode (32-bit APIC destination ID) requires in iommu_enable_xt()
976 * GA mode (128-bit IRTE support) as a prerequisite. in iommu_enable_xt()
980 iommu_feature_enable(iommu, CONTROL_XT_EN); in iommu_enable_xt()
984 static void iommu_enable_gt(struct amd_iommu *iommu) in iommu_enable_gt() argument
986 if (!iommu_feature(iommu, FEATURE_GT)) in iommu_enable_gt()
989 iommu_feature_enable(iommu, CONTROL_GT_EN); in iommu_enable_gt()
1002 static void set_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit) in set_dev_entry_bit() argument
1004 struct dev_table_entry *dev_table = get_dev_table(iommu); in set_dev_entry_bit()
1018 static int get_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit) in get_dev_entry_bit() argument
1020 struct dev_table_entry *dev_table = get_dev_table(iommu); in get_dev_entry_bit()
1025 static bool __copy_device_table(struct amd_iommu *iommu) in __copy_device_table() argument
1028 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in __copy_device_table()
1036 /* Each IOMMU uses a separate device table with the same size */ in __copy_device_table()
1037 lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET); in __copy_device_table()
1038 hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4); in __copy_device_table()
1042 if (old_devtb_size != pci_seg->dev_table_size) { in __copy_device_table()
1043 pr_err("The device table size of IOMMU:%d is not expected!\n", in __copy_device_table()
1044 iommu->index); in __copy_device_table()
1061 pci_seg->dev_table_size) in __copy_device_table()
1062 : memremap(old_devtb_phys, pci_seg->dev_table_size, MEMREMAP_WB); in __copy_device_table()
1068 pci_seg->old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag, in __copy_device_table()
1069 get_order(pci_seg->dev_table_size)); in __copy_device_table()
1070 if (pci_seg->old_dev_tbl_cpy == NULL) { in __copy_device_table()
1076 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { in __copy_device_table()
1077 pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid]; in __copy_device_table()
1082 pci_seg->old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0]; in __copy_device_table()
1083 pci_seg->old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1]; in __copy_device_table()
1089 pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp; in __copy_device_table()
1092 pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp; in __copy_device_table()
1107 pci_seg->old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2]; in __copy_device_table()
1117 struct amd_iommu *iommu; in copy_device_table() local
1123 pr_warn("Translation is already enabled - trying to copy translation structures\n"); in copy_device_table()
1130 for_each_iommu(iommu) { in copy_device_table()
1131 if (pci_seg->id != iommu->pci_seg->id) in copy_device_table()
1133 if (!__copy_device_table(iommu)) in copy_device_table()
1142 void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid) in amd_iommu_apply_erratum_63() argument
1146 sysmgt = get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1) | in amd_iommu_apply_erratum_63()
1147 (get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2) << 1); in amd_iommu_apply_erratum_63()
1150 set_dev_entry_bit(iommu, devid, DEV_ENTRY_IW); in amd_iommu_apply_erratum_63()
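/*
 * The condition elided above applies only when the combined SYSMGT field
 * reads back as 01b; in that case erratum 63 requires the IW bit to be
 * set in the device table entry.
 */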
1157 static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, in set_dev_entry_from_acpi() argument
1161 set_dev_entry_bit(iommu, devid, DEV_ENTRY_INIT_PASS); in set_dev_entry_from_acpi()
1163 set_dev_entry_bit(iommu, devid, DEV_ENTRY_EINT_PASS); in set_dev_entry_from_acpi()
1165 set_dev_entry_bit(iommu, devid, DEV_ENTRY_NMI_PASS); in set_dev_entry_from_acpi()
1167 set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1); in set_dev_entry_from_acpi()
1169 set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2); in set_dev_entry_from_acpi()
1171 set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT0_PASS); in set_dev_entry_from_acpi()
1173 set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT1_PASS); in set_dev_entry_from_acpi()
1175 amd_iommu_apply_erratum_63(iommu, devid); in set_dev_entry_from_acpi()
1177 amd_iommu_set_rlookup_table(iommu, devid); in set_dev_entry_from_acpi()
1190 return -EINVAL; in add_special_device()
1193 if (!(entry->id == id && entry->cmd_line)) in add_special_device()
1196 pr_info("Command-line override present for %s id %d - ignoring\n", in add_special_device()
1199 *devid = entry->devid; in add_special_device()
1206 return -ENOMEM; in add_special_device()
1208 entry->id = id; in add_special_device()
1209 entry->devid = *devid; in add_special_device()
1210 entry->cmd_line = cmd_line; in add_special_device()
1212 list_add_tail(&entry->list, list); in add_special_device()
1224 if (strcmp(entry->hid, hid) || in add_acpi_hid_device()
1225 (*uid && *entry->uid && strcmp(entry->uid, uid)) || in add_acpi_hid_device()
1226 !entry->cmd_line) in add_acpi_hid_device()
1229 pr_info("Command-line override for hid:%s uid:%s\n", in add_acpi_hid_device()
1231 *devid = entry->devid; in add_acpi_hid_device()
1237 return -ENOMEM; in add_acpi_hid_device()
1239 memcpy(entry->uid, uid, strlen(uid)); in add_acpi_hid_device()
1240 memcpy(entry->hid, hid, strlen(hid)); in add_acpi_hid_device()
1241 entry->devid = *devid; in add_acpi_hid_device()
1242 entry->cmd_line = cmd_line; in add_acpi_hid_device()
1243 entry->root_devid = (entry->devid & (~0x7)); in add_acpi_hid_device()
1246 entry->cmd_line ? "cmd" : "ivrs", in add_acpi_hid_device()
1247 entry->hid, entry->uid, entry->root_devid); in add_acpi_hid_device()
1249 list_add_tail(&entry->list, list); in add_acpi_hid_device()
1288 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
1291 static int __init init_iommu_from_acpi(struct amd_iommu *iommu, in init_iommu_from_acpi() argument
1300 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in init_iommu_from_acpi()
1314 iommu->acpi_flags = h->flags; in init_iommu_from_acpi()
1321 pr_err("Unsupported IVHD type %#x\n", h->type); in init_iommu_from_acpi()
1322 return -EINVAL; in init_iommu_from_acpi()
1327 end += h->length; in init_iommu_from_acpi()
1332 seg_id = pci_seg->id; in init_iommu_from_acpi()
1334 switch (e->type) { in init_iommu_from_acpi()
1337 DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags); in init_iommu_from_acpi()
1339 for (dev_i = 0; dev_i <= pci_seg->last_bdf; ++dev_i) in init_iommu_from_acpi()
1340 set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0); in init_iommu_from_acpi()
1346 seg_id, PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1347 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1348 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1349 e->flags); in init_iommu_from_acpi()
1351 devid = e->devid; in init_iommu_from_acpi()
1352 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1358 seg_id, PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1359 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1360 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1361 e->flags); in init_iommu_from_acpi()
1363 devid_start = e->devid; in init_iommu_from_acpi()
1364 flags = e->flags; in init_iommu_from_acpi()
1372 seg_id, PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1373 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1374 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1375 e->flags, in init_iommu_from_acpi()
1376 PCI_BUS_NUM(e->ext >> 8), in init_iommu_from_acpi()
1377 PCI_SLOT(e->ext >> 8), in init_iommu_from_acpi()
1378 PCI_FUNC(e->ext >> 8)); in init_iommu_from_acpi()
1380 devid = e->devid; in init_iommu_from_acpi()
1381 devid_to = e->ext >> 8; in init_iommu_from_acpi()
1382 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1383 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0); in init_iommu_from_acpi()
1384 pci_seg->alias_table[devid] = devid_to; in init_iommu_from_acpi()
1391 seg_id, PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1392 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1393 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1394 e->flags, in init_iommu_from_acpi()
1395 seg_id, PCI_BUS_NUM(e->ext >> 8), in init_iommu_from_acpi()
1396 PCI_SLOT(e->ext >> 8), in init_iommu_from_acpi()
1397 PCI_FUNC(e->ext >> 8)); in init_iommu_from_acpi()
1399 devid_start = e->devid; in init_iommu_from_acpi()
1400 flags = e->flags; in init_iommu_from_acpi()
1401 devid_to = e->ext >> 8; in init_iommu_from_acpi()
1409 seg_id, PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1410 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1411 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1412 e->flags, e->ext); in init_iommu_from_acpi()
1414 devid = e->devid; in init_iommu_from_acpi()
1415 set_dev_entry_from_acpi(iommu, devid, e->flags, in init_iommu_from_acpi()
1416 e->ext); in init_iommu_from_acpi()
1422 seg_id, PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1423 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1424 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1425 e->flags, e->ext); in init_iommu_from_acpi()
1427 devid_start = e->devid; in init_iommu_from_acpi()
1428 flags = e->flags; in init_iommu_from_acpi()
1429 ext_flags = e->ext; in init_iommu_from_acpi()
1435 seg_id, PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1436 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1437 PCI_FUNC(e->devid)); in init_iommu_from_acpi()
1439 devid = e->devid; in init_iommu_from_acpi()
1442 pci_seg->alias_table[dev_i] = devid_to; in init_iommu_from_acpi()
1443 set_dev_entry_from_acpi(iommu, in init_iommu_from_acpi()
1446 set_dev_entry_from_acpi(iommu, dev_i, in init_iommu_from_acpi()
1456 handle = e->ext & 0xff; in init_iommu_from_acpi()
1457 devid = PCI_SEG_DEVID_TO_SBDF(seg_id, (e->ext >> 8)); in init_iommu_from_acpi()
1458 type = (e->ext >> 24) & 0xff; in init_iommu_from_acpi()
1479 * command-line override is present. So call in init_iommu_from_acpi()
1482 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1492 if (h->type != 0x40) { in init_iommu_from_acpi()
1494 e->type); in init_iommu_from_acpi()
1498 BUILD_BUG_ON(sizeof(e->ext_hid) != ACPIHID_HID_LEN - 1); in init_iommu_from_acpi()
1499 memcpy(hid, &e->ext_hid, ACPIHID_HID_LEN - 1); in init_iommu_from_acpi()
1500 hid[ACPIHID_HID_LEN - 1] = '\0'; in init_iommu_from_acpi()
1508 switch (e->uidf) { in init_iommu_from_acpi()
1511 if (e->uidl != 0) in init_iommu_from_acpi()
1517 sprintf(uid, "%d", e->uid); in init_iommu_from_acpi()
1522 memcpy(uid, &e->uid, e->uidl); in init_iommu_from_acpi()
1523 uid[e->uidl] = '\0'; in init_iommu_from_acpi()
1530 devid = PCI_SEG_DEVID_TO_SBDF(seg_id, e->devid); in init_iommu_from_acpi()
1537 flags = e->flags; in init_iommu_from_acpi()
1545 * command-line override is present. So call in init_iommu_from_acpi()
1548 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1582 pci_seg->last_bdf = last_bdf; in alloc_pci_segment()
1584 pci_seg->dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf); in alloc_pci_segment()
1585 pci_seg->alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf); in alloc_pci_segment()
1586 pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf); in alloc_pci_segment()
1588 pci_seg->id = id; in alloc_pci_segment()
1589 init_llist_head(&pci_seg->dev_data_list); in alloc_pci_segment()
1590 INIT_LIST_HEAD(&pci_seg->unity_map); in alloc_pci_segment()
1591 list_add_tail(&pci_seg->list, &amd_iommu_pci_seg_list); in alloc_pci_segment()
1609 if (pci_seg->id == id) in get_pci_segment()
1621 list_del(&pci_seg->list); in free_pci_segments()
1630 static void __init free_iommu_one(struct amd_iommu *iommu) in free_iommu_one() argument
1632 free_cwwb_sem(iommu); in free_iommu_one()
1633 free_command_buffer(iommu); in free_iommu_one()
1634 free_event_buffer(iommu); in free_iommu_one()
1635 free_ppr_log(iommu); in free_iommu_one()
1636 free_ga_log(iommu); in free_iommu_one()
1637 iommu_unmap_mmio_space(iommu); in free_iommu_one()
1642 struct amd_iommu *iommu, *next; in free_iommu_all() local
1644 for_each_iommu_safe(iommu, next) { in free_iommu_all()
1645 list_del(&iommu->list); in free_iommu_all()
1646 free_iommu_one(iommu); in free_iommu_all()
1647 kfree(iommu); in free_iommu_all()
1652 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
1657 static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) in amd_iommu_erratum_746_workaround() argument
1666 pci_write_config_dword(iommu->dev, 0xf0, 0x90); in amd_iommu_erratum_746_workaround()
1667 pci_read_config_dword(iommu->dev, 0xf4, &value); in amd_iommu_erratum_746_workaround()
1673 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8)); in amd_iommu_erratum_746_workaround()
1675 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4); in amd_iommu_erratum_746_workaround()
1676 pci_info(iommu->dev, "Applying erratum 746 workaround\n"); in amd_iommu_erratum_746_workaround()
1679 pci_write_config_dword(iommu->dev, 0xf0, 0x90); in amd_iommu_erratum_746_workaround()
1683 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
1688 static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu) in amd_iommu_ats_write_check_workaround() argument
1698 value = iommu_read_l2(iommu, 0x47); in amd_iommu_ats_write_check_workaround()
1704 iommu_write_l2(iommu, 0x47, value | BIT(0)); in amd_iommu_ats_write_check_workaround()
1706 pci_info(iommu->dev, "Applying ATS write check workaround\n"); in amd_iommu_ats_write_check_workaround()
1710 * This function glues the initialization function for one IOMMU
1712 * hardware. It does NOT enable the IOMMU. This is done afterwards.
1714 static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h, in init_iommu_one() argument
1719 pci_seg = get_pci_segment(h->pci_seg, ivrs_base); in init_iommu_one()
1721 return -ENOMEM; in init_iommu_one()
1722 iommu->pci_seg = pci_seg; in init_iommu_one()
1724 raw_spin_lock_init(&iommu->lock); in init_iommu_one()
1725 iommu->cmd_sem_val = 0; in init_iommu_one()
1727 /* Add IOMMU to internal data structures */ in init_iommu_one()
1728 list_add_tail(&iommu->list, &amd_iommu_list); in init_iommu_one()
1729 iommu->index = amd_iommus_present++; in init_iommu_one()
1731 if (unlikely(iommu->index >= MAX_IOMMUS)) { in init_iommu_one()
1733 return -ENOSYS; in init_iommu_one()
1736 /* Index is fine - add IOMMU to the array */ in init_iommu_one()
1737 amd_iommus[iommu->index] = iommu; in init_iommu_one()
1740 * Copy data from ACPI table entry to the iommu struct in init_iommu_one()
1742 iommu->devid = h->devid; in init_iommu_one()
1743 iommu->cap_ptr = h->cap_ptr; in init_iommu_one()
1744 iommu->mmio_phys = h->mmio_phys; in init_iommu_one()
1746 switch (h->type) { in init_iommu_one()
1749 if ((h->efr_attr != 0) && in init_iommu_one()
1750 ((h->efr_attr & (0xF << 13)) != 0) && in init_iommu_one()
1751 ((h->efr_attr & (0x3F << 17)) != 0)) in init_iommu_one()
1752 iommu->mmio_phys_end = MMIO_REG_END_OFFSET; in init_iommu_one()
1754 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; in init_iommu_one()
1757 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support. in init_iommu_one()
1762 ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)) in init_iommu_one()
1767 if (h->efr_reg & (1 << 9)) in init_iommu_one()
1768 iommu->mmio_phys_end = MMIO_REG_END_OFFSET; in init_iommu_one()
1770 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; in init_iommu_one()
1773 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support. in init_iommu_one()
1778 ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) { in init_iommu_one()
1783 if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) in init_iommu_one()
1786 early_iommu_features_init(iommu, h); in init_iommu_one()
1790 return -EINVAL; in init_iommu_one()
1793 iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys, in init_iommu_one()
1794 iommu->mmio_phys_end); in init_iommu_one()
1795 if (!iommu->mmio_base) in init_iommu_one()
1796 return -ENOMEM; in init_iommu_one()
1798 return init_iommu_from_acpi(iommu, h); in init_iommu_one()
1801 static int __init init_iommu_one_late(struct amd_iommu *iommu) in init_iommu_one_late() argument
1805 if (alloc_cwwb_sem(iommu)) in init_iommu_one_late()
1806 return -ENOMEM; in init_iommu_one_late()
1808 if (alloc_command_buffer(iommu)) in init_iommu_one_late()
1809 return -ENOMEM; in init_iommu_one_late()
1811 if (alloc_event_buffer(iommu)) in init_iommu_one_late()
1812 return -ENOMEM; in init_iommu_one_late()
1814 iommu->int_enabled = false; in init_iommu_one_late()
1816 init_translation_status(iommu); in init_iommu_one_late()
1817 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) { in init_iommu_one_late()
1818 iommu_disable(iommu); in init_iommu_one_late()
1819 clear_translation_pre_enabled(iommu); in init_iommu_one_late()
1820 pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n", in init_iommu_one_late()
1821 iommu->index); in init_iommu_one_late()
1824 amd_iommu_pre_enabled = translation_pre_enabled(iommu); in init_iommu_one_late()
1827 ret = amd_iommu_create_irq_domain(iommu); in init_iommu_one_late()
1833 * Make sure IOMMU is not considered to translate itself. The IVRS in init_iommu_one_late()
1836 iommu->pci_seg->rlookup_table[iommu->devid] = NULL; in init_iommu_one_late()
1842 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
1852 u8 last_type = ivhd->type; in get_highest_supported_ivhd_type()
1853 u16 devid = ivhd->devid; in get_highest_supported_ivhd_type()
1855 while (((u8 *)ivhd - base < ivrs->length) && in get_highest_supported_ivhd_type()
1856 (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) { in get_highest_supported_ivhd_type()
1859 if (ivhd->devid == devid) in get_highest_supported_ivhd_type()
1860 last_type = ivhd->type; in get_highest_supported_ivhd_type()
1861 ivhd = (struct ivhd_header *)(p + ivhd->length); in get_highest_supported_ivhd_type()
1868 * Iterates over all IOMMU entries in the ACPI table, allocates the
1869 * IOMMU structure and initializes it with init_iommu_one()
1875 struct amd_iommu *iommu; in init_iommu_all() local
1878 end += table->length; in init_iommu_all()
1888 h->pci_seg, PCI_BUS_NUM(h->devid), in init_iommu_all()
1889 PCI_SLOT(h->devid), PCI_FUNC(h->devid), in init_iommu_all()
1890 h->cap_ptr, h->flags, h->info); in init_iommu_all()
1891 DUMP_printk(" mmio-addr: %016llx\n", in init_iommu_all()
1892 h->mmio_phys); in init_iommu_all()
1894 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL); in init_iommu_all()
1895 if (iommu == NULL) in init_iommu_all()
1896 return -ENOMEM; in init_iommu_all()
1898 ret = init_iommu_one(iommu, h, table); in init_iommu_all()
1902 p += h->length; in init_iommu_all()
1910 /* Phase 3 : Enabling IOMMU features */ in init_iommu_all()
1911 for_each_iommu(iommu) { in init_iommu_all()
1912 ret = init_iommu_one_late(iommu); in init_iommu_all()
1920 static void init_iommu_perf_ctr(struct amd_iommu *iommu) in init_iommu_perf_ctr() argument
1923 struct pci_dev *pdev = iommu->dev; in init_iommu_perf_ctr()
1925 if (!iommu_feature(iommu, FEATURE_PC)) in init_iommu_perf_ctr()
1930 pci_info(pdev, "IOMMU performance counters supported\n"); in init_iommu_perf_ctr()
1932 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET); in init_iommu_perf_ctr()
1933 iommu->max_banks = (u8) ((val >> 12) & 0x3f); in init_iommu_perf_ctr()
1934 iommu->max_counters = (u8) ((val >> 7) & 0xf); in init_iommu_perf_ctr()
1943 struct amd_iommu *iommu = dev_to_amd_iommu(dev); in amd_iommu_show_cap() local
1944 return sprintf(buf, "%x\n", iommu->cap); in amd_iommu_show_cap()
1952 struct amd_iommu *iommu = dev_to_amd_iommu(dev); in amd_iommu_show_features() local
1953 return sprintf(buf, "%llx:%llx\n", iommu->features2, iommu->features); in amd_iommu_show_features()
1964 .name = "amd-iommu",
1975 * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
1978 static void __init late_iommu_features_init(struct amd_iommu *iommu) in late_iommu_features_init() argument
1982 if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) in late_iommu_features_init()
1986 features = readq(iommu->mmio_base + MMIO_EXT_FEATURES); in late_iommu_features_init()
1987 features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2); in late_iommu_features_init()
1989 if (!iommu->features) { in late_iommu_features_init()
1990 iommu->features = features; in late_iommu_features_init()
1991 iommu->features2 = features2; in late_iommu_features_init()
1999 if (features != iommu->features || in late_iommu_features_init()
2000 features2 != iommu->features2) { in late_iommu_features_init()
2003 features, iommu->features, in late_iommu_features_init()
2004 features2, iommu->features2); in late_iommu_features_init()
2008 static int __init iommu_init_pci(struct amd_iommu *iommu) in iommu_init_pci() argument
2010 int cap_ptr = iommu->cap_ptr; in iommu_init_pci()
2013 iommu->dev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, in iommu_init_pci()
2014 PCI_BUS_NUM(iommu->devid), in iommu_init_pci()
2015 iommu->devid & 0xff); in iommu_init_pci()
2016 if (!iommu->dev) in iommu_init_pci()
2017 return -ENODEV; in iommu_init_pci()
2019 /* Prevent binding other PCI device drivers to IOMMU devices */ in iommu_init_pci()
2020 iommu->dev->match_driver = false; in iommu_init_pci()
2022 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, in iommu_init_pci()
2023 &iommu->cap); in iommu_init_pci()
2025 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) in iommu_init_pci()
2028 late_iommu_features_init(iommu); in iommu_init_pci()
2030 if (iommu_feature(iommu, FEATURE_GT)) { in iommu_init_pci()
2035 pasmax = iommu->features & FEATURE_PASID_MASK; in iommu_init_pci()
2037 max_pasid = (1 << (pasmax + 1)) - 1; in iommu_init_pci()
2043 glxval = iommu->features & FEATURE_GLXVAL_MASK; in iommu_init_pci()
2046 if (amd_iommu_max_glx_val == -1) in iommu_init_pci()
2052 if (iommu_feature(iommu, FEATURE_GT) && in iommu_init_pci()
2053 iommu_feature(iommu, FEATURE_PPR)) { in iommu_init_pci()
2054 iommu->is_iommu_v2 = true; in iommu_init_pci()
2058 if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu)) in iommu_init_pci()
2059 return -ENOMEM; in iommu_init_pci()
2061 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) { in iommu_init_pci()
2067 init_iommu_perf_ctr(iommu); in iommu_init_pci()
2070 if (!iommu_feature(iommu, FEATURE_GIOSUP) || in iommu_init_pci()
2071 !iommu_feature(iommu, FEATURE_GT)) { in iommu_init_pci()
2072 pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n"); in iommu_init_pci()
2075 pr_warn("V2 page table doesn't support passthrough mode. Fallback to v1.\n"); in iommu_init_pci()
2080 if (is_rd890_iommu(iommu->dev)) { in iommu_init_pci()
2083 iommu->root_pdev = in iommu_init_pci()
2084 pci_get_domain_bus_and_slot(iommu->pci_seg->id, in iommu_init_pci()
2085 iommu->dev->bus->number, in iommu_init_pci()
2093 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4, in iommu_init_pci()
2094 &iommu->stored_addr_lo); in iommu_init_pci()
2095 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8, in iommu_init_pci()
2096 &iommu->stored_addr_hi); in iommu_init_pci()
2099 iommu->stored_addr_lo &= ~1; in iommu_init_pci()
2103 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j); in iommu_init_pci()
2106 iommu->stored_l2[i] = iommu_read_l2(iommu, i); in iommu_init_pci()
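/*
 * The capability BAR words and the L1/L2 register contents captured here
 * are exactly what iommu_apply_resume_quirks() rewrites after suspend,
 * since RD890 BIOSes may not reprogram the IOMMU on resume.
 */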
2109 amd_iommu_erratum_746_workaround(iommu); in iommu_init_pci()
2110 amd_iommu_ats_write_check_workaround(iommu); in iommu_init_pci()
2112 ret = iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev, in iommu_init_pci()
2113 amd_iommu_groups, "ivhd%d", iommu->index); in iommu_init_pci()
2117 iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL); in iommu_init_pci()
2119 return pci_enable_device(iommu->dev); in iommu_init_pci()
2128 struct amd_iommu *iommu; in print_iommu_info() local
2130 for_each_iommu(iommu) { in print_iommu_info()
2131 struct pci_dev *pdev = iommu->dev; in print_iommu_info()
2134 pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr); in print_iommu_info()
2136 if (iommu->cap & (1 << IOMMU_CAP_EFR)) { in print_iommu_info()
2137 pr_info("Extended features (%#llx, %#llx):", iommu->features, iommu->features2); in print_iommu_info()
2140 if (iommu_feature(iommu, (1ULL << i))) in print_iommu_info()
2144 if (iommu->features & FEATURE_GAM_VAPIC) in print_iommu_info()
2147 if (iommu->features & FEATURE_SNP) in print_iommu_info()
2164 struct amd_iommu *iommu; in amd_iommu_init_pci() local
2168 for_each_iommu(iommu) { in amd_iommu_init_pci()
2169 ret = iommu_init_pci(iommu); in amd_iommu_init_pci()
2171 pr_err("IOMMU%d: Failed to initialize IOMMU Hardware (error=%d)!\n", in amd_iommu_init_pci()
2172 iommu->index, ret); in amd_iommu_init_pci()
2176 iommu_set_cwwb_range(iommu); in amd_iommu_init_pci()
2192 for_each_iommu(iommu) in amd_iommu_init_pci()
2193 iommu_flush_all_caches(iommu); in amd_iommu_init_pci()
2210 static int iommu_setup_msi(struct amd_iommu *iommu) in iommu_setup_msi() argument
2214 r = pci_enable_msi(iommu->dev); in iommu_setup_msi()
2218 r = request_threaded_irq(iommu->dev->irq, in iommu_setup_msi()
2221 0, "AMD-Vi", in iommu_setup_msi()
2222 iommu); in iommu_setup_msi()
2225 pci_disable_msi(iommu->dev); in iommu_setup_msi()
2266 if (!info || info->type != X86_IRQ_ALLOC_TYPE_AMDVI) in intcapxt_irqdomain_alloc()
2267 return -EINVAL; in intcapxt_irqdomain_alloc()
2276 irqd->chip = &intcapxt_controller; in intcapxt_irqdomain_alloc()
2277 irqd->chip_data = info->data; in intcapxt_irqdomain_alloc()
2293 struct amd_iommu *iommu = irqd->chip_data; in intcapxt_unmask_irq() local
2298 xt.dest_mode_logical = apic->dest_mode_logical; in intcapxt_unmask_irq()
2299 xt.vector = cfg->vector; in intcapxt_unmask_irq()
2300 xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0); in intcapxt_unmask_irq()
2301 xt.destid_24_31 = cfg->dest_apicid >> 24; in intcapxt_unmask_irq()
2304 * Current IOMMU implementation uses the same IRQ for all in intcapxt_unmask_irq()
2305 * 3 IOMMU interrupts. in intcapxt_unmask_irq()
2307 writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET); in intcapxt_unmask_irq()
2308 writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET); in intcapxt_unmask_irq()
2309 writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET); in intcapxt_unmask_irq()
2314 struct amd_iommu *iommu = irqd->chip_data; in intcapxt_mask_irq() local
2316 writeq(0, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET); in intcapxt_mask_irq()
2317 writeq(0, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET); in intcapxt_mask_irq()
2318 writeq(0, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET); in intcapxt_mask_irq()
2325 struct irq_data *parent = irqd->parent_data; in intcapxt_set_affinity()
2328 ret = parent->chip->irq_set_affinity(parent, mask, force); in intcapxt_set_affinity()
2336 return on ? -EOPNOTSUPP : 0; in intcapxt_set_wake()
2340 .name = "IOMMU-MSI",
2364 /* No need for locking here (yet) as the init is single-threaded */ in iommu_get_irqdomain()
2368 fn = irq_domain_alloc_named_fwnode("AMD-Vi-MSI"); in iommu_get_irqdomain()
2381 static int iommu_setup_intcapxt(struct amd_iommu *iommu) in iommu_setup_intcapxt() argument
2389 return -ENXIO; in iommu_setup_intcapxt()
2393 info.data = iommu; in iommu_setup_intcapxt()
2402 amd_iommu_int_thread, 0, "AMD-Vi", iommu); in iommu_setup_intcapxt()
2412 static int iommu_init_irq(struct amd_iommu *iommu) in iommu_init_irq() argument
2416 if (iommu->int_enabled) in iommu_init_irq()
2420 ret = iommu_setup_intcapxt(iommu); in iommu_init_irq()
2421 else if (iommu->dev->msi_cap) in iommu_init_irq()
2422 ret = iommu_setup_msi(iommu); in iommu_init_irq()
2424 ret = -ENODEV; in iommu_init_irq()
2429 iommu->int_enabled = true; in iommu_init_irq()
2433 iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN); in iommu_init_irq()
2435 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); in iommu_init_irq()
2437 if (iommu->ppr_log != NULL) in iommu_init_irq()
2438 iommu_feature_enable(iommu, CONTROL_PPRINT_EN); in iommu_init_irq()
2456 list_for_each_entry_safe(entry, next, &pci_seg->unity_map, list) { in free_unity_maps()
2457 list_del(&entry->list); in free_unity_maps()
2471 pci_seg = get_pci_segment(m->pci_seg, ivrs_base); in init_unity_map_range()
2473 return -ENOMEM; in init_unity_map_range()
2477 return -ENOMEM; in init_unity_map_range()
2479 switch (m->type) { in init_unity_map_range()
2485 e->devid_start = e->devid_end = m->devid; in init_unity_map_range()
2489 e->devid_start = 0; in init_unity_map_range()
2490 e->devid_end = pci_seg->last_bdf; in init_unity_map_range()
2494 e->devid_start = m->devid; in init_unity_map_range()
2495 e->devid_end = m->aux; in init_unity_map_range()
2498 e->address_start = PAGE_ALIGN(m->range_start); in init_unity_map_range()
2499 e->address_end = e->address_start + PAGE_ALIGN(m->range_length); in init_unity_map_range()
2500 e->prot = m->flags >> 1; in init_unity_map_range()
2503 * Treat per-device exclusion ranges as r/w unity-mapped regions in init_unity_map_range()
2509 if (m->flags & IVMD_FLAG_EXCL_RANGE) in init_unity_map_range()
2510 e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1; in init_unity_map_range()
2514 " flags: %x\n", s, m->pci_seg, in init_unity_map_range()
2515 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start), in init_unity_map_range()
2516 PCI_FUNC(e->devid_start), m->pci_seg, in init_unity_map_range()
2517 PCI_BUS_NUM(e->devid_end), in init_unity_map_range()
2518 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end), in init_unity_map_range()
2519 e->address_start, e->address_end, m->flags); in init_unity_map_range()
2521 list_add_tail(&e->list, &pci_seg->unity_map); in init_unity_map_range()
2532 end += table->length; in init_memory_definitions()
2537 if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE)) in init_memory_definitions()
2540 p += m->length; in init_memory_definitions()
2552 struct dev_table_entry *dev_table = pci_seg->dev_table; in init_device_table_dma()
2557 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { in init_device_table_dma()
2567 struct dev_table_entry *dev_table = pci_seg->dev_table; in uninit_device_table_dma()
2572 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { in uninit_device_table_dma()
2587 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) in init_device_table()
2588 __set_dev_entry_bit(pci_seg->dev_table, in init_device_table()
2593 static void iommu_init_flags(struct amd_iommu *iommu) in iommu_init_flags() argument
2595 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? in iommu_init_flags()
2596 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : in iommu_init_flags()
2597 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); in iommu_init_flags()
2599 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? in iommu_init_flags()
2600 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : in iommu_init_flags()
2601 iommu_feature_disable(iommu, CONTROL_PASSPW_EN); in iommu_init_flags()
2603 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? in iommu_init_flags()
2604 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : in iommu_init_flags()
2605 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); in iommu_init_flags()
2607 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? in iommu_init_flags()
2608 iommu_feature_enable(iommu, CONTROL_ISOC_EN) : in iommu_init_flags()
2609 iommu_feature_disable(iommu, CONTROL_ISOC_EN); in iommu_init_flags()
2612 * make IOMMU memory accesses cache coherent in iommu_init_flags()
2614 iommu_feature_enable(iommu, CONTROL_COHERENT_EN); in iommu_init_flags()
2617 iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S); in iommu_init_flags()
2620 static void iommu_apply_resume_quirks(struct amd_iommu *iommu) in iommu_apply_resume_quirks() argument
2624 struct pci_dev *pdev = iommu->root_pdev; in iommu_apply_resume_quirks()
2626 /* RD890 BIOSes may not have completely reconfigured the iommu */ in iommu_apply_resume_quirks()
2627 if (!is_rd890_iommu(iommu->dev) || !pdev) in iommu_apply_resume_quirks()
2631 * First, we need to ensure that the iommu is enabled. This is in iommu_apply_resume_quirks()
2639 /* Enable the iommu */ in iommu_apply_resume_quirks()
2643 /* Restore the iommu BAR */ in iommu_apply_resume_quirks()
2644 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, in iommu_apply_resume_quirks()
2645 iommu->stored_addr_lo); in iommu_apply_resume_quirks()
2646 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8, in iommu_apply_resume_quirks()
2647 iommu->stored_addr_hi); in iommu_apply_resume_quirks()
2652 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]); in iommu_apply_resume_quirks()
2656 iommu_write_l2(iommu, i, iommu->stored_l2[i]); in iommu_apply_resume_quirks()
2659 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, in iommu_apply_resume_quirks()
2660 iommu->stored_addr_lo | 1); in iommu_apply_resume_quirks()
2663 static void iommu_enable_ga(struct amd_iommu *iommu) in iommu_enable_ga() argument
2669 iommu_feature_enable(iommu, CONTROL_GA_EN); in iommu_enable_ga()
2670 iommu->irte_ops = &irte_128_ops; in iommu_enable_ga()
2673 iommu->irte_ops = &irte_32_ops; in iommu_enable_ga()
2679 static void early_enable_iommu(struct amd_iommu *iommu) in early_enable_iommu() argument
2681 iommu_disable(iommu); in early_enable_iommu()
2682 iommu_init_flags(iommu); in early_enable_iommu()
2683 iommu_set_device_table(iommu); in early_enable_iommu()
2684 iommu_enable_command_buffer(iommu); in early_enable_iommu()
2685 iommu_enable_event_buffer(iommu); in early_enable_iommu()
2686 iommu_set_exclusion_range(iommu); in early_enable_iommu()
2687 iommu_enable_ga(iommu); in early_enable_iommu()
2688 iommu_enable_xt(iommu); in early_enable_iommu()
2689 iommu_enable(iommu); in early_enable_iommu()
2690 iommu_flush_all_caches(iommu); in early_enable_iommu()
2697 * Or if in kdump kernel and IOMMUs are all pre-enabled, try to copy
2703 struct amd_iommu *iommu; in early_enable_iommus() local
2716 if (pci_seg->old_dev_tbl_cpy != NULL) { in early_enable_iommus()
2717 free_pages((unsigned long)pci_seg->old_dev_tbl_cpy, in early_enable_iommus()
2718 get_order(pci_seg->dev_table_size)); in early_enable_iommus()
2719 pci_seg->old_dev_tbl_cpy = NULL; in early_enable_iommus()
2723 for_each_iommu(iommu) { in early_enable_iommus()
2724 clear_translation_pre_enabled(iommu); in early_enable_iommus()
2725 early_enable_iommu(iommu); in early_enable_iommus()
2731 free_pages((unsigned long)pci_seg->dev_table, in early_enable_iommus()
2732 get_order(pci_seg->dev_table_size)); in early_enable_iommus()
2733 pci_seg->dev_table = pci_seg->old_dev_tbl_cpy; in early_enable_iommus()
2736 for_each_iommu(iommu) { in early_enable_iommus()
2737 iommu_disable_command_buffer(iommu); in early_enable_iommus()
2738 iommu_disable_event_buffer(iommu); in early_enable_iommus()
2739 iommu_enable_command_buffer(iommu); in early_enable_iommus()
2740 iommu_enable_event_buffer(iommu); in early_enable_iommus()
2741 iommu_enable_ga(iommu); in early_enable_iommus()
2742 iommu_enable_xt(iommu); in early_enable_iommus()
2743 iommu_set_device_table(iommu); in early_enable_iommus()
2744 iommu_flush_all_caches(iommu); in early_enable_iommus()
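/*
 * Two paths above: when no usable pre-enabled state exists (or copying the
 * old device table failed), each IOMMU is brought up from scratch with
 * early_enable_iommu(). Otherwise the freshly allocated device table is
 * freed, the copy inherited from the previous kernel is installed, and
 * only the command/event buffers, GA/XT mode and device table base are
 * reprogrammed, without ever disabling translation.
 */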
2751 struct amd_iommu *iommu; in enable_iommus_v2() local
2753 for_each_iommu(iommu) { in enable_iommus_v2()
2754 iommu_enable_ppr_log(iommu); in enable_iommus_v2()
2755 iommu_enable_gt(iommu); in enable_iommus_v2()
2763 struct amd_iommu *iommu; in enable_iommus_vapic() local
2765 for_each_iommu(iommu) { in enable_iommus_vapic()
2770 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in enable_iommus_vapic()
2774 iommu_feature_disable(iommu, CONTROL_GALOG_EN); in enable_iommus_vapic()
2775 iommu_feature_disable(iommu, CONTROL_GAINT_EN); in enable_iommus_vapic()
2782 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in enable_iommus_vapic()
2806 for_each_iommu(iommu) { in enable_iommus_vapic()
2807 if (iommu_init_ga_log(iommu) || in enable_iommus_vapic()
2808 iommu_ga_log_enable(iommu)) in enable_iommus_vapic()
2811 iommu_feature_enable(iommu, CONTROL_GAM_EN); in enable_iommus_vapic()
2813 iommu_feature_enable(iommu, CONTROL_SNPAVIC_EN); in enable_iommus_vapic()
2830 struct amd_iommu *iommu; in disable_iommus() local
2832 for_each_iommu(iommu) in disable_iommus()
2833 iommu_disable(iommu); in disable_iommus()
2848 struct amd_iommu *iommu; in amd_iommu_resume() local
2850 for_each_iommu(iommu) in amd_iommu_resume()
2851 iommu_apply_resume_quirks(iommu); in amd_iommu_resume()
2853 /* re-load the hardware */ in amd_iommu_resume()
2896 * anymore - so be careful in check_ioapic_information()
2948 * This is the hardware init function for AMD IOMMU in the system.
2952 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
2981 return -ENODEV; in early_amd_iommu_init()
2985 return -ENODEV; in early_amd_iommu_init()
2989 return -EINVAL; in early_amd_iommu_init()
3005 /* Device table - directly used by all IOMMUs */ in early_amd_iommu_init()
3006 ret = -ENOMEM; in early_amd_iommu_init()
3015 * never allocate domain 0 because it's used as the non-allocated and in early_amd_iommu_init()
3041 ret = -ENOMEM; in early_amd_iommu_init()
3075 struct amd_iommu *iommu; in amd_iommu_enable_interrupts() local
3078 for_each_iommu(iommu) { in amd_iommu_enable_interrupts()
3079 ret = iommu_init_irq(iommu); in amd_iommu_enable_interrupts()
3108 /* Don't use IOMMU if there is Stoney Ridge graphics */ in detect_ivrs()
3114 pr_info("Disable IOMMU on Stoney Ridge\n"); in detect_ivrs()
3128 * AMD IOMMU Initialization State Machine
3140 ret = -ENODEV; in state_next()
3148 ret = -EINVAL; in state_next()
3180 ret = -EINVAL; in state_next()
3193 struct amd_iommu *iommu; in state_next() local
3199 for_each_iommu(iommu) in state_next()
3200 iommu_flush_all_caches(iommu); in state_next()
3208 int ret = -EINVAL; in iommu_go_to_state()
3234 return amd_iommu_irq_remap ? 0 : -ENODEV; in amd_iommu_prepare()
3269 * This is the core init function for AMD IOMMU hardware in the system.
3275 struct amd_iommu *iommu; in amd_iommu_init() local
3282 * We failed to initialize the AMD IOMMU - try fallback in amd_iommu_init()
3289 for_each_iommu(iommu) in amd_iommu_init()
3290 amd_iommu_debugfs_setup(iommu); in amd_iommu_init()
3309 pr_notice("IOMMU not currently supported when SME is active\n"); in amd_iommu_sme_check()
3316 * Early detect code. This code runs at IOMMU detection time in the DMA
3326 return -ENODEV; in amd_iommu_detect()
3329 return -ENODEV; in amd_iommu_detect()
3337 x86_init.iommu.iommu_init = amd_iommu_init; in amd_iommu_detect()
3344 * Parsing functions for the AMD IOMMU specific kernel command line
3374 return -EINVAL; in parse_amd_iommu_options()
3378 pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n"); in parse_amd_iommu_options()
3391 pr_notice("Unknown option - '%s'\n", str); in parse_amd_iommu_options()
3418 pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n", in parse_ivrs_ioapic()
3450 pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n", in parse_ivrs_hpet()
3510 * Since DTE[Mode]=0 is prohibited on SNP-enabled system in amd_iommu_v2_supported()
3521 struct amd_iommu *iommu; in get_amd_iommu() local
3523 for_each_iommu(iommu) in get_amd_iommu()
3525 return iommu; in get_amd_iommu()
3531 * IOMMU EFR Performance Counter support functionality. This code allows
3532 * access to the IOMMU PC functionality.
3538 struct amd_iommu *iommu = get_amd_iommu(idx); in amd_iommu_pc_get_max_banks() local
3540 if (iommu) in amd_iommu_pc_get_max_banks()
3541 return iommu->max_banks; in amd_iommu_pc_get_max_banks()
3555 struct amd_iommu *iommu = get_amd_iommu(idx); in amd_iommu_pc_get_max_counters() local
3557 if (iommu) in amd_iommu_pc_get_max_counters()
3558 return iommu->max_counters; in amd_iommu_pc_get_max_counters()
3564 static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, in iommu_pc_get_set_reg() argument
3570 /* Make sure the IOMMU PC resource is available */ in iommu_pc_get_set_reg()
3572 return -ENODEV; in iommu_pc_get_set_reg()
3574 /* Check for valid iommu and pc register indexing */ in iommu_pc_get_set_reg()
3575 if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7))) in iommu_pc_get_set_reg()
3576 return -ENODEV; in iommu_pc_get_set_reg()
3581 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) | in iommu_pc_get_set_reg()
3582 (iommu->max_counters << 8) | 0x28); in iommu_pc_get_set_reg()
3585 return -EINVAL; in iommu_pc_get_set_reg()
3590 writel((u32)val, iommu->mmio_base + offset); in iommu_pc_get_set_reg()
3591 writel((val >> 32), iommu->mmio_base + offset + 4); in iommu_pc_get_set_reg()
3593 *value = readl(iommu->mmio_base + offset + 4); in iommu_pc_get_set_reg()
3595 *value |= readl(iommu->mmio_base + offset); in iommu_pc_get_set_reg()
3602 int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) in amd_iommu_pc_get_reg() argument
3604 if (!iommu) in amd_iommu_pc_get_reg()
3605 return -EINVAL; in amd_iommu_pc_get_reg()
3607 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false); in amd_iommu_pc_get_reg()
3610 int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) in amd_iommu_pc_set_reg() argument
3612 if (!iommu) in amd_iommu_pc_set_reg()
3613 return -EINVAL; in amd_iommu_pc_set_reg()
3615 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true); in amd_iommu_pc_set_reg()
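/*
 * Minimal usage sketch (not part of this file): dump one register from
 * every bank/counter of the first IOMMU through the accessors above. The
 * function offset 0x00 is assumed here to select the counter value
 * register; any fxn up to 0x28 that is 8-byte aligned passes the check in
 * iommu_pc_get_set_reg().
 *
 *	struct amd_iommu *iommu = get_amd_iommu(0);
 *	u8 bank, cntr;
 *	u64 val;
 *
 *	for (bank = 0; bank < amd_iommu_pc_get_max_banks(0); bank++)
 *		for (cntr = 0; cntr < amd_iommu_pc_get_max_counters(0); cntr++)
 *			if (!amd_iommu_pc_get_reg(iommu, bank, cntr, 0x00, &val))
 *				pr_info("pc[%u][%u] = %llu\n", bank, cntr, val);
 */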
3622 * SNP support requires that the IOMMU is enabled, and is in amd_iommu_snp_enable()
3626 pr_err("SNP: IOMMU is disabled or configured in passthrough mode, SNP cannot be supported"); in amd_iommu_snp_enable()
3627 return -EINVAL; in amd_iommu_snp_enable()
3632 * affect how IOMMU driver sets up data structures and configures in amd_iommu_snp_enable()
3633 * IOMMU hardware. in amd_iommu_snp_enable()
3636 pr_err("SNP: Too late to enable SNP for IOMMU.\n"); in amd_iommu_snp_enable()
3637 return -EINVAL; in amd_iommu_snp_enable()
3642 return -EINVAL; in amd_iommu_snp_enable()
3646 /* Enforce IOMMU v1 pagetable when SNP is enabled. */ in amd_iommu_snp_enable()
3648 pr_warn("Forcing AMD IOMMU v1 page table due to SNP\n"); in amd_iommu_snp_enable()