Lines matching full:devid

104 u16 devid; member
122 u16 devid; member
140 u16 devid; member
281 static inline void update_last_devid(u16 devid) in update_last_devid() argument
283 if (devid > amd_iommu_last_bdf) in update_last_devid()
284 amd_iommu_last_bdf = devid; in update_last_devid()
578 update_last_devid(dev->devid); in find_last_devid_from_ivhd()
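
Note: update_last_devid() records the highest bus/device/function (BDF) seen while the IVHD device entries are parsed; amd_iommu_last_bdf then bounds the per-device table allocations. A minimal sketch of that relationship follows; dev_table_entries() is a hypothetical helper and the real driver's sizing rounds differently.

	/* Sketch only: highest BDF seen during IVHD parsing bounds the tables. */
	static u16 amd_iommu_last_bdf;			/* highest 16-bit BDF found */

	static inline void update_last_devid(u16 devid)
	{
		if (devid > amd_iommu_last_bdf)
			amd_iommu_last_bdf = devid;
	}

	static inline unsigned long dev_table_entries(void)
	{
		/* one device table entry per possible devid, 0 .. last_bdf */
		return (unsigned long)amd_iommu_last_bdf + 1;
	}
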
904 static void set_dev_entry_bit(u16 devid, u8 bit) in set_dev_entry_bit() argument
909 amd_iommu_dev_table[devid].data[i] |= (1UL << _bit); in set_dev_entry_bit()
912 static int get_dev_entry_bit(u16 devid, u8 bit) in get_dev_entry_bit() argument
917 return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit; in get_dev_entry_bit()
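
Note: the elided lines 905-908 and 913-916 compute which 64-bit word of the 256-bit device table entry (DTE) holds the requested bit. A sketch of that index arithmetic, reconstructed from context rather than quoted, assuming the usual four-u64 layout of struct dev_table_entry:

	struct dev_table_entry {
		u64 data[4];			/* one 256-bit DTE = four 64-bit words */
	};

	static struct dev_table_entry *amd_iommu_dev_table;

	static void set_dev_entry_bit(u16 devid, u8 bit)
	{
		int i    = (bit >> 6) & 0x03;	/* which of the four u64 words  */
		int _bit = bit & 0x3f;		/* bit position inside that word */

		amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
	}

	static int get_dev_entry_bit(u16 devid, u8 bit)
	{
		int i    = (bit >> 6) & 0x03;
		int _bit = bit & 0x3f;

		return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
	}
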
925 u32 lo, hi, devid, old_devtb_size; in copy_device_table() local
983 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { in copy_device_table()
984 old_dev_tbl_cpy[devid] = old_devtb[devid]; in copy_device_table()
985 dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK; in copy_device_table()
986 dte_v = old_devtb[devid].data[0] & DTE_FLAG_V; in copy_device_table()
989 old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0]; in copy_device_table()
990 old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1]; in copy_device_table()
993 if (old_devtb[devid].data[0] & DTE_FLAG_GV) { in copy_device_table()
996 old_dev_tbl_cpy[devid].data[1] &= ~tmp; in copy_device_table()
999 old_dev_tbl_cpy[devid].data[0] &= ~tmp; in copy_device_table()
1003 irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE; in copy_device_table()
1004 int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK; in copy_device_table()
1005 int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK; in copy_device_table()
1009 pr_err("Wrong old irq remapping flag: %#x\n", devid); in copy_device_table()
1013 old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2]; in copy_device_table()
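
Note: copy_device_table() runs on a kdump/kexec boot when the previous kernel left translation enabled, so in-flight DMA and interrupt remapping stay consistent until re-initialization. A condensed sketch of the per-entry filtering; copy_one_dte() is an invented helper name, the mask names (DEV_DOMID_MASK, DTE_FLAG_V, DTE_FLAG_GV) are the driver's, and the allocation plus the INTCTL/INTTABLEN sanity checks are only summarized.

	static void copy_one_dte(struct dev_table_entry *dst,
				 const struct dev_table_entry *src)
	{
		u64 dom_id = src->data[1] & DEV_DOMID_MASK;	/* old domain ID  */
		u64 dte_v  = src->data[0] & DTE_FLAG_V;		/* old DTE valid? */

		*dst = *src;					/* start with a raw copy */

		if (!dte_v || !dom_id)
			return;		/* not translated before: raw copy is enough */

		/* keep host translation, but strip stale guest (GV/GCR3) state    */
		/* keep the old interrupt table only if INTCTL/INTTABLEN look sane */
	}
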
1021 void amd_iommu_apply_erratum_63(u16 devid) in amd_iommu_apply_erratum_63() argument
1025 sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) | in amd_iommu_apply_erratum_63()
1026 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1); in amd_iommu_apply_erratum_63()
1029 set_dev_entry_bit(devid, DEV_ENTRY_IW); in amd_iommu_apply_erratum_63()
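
Note: line 1028, the conditional between the two reads and the write, does not mention devid and so is missing from this match list. In the upstream driver the erratum 63 workaround only forces the IW (write-permission) bit when the two SYSMGT bits decode to 01b; the reconstructed function below treats that `if` as an assumption, not a verbatim quote.

	void amd_iommu_apply_erratum_63(u16 devid)
	{
		int sysmgt;

		sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
			 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

		if (sysmgt == 0x01)			/* reconstructed condition */
			set_dev_entry_bit(devid, DEV_ENTRY_IW);
	}
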
1033 static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid) in set_iommu_for_device() argument
1035 amd_iommu_rlookup_table[devid] = iommu; in set_iommu_for_device()
1043 u16 devid, u32 flags, u32 ext_flags) in set_dev_entry_from_acpi() argument
1046 set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS); in set_dev_entry_from_acpi()
1048 set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS); in set_dev_entry_from_acpi()
1050 set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS); in set_dev_entry_from_acpi()
1052 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1); in set_dev_entry_from_acpi()
1054 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2); in set_dev_entry_from_acpi()
1056 set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS); in set_dev_entry_from_acpi()
1058 set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS); in set_dev_entry_from_acpi()
1060 amd_iommu_apply_erratum_63(devid); in set_dev_entry_from_acpi()
1062 set_iommu_for_device(iommu, devid); in set_dev_entry_from_acpi()
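
Note: the intervening odd-numbered lines (1045, 1047, ...) are the flag checks that decide which pass-through bits get set; they test `flags`, not devid, so they are absent above. A sketch of the flag-to-DTE mapping, using the driver's ACPI_DEVFLAG_* names but omitting the ext_flags handling:

	static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
						   u16 devid, u32 flags, u32 ext_flags)
	{
		if (flags & ACPI_DEVFLAG_INITPASS)
			set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
		if (flags & ACPI_DEVFLAG_EXTINT)
			set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
		if (flags & ACPI_DEVFLAG_NMI)
			set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
		if (flags & ACPI_DEVFLAG_SYSMGT1)
			set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
		if (flags & ACPI_DEVFLAG_SYSMGT2)
			set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
		if (flags & ACPI_DEVFLAG_LINT0)
			set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
		if (flags & ACPI_DEVFLAG_LINT1)
			set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

		amd_iommu_apply_erratum_63(devid);

		set_iommu_for_device(iommu, devid);
	}
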
1065 int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line) in add_special_device() argument
1084 *devid = entry->devid; in add_special_device()
1094 entry->devid = *devid; in add_special_device()
1102 static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid, in add_acpi_hid_device() argument
1116 *devid = entry->devid; in add_acpi_hid_device()
1126 entry->devid = *devid; in add_acpi_hid_device()
1128 entry->root_devid = (entry->devid & (~0x7)); in add_acpi_hid_device()
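
Note: line 1128 masks off the low three bits of the devid; those bits hold the PCI function number, so root_devid is function 0 of the same bus/device. A small illustration with made-up example values:

	/* a 16-bit devid packs bus[15:8], device[7:3], function[2:0],
	 * so (devid & ~0x7) selects function 0 of the same slot. */
	u16 devid      = 0x40a1;		/* example value: 40:14.1 */
	u16 root_devid = devid & (~0x7);	/* 0x40a0      -> 40:14.0 */
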
1145 &early_ioapic_map[i].devid, in add_early_maps()
1154 &early_hpet_map[i].devid, in add_early_maps()
1163 &early_acpihid_map[i].devid, in add_early_maps()
1181 u16 devid = 0, devid_start = 0, devid_to = 0; in init_iommu_from_acpi() local
1226 DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x " in init_iommu_from_acpi()
1228 PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1229 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1230 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1233 devid = e->devid; in init_iommu_from_acpi()
1234 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1239 "devid: %02x:%02x.%x flags: %02x\n", in init_iommu_from_acpi()
1240 PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1241 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1242 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1245 devid_start = e->devid; in init_iommu_from_acpi()
1252 DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x " in init_iommu_from_acpi()
1254 PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1255 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1256 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1262 devid = e->devid; in init_iommu_from_acpi()
1264 set_dev_entry_from_acpi(iommu, devid , e->flags, 0); in init_iommu_from_acpi()
1266 amd_iommu_alias_table[devid] = devid_to; in init_iommu_from_acpi()
1271 "devid: %02x:%02x.%x flags: %02x " in init_iommu_from_acpi()
1273 PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1274 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1275 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1281 devid_start = e->devid; in init_iommu_from_acpi()
1289 DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x " in init_iommu_from_acpi()
1291 PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1292 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1293 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1296 devid = e->devid; in init_iommu_from_acpi()
1297 set_dev_entry_from_acpi(iommu, devid, e->flags, in init_iommu_from_acpi()
1302 DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: " in init_iommu_from_acpi()
1304 PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1305 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1306 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1309 devid_start = e->devid; in init_iommu_from_acpi()
1316 DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n", in init_iommu_from_acpi()
1317 PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1318 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1319 PCI_FUNC(e->devid)); in init_iommu_from_acpi()
1321 devid = e->devid; in init_iommu_from_acpi()
1322 for (dev_i = devid_start; dev_i <= devid; ++dev_i) { in init_iommu_from_acpi()
1335 u16 devid; in init_iommu_from_acpi() local
1339 devid = (e->ext >> 8) & 0xffff; in init_iommu_from_acpi()
1351 PCI_BUS_NUM(devid), in init_iommu_from_acpi()
1352 PCI_SLOT(devid), in init_iommu_from_acpi()
1353 PCI_FUNC(devid)); in init_iommu_from_acpi()
1355 ret = add_special_device(type, handle, &devid, false); in init_iommu_from_acpi()
1360 * add_special_device might update the devid in case a in init_iommu_from_acpi()
1364 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
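
Note: for an IVHD special-device entry (IOAPIC/HPET), line 1339 pulls the requestor devid out of the entry's 32-bit ext field, and the comment at line 1360 points out that add_special_device() may replace it when an ivrs_ioapic=/ivrs_hpet= command-line override is present. A sketch of the ext unpacking; the handle and variety extractions are reconstructed from context (they do not mention devid), and `variety` is an illustrative name for what the driver keeps in a differently named local.

	u8  handle  =  e->ext        & 0xff;	/* IOAPIC ID or HPET number        */
	u16 devid   = (e->ext >>  8) & 0xffff;	/* requestor BDF used for DMA/IRQs */
	u8  variety = (e->ext >> 24) & 0xff;	/* IOAPIC vs. HPET special type    */
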
1369 u16 devid; in init_iommu_from_acpi() local
1411 devid = e->devid; in init_iommu_from_acpi()
1414 PCI_BUS_NUM(devid), in init_iommu_from_acpi()
1415 PCI_SLOT(devid), in init_iommu_from_acpi()
1416 PCI_FUNC(devid)); in init_iommu_from_acpi()
1420 ret = add_acpi_hid_device(hid, uid, &devid, false); in init_iommu_from_acpi()
1425 * add_special_device might update the devid in case a in init_iommu_from_acpi()
1429 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1549 iommu->devid = h->devid; in init_iommu_one()
1641 amd_iommu_rlookup_table[iommu->devid] = NULL; in init_iommu_one()
1658 u16 devid = ivhd->devid; in get_highest_supported_ivhd_type() local
1664 if (ivhd->devid == devid) in get_highest_supported_ivhd_type()
1692 PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid), in init_iommu_all()
1693 PCI_FUNC(h->devid), h->cap_ptr, in init_iommu_all()
1801 iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid), in iommu_init_pci()
1802 iommu->devid & 0xff); in iommu_init_pci()
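
Note: lines 1801-1802 turn the IOMMU's own devid back into a struct pci_dev. The upper byte is the bus number and the remaining low byte is the devfn expected by pci_get_domain_bus_and_slot(); PCI_BUS_NUM(), PCI_SLOT() and PCI_FUNC() are the generic PCI macros, and segment 0 is assumed here.

	u16 devid = iommu->devid;
	struct pci_dev *pdev =
		pci_get_domain_bus_and_slot(0,			/* PCI segment  */
					    PCI_BUS_NUM(devid),	/* bits 15:8    */
					    devid & 0xff);	/* devfn, 7:0   */
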
2240 e->devid_start = e->devid_end = m->devid; in init_unity_map_range()
2249 e->devid_start = m->devid; in init_unity_map_range()
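
Note: lines 2240 and 2249 are two arms of the IVMD type switch in init_unity_map_range(): a single-device entry, an all-devices entry, and a range entry whose end BDF sits in the aux field. A sketch of that switch using the upstream ACPI_IVMD_* case labels, with the allocation and address-range handling omitted; treat the exact field names as assumptions.

	switch (m->type) {
	case ACPI_IVMD_TYPE:		/* one device  */
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:	/* all devices */
		e->devid_start = 0;
		e->devid_end   = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:	/* devid range: start in devid, end in aux */
		e->devid_start = m->devid;
		e->devid_end   = m->aux;
		break;
	}
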
2304 u32 devid; in init_device_table_dma() local
2306 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { in init_device_table_dma()
2307 set_dev_entry_bit(devid, DEV_ENTRY_VALID); in init_device_table_dma()
2308 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION); in init_device_table_dma()
2314 u32 devid; in uninit_device_table_dma() local
2316 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { in uninit_device_table_dma()
2317 amd_iommu_dev_table[devid].data[0] = 0ULL; in uninit_device_table_dma()
2318 amd_iommu_dev_table[devid].data[1] = 0ULL; in uninit_device_table_dma()
2324 u32 devid; in init_device_table() local
2329 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) in init_device_table()
2330 set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN); in init_device_table()
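
Note: the three loops above reuse set_dev_entry_bit() from lines 904-909. Assuming the upstream DEV_ENTRY_* values (the defines are not part of this match list, so the numbers below are assumptions), the enable-all defaults land in the DTE words as follows:

	/* Assumed bit positions inside one 256-bit DTE: */
	#define DEV_ENTRY_VALID		0x00	/* -> data[0], bit 0 (V)  */
	#define DEV_ENTRY_TRANSLATION	0x01	/* -> data[0], bit 1 (TV) */
	#define DEV_ENTRY_IRQ_TBL_EN	0x80	/* -> data[2], bit 0 (IV) */

	/* With i = (bit >> 6) & 0x03 and _bit = bit & 0x3f from
	 * set_dev_entry_bit(), 0x80 selects word 2, bit 0: init_device_table()
	 * thus enables interrupt remapping for every devid with one call per
	 * table entry, while init_device_table_dma() marks each entry valid
	 * with translation enabled. */
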
2602 int devid, id = mpc_ioapic_id(idx); in check_ioapic_information() local
2604 devid = get_ioapic_devid(id); in check_ioapic_information()
2605 if (devid < 0) { in check_ioapic_information()
2609 } else if (devid == IOAPIC_SB_DEVID) { in check_ioapic_information()
3130 u16 devid; in parse_ivrs_ioapic() local
3145 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); in parse_ivrs_ioapic()
3150 early_ioapic_map[i].devid = devid; in parse_ivrs_ioapic()
3160 u16 devid; in parse_ivrs_hpet() local
3175 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); in parse_ivrs_hpet()
3180 early_hpet_map[i].devid = devid; in parse_ivrs_hpet()
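
Note: parse_ivrs_ioapic() and parse_ivrs_hpet() build the same 16-bit BDF from an ivrs_ioapic=/ivrs_hpet= command-line override (lines 3145 and 3175). The helper below just makes the packing explicit; make_devid() and the example values are illustrative, not part of the driver.

	static u16 make_devid(unsigned int bus, unsigned int dev, unsigned int fn)
	{
		return ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
	}

	/* e.g. an override mapping an IOAPIC to 00:14.0 stores
	 * make_devid(0x00, 0x14, 0x0) == 0x00a0 in early_ioapic_map[i].devid
	 * (line 3150). */
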
3211 early_acpihid_map[i].devid = in parse_ivrs_acpihid()