Lines matching refs: pe

58 void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,  in pe_level_printk()  argument
70 if (pe->flags & PNV_IODA_PE_DEV) in pe_level_printk()
71 strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix)); in pe_level_printk()
72 else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) in pe_level_printk()
74 pci_domain_nr(pe->pbus), pe->pbus->number); in pe_level_printk()
76 else if (pe->flags & PNV_IODA_PE_VF) in pe_level_printk()
78 pci_domain_nr(pe->parent_dev->bus), in pe_level_printk()
79 (pe->rid & 0xff00) >> 8, in pe_level_printk()
80 PCI_SLOT(pe->rid), PCI_FUNC(pe->rid)); in pe_level_printk()
84 level, pfix, pe->pe_number, &vaf); in pe_level_printk()
178 long pe; in pnv_ioda_alloc_pe() local
180 for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) { in pnv_ioda_alloc_pe()
181 if (!test_and_set_bit(pe, phb->ioda.pe_alloc)) in pnv_ioda_alloc_pe()
182 return pnv_ioda_init_pe(phb, pe); in pnv_ioda_alloc_pe()
188 static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe) in pnv_ioda_free_pe() argument
190 struct pnv_phb *phb = pe->phb; in pnv_ioda_free_pe()
191 unsigned int pe_num = pe->pe_number; in pnv_ioda_free_pe()
193 WARN_ON(pe->pdev); in pnv_ioda_free_pe()
195 memset(pe, 0, sizeof(struct pnv_ioda_pe)); in pnv_ioda_free_pe()
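A minimal standalone sketch of the allocation pattern visible in pnv_ioda_alloc_pe()/pnv_ioda_free_pe() above: PE numbers are handed out from the top of the range downward, and freeing simply clears the slot again. The plain bool array and helper names below are simplified stand-ins for the driver's pe_alloc bitmap, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

#define TOTAL_PE_NUM 256                        /* stand-in for phb->ioda.total_pe_num */

static bool pe_alloc[TOTAL_PE_NUM];             /* stand-in for phb->ioda.pe_alloc */

static long alloc_pe(void)
{
	long pe;

	/* scan from the highest PE number down, as the fragments above do */
	for (pe = TOTAL_PE_NUM - 1; pe >= 0; pe--) {
		if (!pe_alloc[pe]) {
			pe_alloc[pe] = true;    /* plays the role of test_and_set_bit() */
			return pe;
		}
	}
	return -1;                              /* no PE left */
}

static void free_pe(long pe)
{
	pe_alloc[pe] = false;                   /* the driver also zeroes the PE struct */
}

int main(void)
{
	long a = alloc_pe();                    /* 255 */
	long b = alloc_pe();                    /* 254 */

	printf("%ld %ld\n", a, b);
	free_pe(a);
	printf("%ld\n", alloc_pe());            /* 255 again */
	return 0;
}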
357 struct pnv_ioda_pe *master_pe, *pe; in pnv_ioda_pick_m64_pe() local
395 pe = &phb->ioda.pe_array[i]; in pnv_ioda_pick_m64_pe()
397 phb->ioda.m64_segmap[pe->pe_number] = pe->pe_number; in pnv_ioda_pick_m64_pe()
399 pe->flags |= PNV_IODA_PE_MASTER; in pnv_ioda_pick_m64_pe()
400 INIT_LIST_HEAD(&pe->slaves); in pnv_ioda_pick_m64_pe()
401 master_pe = pe; in pnv_ioda_pick_m64_pe()
403 pe->flags |= PNV_IODA_PE_SLAVE; in pnv_ioda_pick_m64_pe()
404 pe->master = master_pe; in pnv_ioda_pick_m64_pe()
405 list_add_tail(&pe->list, &master_pe->slaves); in pnv_ioda_pick_m64_pe()
419 pe->pe_number, OPAL_M64_WINDOW_TYPE, in pnv_ioda_pick_m64_pe()
420 pe->pe_number / PNV_IODA1_M64_SEGS, in pnv_ioda_pick_m64_pe()
421 pe->pe_number % PNV_IODA1_M64_SEGS); in pnv_ioda_pick_m64_pe()
425 pe->pe_number); in pnv_ioda_pick_m64_pe()
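The division and modulo on pe_number in the pnv_ioda_pick_m64_pe() fragments map an IODA1 PE onto an M64 BAR and a segment inside it. A tiny sketch of that index math, assuming PNV_IODA1_M64_SEGS is 8 (treated as an assumption here) and using hypothetical helper names:

#define PNV_IODA1_M64_SEGS 8    /* segments per M64 window; assumed value */

/* hypothetical helpers, not driver functions */
static inline int m64_window_index(int pe_number)
{
	return pe_number / PNV_IODA1_M64_SEGS;  /* which M64 BAR */
}

static inline int m64_segment_index(int pe_number)
{
	return pe_number % PNV_IODA1_M64_SEGS;  /* which segment in that BAR */
}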
527 struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_no]; in pnv_ioda_freeze_pe() local
532 if (pe->flags & PNV_IODA_PE_SLAVE) { in pnv_ioda_freeze_pe()
533 pe = pe->master; in pnv_ioda_freeze_pe()
534 if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER))) in pnv_ioda_freeze_pe()
537 pe_no = pe->pe_number; in pnv_ioda_freeze_pe()
551 if (!(pe->flags & PNV_IODA_PE_MASTER)) in pnv_ioda_freeze_pe()
554 list_for_each_entry(slave, &pe->slaves, list) { in pnv_ioda_freeze_pe()
567 struct pnv_ioda_pe *pe, *slave; in pnv_ioda_unfreeze_pe() local
571 pe = &phb->ioda.pe_array[pe_no]; in pnv_ioda_unfreeze_pe()
572 if (pe->flags & PNV_IODA_PE_SLAVE) { in pnv_ioda_unfreeze_pe()
573 pe = pe->master; in pnv_ioda_unfreeze_pe()
574 WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)); in pnv_ioda_unfreeze_pe()
575 pe_no = pe->pe_number; in pnv_ioda_unfreeze_pe()
586 if (!(pe->flags & PNV_IODA_PE_MASTER)) in pnv_ioda_unfreeze_pe()
590 list_for_each_entry(slave, &pe->slaves, list) { in pnv_ioda_unfreeze_pe()
607 struct pnv_ioda_pe *slave, *pe; in pnv_ioda_get_pe_state() local
620 pe = &phb->ioda.pe_array[pe_no]; in pnv_ioda_get_pe_state()
621 if (pe->flags & PNV_IODA_PE_SLAVE) { in pnv_ioda_get_pe_state()
622 pe = pe->master; in pnv_ioda_get_pe_state()
623 WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)); in pnv_ioda_get_pe_state()
624 pe_no = pe->pe_number; in pnv_ioda_get_pe_state()
639 if (!(pe->flags & PNV_IODA_PE_MASTER)) in pnv_ioda_get_pe_state()
642 list_for_each_entry(slave, &pe->slaves, list) { in pnv_ioda_get_pe_state()
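pnv_ioda_freeze_pe(), pnv_ioda_unfreeze_pe() and pnv_ioda_get_pe_state() all share one pattern: if the PE is a slave, redirect to its master, then fan the operation out over the master's slave list. A compact standalone model of that pattern (simplified types, not the driver's structures):

#include <stdio.h>

#define PE_MASTER 0x1
#define PE_SLAVE  0x2

struct pe {
	int number;
	unsigned int flags;
	struct pe *master;      /* valid when PE_SLAVE is set */
	struct pe *slaves;      /* head of slave list when PE_MASTER is set */
	struct pe *next;        /* linkage inside a master's slave list */
	int frozen;
};

static void freeze_pe(struct pe *pe)
{
	struct pe *slave;

	if (pe->flags & PE_SLAVE)
		pe = pe->master;                /* always act on the compound master */

	pe->frozen = 1;

	if (!(pe->flags & PE_MASTER))
		return;                         /* not a compound PE, nothing to fan out */

	for (slave = pe->slaves; slave; slave = slave->next)
		slave->frozen = 1;              /* propagate to every slave PE */
}

int main(void)
{
	struct pe master = { .number = 4, .flags = PE_MASTER };
	struct pe slave  = { .number = 5, .flags = PE_SLAVE, .master = &master };

	master.slaves = &slave;

	freeze_pe(&slave);                      /* redirects to PE 4, then hits PE 5 too */
	printf("%d %d\n", master.frozen, slave.frozen);   /* 1 1 */
	return 0;
}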
723 struct pnv_ioda_pe *pe, in pnv_ioda_set_peltv() argument
735 opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number, in pnv_ioda_set_peltv()
737 if (pe->flags & PNV_IODA_PE_MASTER) { in pnv_ioda_set_peltv()
738 list_for_each_entry(slave, &pe->slaves, list) in pnv_ioda_set_peltv()
751 ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add); in pnv_ioda_set_peltv()
756 if (pe->flags & PNV_IODA_PE_MASTER) { in pnv_ioda_set_peltv()
757 list_for_each_entry(slave, &pe->slaves, list) { in pnv_ioda_set_peltv()
758 ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add); in pnv_ioda_set_peltv()
764 if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS)) in pnv_ioda_set_peltv()
765 pdev = pe->pbus->self; in pnv_ioda_set_peltv()
766 else if (pe->flags & PNV_IODA_PE_DEV) in pnv_ioda_set_peltv()
767 pdev = pe->pdev->bus->self; in pnv_ioda_set_peltv()
769 else if (pe->flags & PNV_IODA_PE_VF) in pnv_ioda_set_peltv()
770 pdev = pe->parent_dev; in pnv_ioda_set_peltv()
778 ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add); in pnv_ioda_set_peltv()
789 static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) in pnv_ioda_deconfigure_pe() argument
797 if (pe->pbus) { in pnv_ioda_deconfigure_pe()
802 parent = pe->pbus->self; in pnv_ioda_deconfigure_pe()
803 if (pe->flags & PNV_IODA_PE_BUS_ALL) in pnv_ioda_deconfigure_pe()
804 count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1; in pnv_ioda_deconfigure_pe()
816 dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n", in pnv_ioda_deconfigure_pe()
821 rid_end = pe->rid + (count << 8); in pnv_ioda_deconfigure_pe()
824 if (pe->flags & PNV_IODA_PE_VF) in pnv_ioda_deconfigure_pe()
825 parent = pe->parent_dev; in pnv_ioda_deconfigure_pe()
828 parent = pe->pdev->bus->self; in pnv_ioda_deconfigure_pe()
832 rid_end = pe->rid + 1; in pnv_ioda_deconfigure_pe()
836 for (rid = pe->rid; rid < rid_end; rid++) in pnv_ioda_deconfigure_pe()
844 pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN); in pnv_ioda_deconfigure_pe()
850 opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number, in pnv_ioda_deconfigure_pe()
854 rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number, in pnv_ioda_deconfigure_pe()
855 pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN); in pnv_ioda_deconfigure_pe()
857 pe_warn(pe, "OPAL error %ld remove self from PELTV\n", rc); in pnv_ioda_deconfigure_pe()
858 rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid, in pnv_ioda_deconfigure_pe()
861 pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc); in pnv_ioda_deconfigure_pe()
863 pe->pbus = NULL; in pnv_ioda_deconfigure_pe()
864 pe->pdev = NULL; in pnv_ioda_deconfigure_pe()
866 pe->parent_dev = NULL; in pnv_ioda_deconfigure_pe()
872 static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) in pnv_ioda_configure_pe() argument
879 if (pe->pbus) { in pnv_ioda_configure_pe()
884 parent = pe->pbus->self; in pnv_ioda_configure_pe()
885 if (pe->flags & PNV_IODA_PE_BUS_ALL) in pnv_ioda_configure_pe()
886 count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1; in pnv_ioda_configure_pe()
898 dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n", in pnv_ioda_configure_pe()
903 rid_end = pe->rid + (count << 8); in pnv_ioda_configure_pe()
906 if (pe->flags & PNV_IODA_PE_VF) in pnv_ioda_configure_pe()
907 parent = pe->parent_dev; in pnv_ioda_configure_pe()
910 parent = pe->pdev->bus->self; in pnv_ioda_configure_pe()
914 rid_end = pe->rid + 1; in pnv_ioda_configure_pe()
923 rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid, in pnv_ioda_configure_pe()
926 pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc); in pnv_ioda_configure_pe()
935 pnv_ioda_set_peltv(phb, pe, true); in pnv_ioda_configure_pe()
938 for (rid = pe->rid; rid < rid_end; rid++) in pnv_ioda_configure_pe()
939 phb->ioda.pe_rmap[rid] = pe->pe_number; in pnv_ioda_configure_pe()
943 pe->mve_number = 0; in pnv_ioda_configure_pe()
947 pe->mve_number = pe->pe_number; in pnv_ioda_configure_pe()
948 rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number); in pnv_ioda_configure_pe()
950 pe_err(pe, "OPAL error %ld setting up MVE %x\n", in pnv_ioda_configure_pe()
951 rc, pe->mve_number); in pnv_ioda_configure_pe()
952 pe->mve_number = -1; in pnv_ioda_configure_pe()
955 pe->mve_number, OPAL_ENABLE_MVE); in pnv_ioda_configure_pe()
957 pe_err(pe, "OPAL error %ld enabling MVE %x\n", in pnv_ioda_configure_pe()
958 rc, pe->mve_number); in pnv_ioda_configure_pe()
959 pe->mve_number = -1; in pnv_ioda_configure_pe()
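pnv_ioda_configure_pe() and pnv_ioda_deconfigure_pe() derive the RID span of a PE the same way: one RID for a device or VF, count << 8 RIDs for a bus PE, then every RID in the span is (un)recorded in the reverse map. A self-contained sketch of that computation, with the flag values and pe_rmap[] as simplified stand-ins:

#include <stdio.h>
#include <stdint.h>

#define PE_DEV      0x01
#define PE_BUS      0x02
#define PE_BUS_ALL  0x04

static int pe_rmap[1 << 16];    /* RID (bus << 8 | devfn) -> PE number */

static void map_pe_rids(unsigned int flags, uint16_t rid,
			unsigned int bus_count, int pe_number)
{
	unsigned int r, rid_end;

	if (flags & (PE_BUS | PE_BUS_ALL)) {
		/* PE_BUS covers one bus; PE_BUS_ALL covers the whole range */
		unsigned int count = (flags & PE_BUS_ALL) ? bus_count : 1;

		rid_end = rid + (count << 8);
	} else {
		rid_end = rid + 1;              /* single device or VF */
	}

	for (r = rid; r < rid_end; r++)
		pe_rmap[r] = pe_number;
}

int main(void)
{
	/* bus 0x04 plus one subordinate bus, every function mapped to PE 7 */
	map_pe_rids(PE_BUS_ALL, 0x0400, 2, 7);
	printf("%d %d\n", pe_rmap[0x0400], pe_rmap[0x05ff]);    /* 7 7 */
	return 0;
}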
1057 struct pnv_ioda_pe *pe; in pnv_ioda_setup_dev_PE() local
1067 pe = pnv_ioda_alloc_pe(phb); in pnv_ioda_setup_dev_PE()
1068 if (!pe) { in pnv_ioda_setup_dev_PE()
1082 pdn->pe_number = pe->pe_number; in pnv_ioda_setup_dev_PE()
1083 pe->flags = PNV_IODA_PE_DEV; in pnv_ioda_setup_dev_PE()
1084 pe->pdev = dev; in pnv_ioda_setup_dev_PE()
1085 pe->pbus = NULL; in pnv_ioda_setup_dev_PE()
1086 pe->mve_number = -1; in pnv_ioda_setup_dev_PE()
1087 pe->rid = dev->bus->number << 8 | pdn->devfn; in pnv_ioda_setup_dev_PE()
1089 pe_info(pe, "Associated device to PE\n"); in pnv_ioda_setup_dev_PE()
1091 if (pnv_ioda_configure_pe(phb, pe)) { in pnv_ioda_setup_dev_PE()
1093 pnv_ioda_free_pe(pe); in pnv_ioda_setup_dev_PE()
1095 pe->pdev = NULL; in pnv_ioda_setup_dev_PE()
1101 list_add_tail(&pe->list, &phb->ioda.pe_list); in pnv_ioda_setup_dev_PE()
1103 return pe; in pnv_ioda_setup_dev_PE()
1106 static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe) in pnv_ioda_setup_same_PE() argument
1127 pe->device_count++; in pnv_ioda_setup_same_PE()
1128 pdn->pe_number = pe->pe_number; in pnv_ioda_setup_same_PE()
1129 if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) in pnv_ioda_setup_same_PE()
1130 pnv_ioda_setup_same_PE(dev->subordinate, pe); in pnv_ioda_setup_same_PE()
1144 struct pnv_ioda_pe *pe = NULL; in pnv_ioda_setup_bus_PE() local
1153 pe = &phb->ioda.pe_array[pe_num]; in pnv_ioda_setup_bus_PE()
1154 pnv_ioda_setup_same_PE(bus, pe); in pnv_ioda_setup_bus_PE()
1161 pe = &phb->ioda.pe_array[phb->ioda.root_pe_idx]; in pnv_ioda_setup_bus_PE()
1164 if (!pe && phb->pick_m64_pe) in pnv_ioda_setup_bus_PE()
1165 pe = phb->pick_m64_pe(bus, all); in pnv_ioda_setup_bus_PE()
1168 if (!pe) in pnv_ioda_setup_bus_PE()
1169 pe = pnv_ioda_alloc_pe(phb); in pnv_ioda_setup_bus_PE()
1171 if (!pe) { in pnv_ioda_setup_bus_PE()
1177 pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS); in pnv_ioda_setup_bus_PE()
1178 pe->pbus = bus; in pnv_ioda_setup_bus_PE()
1179 pe->pdev = NULL; in pnv_ioda_setup_bus_PE()
1180 pe->mve_number = -1; in pnv_ioda_setup_bus_PE()
1181 pe->rid = bus->busn_res.start << 8; in pnv_ioda_setup_bus_PE()
1184 pe_info(pe, "Secondary bus %d..%d associated with PE#%x\n", in pnv_ioda_setup_bus_PE()
1185 bus->busn_res.start, bus->busn_res.end, pe->pe_number); in pnv_ioda_setup_bus_PE()
1187 pe_info(pe, "Secondary bus %d associated with PE#%x\n", in pnv_ioda_setup_bus_PE()
1188 bus->busn_res.start, pe->pe_number); in pnv_ioda_setup_bus_PE()
1190 if (pnv_ioda_configure_pe(phb, pe)) { in pnv_ioda_setup_bus_PE()
1192 pnv_ioda_free_pe(pe); in pnv_ioda_setup_bus_PE()
1193 pe->pbus = NULL; in pnv_ioda_setup_bus_PE()
1198 pnv_ioda_setup_same_PE(bus, pe); in pnv_ioda_setup_bus_PE()
1201 list_add_tail(&pe->list, &phb->ioda.pe_list); in pnv_ioda_setup_bus_PE()
1203 return pe; in pnv_ioda_setup_bus_PE()
1210 struct pnv_ioda_pe *pe; in pnv_ioda_setup_npu_PE() local
1227 pe = &phb->ioda.pe_array[pe_num]; in pnv_ioda_setup_npu_PE()
1228 if (!pe->pdev) in pnv_ioda_setup_npu_PE()
1231 if (pnv_pci_get_gpu_dev(pe->pdev) == gpu_pdev) { in pnv_ioda_setup_npu_PE()
1243 phb->ioda.pe_rmap[rid] = pe->pe_number; in pnv_ioda_setup_npu_PE()
1264 return pe; in pnv_ioda_setup_npu_PE()
1441 static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe *pe) in pnv_pci_ioda2_release_dma_pe() argument
1446 tbl = pe->table_group.tables[0]; in pnv_pci_ioda2_release_dma_pe()
1447 rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0); in pnv_pci_ioda2_release_dma_pe()
1449 pe_warn(pe, "OPAL error %ld release DMA window\n", rc); in pnv_pci_ioda2_release_dma_pe()
1451 pnv_pci_ioda2_set_bypass(pe, false); in pnv_pci_ioda2_release_dma_pe()
1452 if (pe->table_group.group) { in pnv_pci_ioda2_release_dma_pe()
1453 iommu_group_put(pe->table_group.group); in pnv_pci_ioda2_release_dma_pe()
1454 BUG_ON(pe->table_group.group); in pnv_pci_ioda2_release_dma_pe()
1464 struct pnv_ioda_pe *pe, *pe_n; in pnv_ioda_release_vf_PE() local
1475 list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) { in pnv_ioda_release_vf_PE()
1476 if (pe->parent_dev != pdev) in pnv_ioda_release_vf_PE()
1479 pnv_pci_ioda2_release_dma_pe(pdev, pe); in pnv_ioda_release_vf_PE()
1483 list_del(&pe->list); in pnv_ioda_release_vf_PE()
1486 pnv_ioda_deconfigure_pe(phb, pe); in pnv_ioda_release_vf_PE()
1488 pnv_ioda_free_pe(pe); in pnv_ioda_release_vf_PE()
1497 struct pnv_ioda_pe *pe; in pnv_pci_sriov_disable() local
1523 pe = &phb->ioda.pe_array[pdn->pe_num_map[i]]; in pnv_pci_sriov_disable()
1524 pnv_ioda_free_pe(pe); in pnv_pci_sriov_disable()
1534 struct pnv_ioda_pe *pe);
1540 struct pnv_ioda_pe *pe; in pnv_ioda_setup_vf_PE() local
1560 pe = &phb->ioda.pe_array[pe_num]; in pnv_ioda_setup_vf_PE()
1561 pe->pe_number = pe_num; in pnv_ioda_setup_vf_PE()
1562 pe->phb = phb; in pnv_ioda_setup_vf_PE()
1563 pe->flags = PNV_IODA_PE_VF; in pnv_ioda_setup_vf_PE()
1564 pe->pbus = NULL; in pnv_ioda_setup_vf_PE()
1565 pe->parent_dev = pdev; in pnv_ioda_setup_vf_PE()
1566 pe->mve_number = -1; in pnv_ioda_setup_vf_PE()
1567 pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) | in pnv_ioda_setup_vf_PE()
1570 pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n", in pnv_ioda_setup_vf_PE()
1575 if (pnv_ioda_configure_pe(phb, pe)) { in pnv_ioda_setup_vf_PE()
1577 pnv_ioda_free_pe(pe); in pnv_ioda_setup_vf_PE()
1578 pe->pdev = NULL; in pnv_ioda_setup_vf_PE()
1584 list_add_tail(&pe->list, &phb->ioda.pe_list); in pnv_ioda_setup_vf_PE()
1587 pnv_pci_ioda2_setup_dma_pe(phb, pe); in pnv_ioda_setup_vf_PE()
1596 struct pnv_ioda_pe *pe; in pnv_pci_sriov_enable() local
1640 pe = pnv_ioda_alloc_pe(phb); in pnv_pci_sriov_enable()
1641 if (!pe) { in pnv_pci_sriov_enable()
1646 pdn->pe_num_map[i] = pe->pe_number; in pnv_pci_sriov_enable()
1694 pe = &phb->ioda.pe_array[pdn->pe_num_map[i]]; in pnv_pci_sriov_enable()
1695 pnv_ioda_free_pe(pe); in pnv_pci_sriov_enable()
1727 struct pnv_ioda_pe *pe; in pnv_pci_ioda_dma_dev_setup() local
1737 pe = &phb->ioda.pe_array[pdn->pe_number]; in pnv_pci_ioda_dma_dev_setup()
1739 set_dma_offset(&pdev->dev, pe->tce_bypass_base); in pnv_pci_ioda_dma_dev_setup()
1740 set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]); in pnv_pci_ioda_dma_dev_setup()
1749 static bool pnv_pci_ioda_pe_single_vendor(struct pnv_ioda_pe *pe) in pnv_pci_ioda_pe_single_vendor() argument
1754 if (pe->device_count == 1) in pnv_pci_ioda_pe_single_vendor()
1758 if (!pe->pbus) in pnv_pci_ioda_pe_single_vendor()
1761 list_for_each_entry(pdev, &pe->pbus->devices, bus_list) { in pnv_pci_ioda_pe_single_vendor()
1791 static int pnv_pci_ioda_dma_64bit_bypass(struct pnv_ioda_pe *pe) in pnv_pci_ioda_dma_64bit_bypass() argument
1810 table_pages = alloc_pages_node(pe->phb->hose->node, GFP_KERNEL, in pnv_pci_ioda_dma_64bit_bypass()
1826 rc = opal_pci_map_pe_dma_window(pe->phb->opal_id, in pnv_pci_ioda_dma_64bit_bypass()
1827 pe->pe_number, in pnv_pci_ioda_dma_64bit_bypass()
1829 (pe->pe_number << 1) + 0, in pnv_pci_ioda_dma_64bit_bypass()
1835 pe_info(pe, "Using 64-bit DMA iommu bypass (through TVE#0)\n"); in pnv_pci_ioda_dma_64bit_bypass()
1839 pe_err(pe, "Error configuring 64-bit DMA bypass\n"); in pnv_pci_ioda_dma_64bit_bypass()
1848 struct pnv_ioda_pe *pe; in pnv_pci_ioda_dma_set_mask() local
1856 pe = &phb->ioda.pe_array[pdn->pe_number]; in pnv_pci_ioda_dma_set_mask()
1857 if (pe->tce_bypass_enabled) { in pnv_pci_ioda_dma_set_mask()
1858 top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1; in pnv_pci_ioda_dma_set_mask()
1874 pnv_pci_ioda_pe_single_vendor(pe) && in pnv_pci_ioda_dma_set_mask()
1877 rc = pnv_pci_ioda_dma_64bit_bypass(pe); in pnv_pci_ioda_dma_set_mask()
1909 struct pnv_ioda_pe *pe; in pnv_pci_ioda_dma_get_required_mask() local
1915 pe = &phb->ioda.pe_array[pdn->pe_number]; in pnv_pci_ioda_dma_get_required_mask()
1916 if (!pe->tce_bypass_enabled) in pnv_pci_ioda_dma_get_required_mask()
1920 end = pe->tce_bypass_base + memblock_end_of_DRAM(); in pnv_pci_ioda_dma_get_required_mask()
1927 static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, in pnv_ioda_setup_bus_dma() argument
1934 set_iommu_table_base(&dev->dev, pe->table_group.tables[0]); in pnv_ioda_setup_bus_dma()
1935 set_dma_offset(&dev->dev, pe->tce_bypass_base); in pnv_ioda_setup_bus_dma()
1939 if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) in pnv_ioda_setup_bus_dma()
1940 pnv_ioda_setup_bus_dma(pe, dev->subordinate, in pnv_ioda_setup_bus_dma()
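pnv_ioda_setup_same_PE() and pnv_ioda_setup_bus_dma() both walk a bus and, when the PE spans the whole subtree (PNV_IODA_PE_BUS_ALL), recurse into each bridge's subordinate bus. A stripped-down model of that walk, with hypothetical types in place of the kernel's pci_bus/pci_dev lists:

#include <stdio.h>

struct bus;

struct dev {
	struct bus *subordinate;        /* non-NULL for bridges */
	struct dev *next;
};

struct bus {
	struct dev *devices;
};

static void setup_bus(struct bus *bus, int pe_number, int whole_subtree)
{
	struct dev *dev;

	for (dev = bus->devices; dev; dev = dev->next) {
		/* per-device work (DMA offset, IOMMU table) would go here */
		printf("device on PE#%d\n", pe_number);

		if (whole_subtree && dev->subordinate)
			setup_bus(dev->subordinate, pe_number, whole_subtree);
	}
}

int main(void)
{
	struct dev leaf = { NULL, NULL };
	struct bus child = { &leaf };
	struct dev bridge = { &child, NULL };
	struct bus root = { &bridge };

	setup_bus(&root, 7, 1);         /* visits the bridge, then recurses to the leaf */
	return 0;
}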
1958 struct pnv_ioda_pe *pe = container_of(tgl->table_group, in pnv_pci_p7ioc_tce_invalidate() local
1960 __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm); in pnv_pci_p7ioc_tce_invalidate()
2062 static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe) in pnv_pci_phb3_tce_invalidate_pe() argument
2065 __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false); in pnv_pci_phb3_tce_invalidate_pe()
2066 unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF); in pnv_pci_phb3_tce_invalidate_pe()
2072 static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm, in pnv_pci_phb3_tce_invalidate() argument
2076 __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm); in pnv_pci_phb3_tce_invalidate()
2081 start |= (pe->pe_number & 0xFF); in pnv_pci_phb3_tce_invalidate()
2099 static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe) in pnv_pci_ioda2_tce_invalidate_pe() argument
2101 struct pnv_phb *phb = pe->phb; in pnv_pci_ioda2_tce_invalidate_pe()
2104 pnv_pci_phb3_tce_invalidate_pe(pe); in pnv_pci_ioda2_tce_invalidate_pe()
2107 pe->pe_number, 0, 0, 0); in pnv_pci_ioda2_tce_invalidate_pe()
2116 struct pnv_ioda_pe *pe = container_of(tgl->table_group, in pnv_pci_ioda2_tce_invalidate() local
2118 struct pnv_phb *phb = pe->phb; in pnv_pci_ioda2_tce_invalidate()
2136 pnv_pci_phb3_tce_invalidate(pe, rm, shift, in pnv_pci_ioda2_tce_invalidate()
2141 pe->pe_number, 1u << shift, in pnv_pci_ioda2_tce_invalidate()
2234 static unsigned int pnv_pci_ioda_pe_dma_weight(struct pnv_ioda_pe *pe) in pnv_pci_ioda_pe_dma_weight() argument
2240 if ((pe->flags & PNV_IODA_PE_VF) && pe->parent_dev) { in pnv_pci_ioda_pe_dma_weight()
2241 pnv_pci_ioda_dev_dma_weight(pe->parent_dev, &weight); in pnv_pci_ioda_pe_dma_weight()
2246 if ((pe->flags & PNV_IODA_PE_DEV) && pe->pdev) { in pnv_pci_ioda_pe_dma_weight()
2247 pnv_pci_ioda_dev_dma_weight(pe->pdev, &weight); in pnv_pci_ioda_pe_dma_weight()
2248 } else if ((pe->flags & PNV_IODA_PE_BUS) && pe->pbus) { in pnv_pci_ioda_pe_dma_weight()
2251 list_for_each_entry(pdev, &pe->pbus->devices, bus_list) in pnv_pci_ioda_pe_dma_weight()
2253 } else if ((pe->flags & PNV_IODA_PE_BUS_ALL) && pe->pbus) { in pnv_pci_ioda_pe_dma_weight()
2254 pci_walk_bus(pe->pbus, pnv_pci_ioda_dev_dma_weight, &weight); in pnv_pci_ioda_pe_dma_weight()
2261 struct pnv_ioda_pe *pe) in pnv_pci_ioda1_setup_dma_pe() argument
2274 weight = pnv_pci_ioda_pe_dma_weight(pe); in pnv_pci_ioda1_setup_dma_pe()
2304 pe_warn(pe, "No available DMA32 segments\n"); in pnv_pci_ioda1_setup_dma_pe()
2313 iommu_register_group(&pe->table_group, phb->hose->global_number, in pnv_pci_ioda1_setup_dma_pe()
2314 pe->pe_number); in pnv_pci_ioda1_setup_dma_pe()
2315 pnv_pci_link_table_and_group(phb->hose->node, 0, tbl, &pe->table_group); in pnv_pci_ioda1_setup_dma_pe()
2318 pe_info(pe, "DMA weight %d (%d), assigned (%d) %d DMA32 segments\n", in pnv_pci_ioda1_setup_dma_pe()
2320 pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n", in pnv_pci_ioda1_setup_dma_pe()
2336 pe_err(pe, " Failed to allocate a 32-bit TCE memory\n"); in pnv_pci_ioda1_setup_dma_pe()
2345 pe->pe_number, in pnv_pci_ioda1_setup_dma_pe()
2350 pe_err(pe, " Failed to configure 32-bit TCE table," in pnv_pci_ioda1_setup_dma_pe()
2358 phb->ioda.dma32_segmap[i] = pe->pe_number; in pnv_pci_ioda1_setup_dma_pe()
2366 pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift; in pnv_pci_ioda1_setup_dma_pe()
2367 pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift; in pnv_pci_ioda1_setup_dma_pe()
2370 if (pe->flags & PNV_IODA_PE_DEV) { in pnv_pci_ioda1_setup_dma_pe()
2376 set_iommu_table_base(&pe->pdev->dev, tbl); in pnv_pci_ioda1_setup_dma_pe()
2377 iommu_add_device(&pe->pdev->dev); in pnv_pci_ioda1_setup_dma_pe()
2378 } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) in pnv_pci_ioda1_setup_dma_pe()
2379 pnv_ioda_setup_bus_dma(pe, pe->pbus, true); in pnv_pci_ioda1_setup_dma_pe()
2387 pnv_pci_unlink_table_and_group(tbl, &pe->table_group); in pnv_pci_ioda1_setup_dma_pe()
2395 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, in pnv_pci_ioda2_set_window() local
2397 struct pnv_phb *phb = pe->phb; in pnv_pci_ioda2_set_window()
2404 pe_info(pe, "Setting up window#%d %llx..%llx pg=%x\n", num, in pnv_pci_ioda2_set_window()
2413 pe->pe_number, in pnv_pci_ioda2_set_window()
2414 (pe->pe_number << 1) + num, in pnv_pci_ioda2_set_window()
2420 pe_err(pe, "Failed to configure TCE table, err %ld\n", rc); in pnv_pci_ioda2_set_window()
2425 tbl, &pe->table_group); in pnv_pci_ioda2_set_window()
2426 pnv_pci_ioda2_tce_invalidate_pe(pe); in pnv_pci_ioda2_set_window()
2431 void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable) in pnv_pci_ioda2_set_bypass() argument
2433 uint16_t window_id = (pe->pe_number << 1 ) + 1; in pnv_pci_ioda2_set_bypass()
2436 pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis"); in pnv_pci_ioda2_set_bypass()
2441 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, in pnv_pci_ioda2_set_bypass()
2442 pe->pe_number, in pnv_pci_ioda2_set_bypass()
2444 pe->tce_bypass_base, in pnv_pci_ioda2_set_bypass()
2447 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, in pnv_pci_ioda2_set_bypass()
2448 pe->pe_number, in pnv_pci_ioda2_set_bypass()
2450 pe->tce_bypass_base, in pnv_pci_ioda2_set_bypass()
2454 pe_err(pe, "OPAL error %lld configuring bypass window\n", rc); in pnv_pci_ioda2_set_bypass()
2456 pe->tce_bypass_enabled = enable; in pnv_pci_ioda2_set_bypass()
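The window arithmetic running through the IODA2 fragments, pe_number << 1 plus a window number for the TCE windows in pnv_pci_ioda2_set_window()/unset_window() and the +1 slot for the 64-bit bypass in pnv_pci_ioda2_set_bypass(), reduces to two small helpers. The names below are hypothetical; only the arithmetic comes from the listing.

/* hypothetical helpers; the shift-and-add comes from the fragments above */
static inline int pnv_dma_window_id(int pe_number, int num)
{
	return (pe_number << 1) + num;  /* num 0 is the default 32-bit TCE window */
}

static inline int pnv_bypass_window_id(int pe_number)
{
	return (pe_number << 1) + 1;    /* the bypass window uses the second slot */
}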
2463 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, in pnv_pci_ioda2_create_table() local
2465 int nid = pe->phb->hose->node; in pnv_pci_ioda2_create_table()
2466 __u64 bus_offset = num ? pe->tce_bypass_base : table_group->tce32_start; in pnv_pci_ioda2_create_table()
2489 static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe) in pnv_pci_ioda2_setup_default_config() argument
2506 const u64 window_size = min((u64)pe->table_group.tce32_size, max_memory); in pnv_pci_ioda2_setup_default_config()
2508 rc = pnv_pci_ioda2_create_table(&pe->table_group, 0, in pnv_pci_ioda2_setup_default_config()
2513 pe_err(pe, "Failed to create 32-bit TCE table, err %ld", in pnv_pci_ioda2_setup_default_config()
2518 iommu_init_table(tbl, pe->phb->hose->node); in pnv_pci_ioda2_setup_default_config()
2520 rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl); in pnv_pci_ioda2_setup_default_config()
2522 pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n", in pnv_pci_ioda2_setup_default_config()
2529 pnv_pci_ioda2_set_bypass(pe, true); in pnv_pci_ioda2_setup_default_config()
2536 if (pe->flags & PNV_IODA_PE_DEV) in pnv_pci_ioda2_setup_default_config()
2537 set_iommu_table_base(&pe->pdev->dev, tbl); in pnv_pci_ioda2_setup_default_config()
2546 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, in pnv_pci_ioda2_unset_window() local
2548 struct pnv_phb *phb = pe->phb; in pnv_pci_ioda2_unset_window()
2551 pe_info(pe, "Removing DMA window #%d\n", num); in pnv_pci_ioda2_unset_window()
2553 ret = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number, in pnv_pci_ioda2_unset_window()
2554 (pe->pe_number << 1) + num, in pnv_pci_ioda2_unset_window()
2558 pe_warn(pe, "Unmapping failed, ret = %ld\n", ret); in pnv_pci_ioda2_unset_window()
2560 pnv_pci_ioda2_tce_invalidate_pe(pe); in pnv_pci_ioda2_unset_window()
2612 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, in pnv_ioda2_take_ownership() local
2615 struct iommu_table *tbl = pe->table_group.tables[0]; in pnv_ioda2_take_ownership()
2617 pnv_pci_ioda2_set_bypass(pe, false); in pnv_ioda2_take_ownership()
2618 pnv_pci_ioda2_unset_window(&pe->table_group, 0); in pnv_ioda2_take_ownership()
2619 if (pe->pbus) in pnv_ioda2_take_ownership()
2620 pnv_ioda_setup_bus_dma(pe, pe->pbus, false); in pnv_ioda2_take_ownership()
2626 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, in pnv_ioda2_release_ownership() local
2629 pnv_pci_ioda2_setup_default_config(pe); in pnv_ioda2_release_ownership()
2630 if (pe->pbus) in pnv_ioda2_release_ownership()
2631 pnv_ioda_setup_bus_dma(pe, pe->pbus, false); in pnv_ioda2_release_ownership()
2752 struct pnv_ioda_pe *pe, *gpe; in pnv_pci_ioda_setup_iommu_api() local
2764 list_for_each_entry(pe, &phb->ioda.pe_list, list) { in pnv_pci_ioda_setup_iommu_api()
2765 gpe = pnv_pci_npu_setup_iommu(pe); in pnv_pci_ioda_setup_iommu_api()
2804 struct pnv_ioda_pe *pe) in pnv_pci_ioda2_setup_dma_pe() argument
2808 if (!pnv_pci_ioda_pe_dma_weight(pe)) in pnv_pci_ioda2_setup_dma_pe()
2812 pe->tce_bypass_base = 1ull << 59; in pnv_pci_ioda2_setup_dma_pe()
2814 iommu_register_group(&pe->table_group, phb->hose->global_number, in pnv_pci_ioda2_setup_dma_pe()
2815 pe->pe_number); in pnv_pci_ioda2_setup_dma_pe()
2818 pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n", in pnv_pci_ioda2_setup_dma_pe()
2822 pe->table_group.tce32_start = 0; in pnv_pci_ioda2_setup_dma_pe()
2823 pe->table_group.tce32_size = phb->ioda.m32_pci_base; in pnv_pci_ioda2_setup_dma_pe()
2824 pe->table_group.max_dynamic_windows_supported = in pnv_pci_ioda2_setup_dma_pe()
2826 pe->table_group.max_levels = POWERNV_IOMMU_MAX_LEVELS; in pnv_pci_ioda2_setup_dma_pe()
2827 pe->table_group.pgsizes = pnv_ioda_parse_tce_sizes(phb); in pnv_pci_ioda2_setup_dma_pe()
2829 pe->table_group.ops = &pnv_pci_ioda2_ops; in pnv_pci_ioda2_setup_dma_pe()
2832 rc = pnv_pci_ioda2_setup_default_config(pe); in pnv_pci_ioda2_setup_dma_pe()
2836 if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) in pnv_pci_ioda2_setup_dma_pe()
2837 pnv_ioda_setup_bus_dma(pe, pe->pbus, true); in pnv_pci_ioda2_setup_dma_pe()
2899 struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev); in pnv_pci_ioda_msi_setup() local
2905 if (pe == NULL) in pnv_pci_ioda_msi_setup()
2909 if (pe->mve_number < 0) in pnv_pci_ioda_msi_setup()
2917 rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num); in pnv_pci_ioda_msi_setup()
2927 rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1, in pnv_pci_ioda_msi_setup()
2939 rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1, in pnv_pci_ioda_msi_setup()
2956 msg->address_hi, msg->address_lo, data, pe->pe_number); in pnv_pci_ioda_msi_setup()
3081 static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe, in pnv_ioda_setup_pe_res() argument
3084 struct pnv_phb *phb = pe->phb; in pnv_ioda_setup_pe_res()
3099 phb->ioda.io_segmap[index] = pe->pe_number; in pnv_ioda_setup_pe_res()
3101 pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index); in pnv_ioda_setup_pe_res()
3104 __func__, rc, index, pe->pe_number); in pnv_ioda_setup_pe_res()
3123 phb->ioda.m32_segmap[index] = pe->pe_number; in pnv_ioda_setup_pe_res()
3125 pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index); in pnv_ioda_setup_pe_res()
3128 __func__, rc, index, pe->pe_number); in pnv_ioda_setup_pe_res()
3143 static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe) in pnv_ioda_setup_pe_seg() argument
3153 BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))); in pnv_ioda_setup_pe_seg()
3155 list_for_each_entry(pdev, &pe->pbus->devices, bus_list) { in pnv_ioda_setup_pe_seg()
3157 pnv_ioda_setup_pe_res(pe, &pdev->resource[i]); in pnv_ioda_setup_pe_seg()
3164 if (!(pe->flags & PNV_IODA_PE_BUS_ALL) || !pci_is_bridge(pdev)) in pnv_ioda_setup_pe_seg()
3167 pnv_ioda_setup_pe_res(pe, in pnv_ioda_setup_pe_seg()
3386 struct pnv_ioda_pe *pe; in pnv_pci_setup_bridge() local
3394 pe = pnv_ioda_setup_bus_PE(phb->hose->bus, false); in pnv_pci_setup_bridge()
3395 if (pe) { in pnv_pci_setup_bridge()
3396 phb->ioda.root_pe_idx = pe->pe_number; in pnv_pci_setup_bridge()
3414 pe = pnv_ioda_setup_bus_PE(bus, all); in pnv_pci_setup_bridge()
3415 if (!pe) in pnv_pci_setup_bridge()
3418 pnv_ioda_setup_pe_seg(pe); in pnv_pci_setup_bridge()
3421 pnv_pci_ioda1_setup_dma_pe(phb, pe); in pnv_pci_setup_bridge()
3424 pnv_pci_ioda2_setup_dma_pe(phb, pe); in pnv_pci_setup_bridge()
3500 struct pnv_ioda_pe *pe = container_of(table_group, in pnv_pci_ioda1_unset_window() local
3502 struct pnv_phb *phb = pe->phb; in pnv_pci_ioda1_unset_window()
3506 pe_info(pe, "Removing DMA window #%d\n", num); in pnv_pci_ioda1_unset_window()
3508 if (phb->ioda.dma32_segmap[idx] != pe->pe_number) in pnv_pci_ioda1_unset_window()
3511 rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number, in pnv_pci_ioda1_unset_window()
3514 pe_warn(pe, "Failure %ld unmapping DMA32 segment#%d\n", in pnv_pci_ioda1_unset_window()
3526 static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe) in pnv_pci_ioda1_release_pe_dma() argument
3528 unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe); in pnv_pci_ioda1_release_pe_dma()
3529 struct iommu_table *tbl = pe->table_group.tables[0]; in pnv_pci_ioda1_release_pe_dma()
3535 rc = pnv_pci_ioda1_unset_window(&pe->table_group, 0); in pnv_pci_ioda1_release_pe_dma()
3540 if (pe->table_group.group) { in pnv_pci_ioda1_release_pe_dma()
3541 iommu_group_put(pe->table_group.group); in pnv_pci_ioda1_release_pe_dma()
3542 WARN_ON(pe->table_group.group); in pnv_pci_ioda1_release_pe_dma()
3549 static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe) in pnv_pci_ioda2_release_pe_dma() argument
3551 struct iommu_table *tbl = pe->table_group.tables[0]; in pnv_pci_ioda2_release_pe_dma()
3552 unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe); in pnv_pci_ioda2_release_pe_dma()
3561 rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0); in pnv_pci_ioda2_release_pe_dma()
3563 pe_warn(pe, "OPAL error %ld release DMA window\n", rc); in pnv_pci_ioda2_release_pe_dma()
3566 pnv_pci_ioda2_set_bypass(pe, false); in pnv_pci_ioda2_release_pe_dma()
3567 if (pe->table_group.group) { in pnv_pci_ioda2_release_pe_dma()
3568 iommu_group_put(pe->table_group.group); in pnv_pci_ioda2_release_pe_dma()
3569 WARN_ON(pe->table_group.group); in pnv_pci_ioda2_release_pe_dma()
3575 static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe, in pnv_ioda_free_pe_seg() argument
3579 struct pnv_phb *phb = pe->phb; in pnv_ioda_free_pe_seg()
3584 if (map[idx] != pe->pe_number) in pnv_ioda_free_pe_seg()
3597 pe_warn(pe, "Error %ld unmapping (%d) segment#%d\n", in pnv_ioda_free_pe_seg()
3604 static void pnv_ioda_release_pe_seg(struct pnv_ioda_pe *pe) in pnv_ioda_release_pe_seg() argument
3606 struct pnv_phb *phb = pe->phb; in pnv_ioda_release_pe_seg()
3609 pnv_ioda_free_pe_seg(pe, OPAL_IO_WINDOW_TYPE, in pnv_ioda_release_pe_seg()
3611 pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE, in pnv_ioda_release_pe_seg()
3613 pnv_ioda_free_pe_seg(pe, OPAL_M64_WINDOW_TYPE, in pnv_ioda_release_pe_seg()
3616 pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE, in pnv_ioda_release_pe_seg()
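pnv_ioda_free_pe_seg() releases MMIO segments by scanning a per-window segment map and unmapping every index still recorded against the PE. A minimal standalone version of that scan, with a plain array in place of phb->ioda.*_segmap and the OPAL unmap call reduced to a comment:

#include <stdio.h>

#define NR_SEGS    16
#define SEG_UNUSED (-1)

static int segmap[NR_SEGS];             /* segment index -> owning PE number */

static void free_pe_segments(int pe_number)
{
	int idx;

	for (idx = 0; idx < NR_SEGS; idx++) {
		if (segmap[idx] != pe_number)
			continue;
		/* the driver would unmap the window segment via OPAL here */
		segmap[idx] = SEG_UNUSED;
	}
}

int main(void)
{
	segmap[3] = 7;
	segmap[9] = 7;
	free_pe_segments(7);
	printf("%d %d\n", segmap[3], segmap[9]);        /* -1 -1 */
	return 0;
}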
3621 static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe) in pnv_ioda_release_pe() argument
3623 struct pnv_phb *phb = pe->phb; in pnv_ioda_release_pe()
3626 list_del(&pe->list); in pnv_ioda_release_pe()
3629 pnv_pci_ioda1_release_pe_dma(pe); in pnv_ioda_release_pe()
3632 pnv_pci_ioda2_release_pe_dma(pe); in pnv_ioda_release_pe()
3638 pnv_ioda_release_pe_seg(pe); in pnv_ioda_release_pe()
3639 pnv_ioda_deconfigure_pe(pe->phb, pe); in pnv_ioda_release_pe()
3642 if (pe->flags & PNV_IODA_PE_MASTER) { in pnv_ioda_release_pe()
3643 list_for_each_entry_safe(slave, tmp, &pe->slaves, list) { in pnv_ioda_release_pe()
3656 phb->ioda.root_pe_idx == pe->pe_number) in pnv_ioda_release_pe()
3659 pnv_ioda_free_pe(pe); in pnv_ioda_release_pe()
3667 struct pnv_ioda_pe *pe; in pnv_pci_release_device() local
3683 pe = &phb->ioda.pe_array[pdn->pe_number]; in pnv_pci_release_device()
3686 WARN_ON(--pe->device_count < 0); in pnv_pci_release_device()
3687 if (pe->device_count == 0) in pnv_pci_release_device()
3688 pnv_ioda_release_pe(pe); in pnv_pci_release_device()
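The last fragments, from pnv_pci_release_device(), show the teardown rule: every device in a PE contributes to device_count, and the PE itself is released only when the count drops to zero. A small model of that refcount-style rule, with a simplified struct and hypothetical names:

#include <stdio.h>

struct pe {
	int number;
	int device_count;
};

static void release_pe(struct pe *pe)
{
	printf("releasing PE#%d\n", pe->number);        /* stands in for pnv_ioda_release_pe() */
}

static void release_device(struct pe *pe)
{
	if (--pe->device_count < 0)                     /* the driver WARN_ON()s this underflow */
		fprintf(stderr, "PE#%d device_count underflow\n", pe->number);

	if (pe->device_count == 0)
		release_pe(pe);                         /* last device gone */
}

int main(void)
{
	struct pe pe = { .number = 3, .device_count = 2 };

	release_device(&pe);    /* one device left, PE kept */
	release_device(&pe);    /* last device: "releasing PE#3" */
	return 0;
}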