Lines Matching full:pe

54 static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
57 void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level, in pe_level_printk() argument
69 if (pe->flags & PNV_IODA_PE_DEV) in pe_level_printk()
70 strscpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix)); in pe_level_printk()
71 else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) in pe_level_printk()
73 pci_domain_nr(pe->pbus), pe->pbus->number); in pe_level_printk()
75 else if (pe->flags & PNV_IODA_PE_VF) in pe_level_printk()
77 pci_domain_nr(pe->parent_dev->bus), in pe_level_printk()
78 (pe->rid & 0xff00) >> 8, in pe_level_printk()
79 PCI_SLOT(pe->rid), PCI_FUNC(pe->rid)); in pe_level_printk()
82 printk("%spci %s: [PE# %.2x] %pV", in pe_level_printk()
83 level, pfix, pe->pe_number, &vaf); in pe_level_printk()
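
All of the matches above fall in pe_level_printk(), which builds a per-PE prefix from the PE flags (device name for a device PE, domain/bus for a bus PE, parent bus plus slot/function for a VF) and then emits the caller's format through a struct va_format (%pV). A rough user-space sketch of the same shape, with the pnv_* types and the %pV mechanism simplified away; all names below are illustrative, not the kernel's:

    #include <stdarg.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's PE flags and PE struct. */
    #define PE_DEV 0x1
    #define PE_BUS 0x2

    struct fake_pe {
        unsigned flags;
        int pe_number;
        const char *dev_name;   /* used for PE_DEV */
        int domain, busno;      /* used for PE_BUS */
    };

    /* Build the prefix once, then forward the variadic tail, mirroring
     * the strscpy()/%pV structure of pe_level_printk(). */
    static void pe_printf(const struct fake_pe *pe, const char *fmt, ...)
    {
        char pfix[32];
        va_list ap;

        if (pe->flags & PE_DEV)
            snprintf(pfix, sizeof(pfix), "%s", pe->dev_name);
        else if (pe->flags & PE_BUS)
            snprintf(pfix, sizeof(pfix), "%04x:%02x", pe->domain, pe->busno);
        else
            snprintf(pfix, sizeof(pfix), "?");

        printf("pci %s: [PE# %.2x] ", pfix, pe->pe_number);
        va_start(ap, fmt);
        vprintf(fmt, ap);
        va_end(ap);
    }
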
128 * Clear the PE frozen state as it might be put into frozen state in pnv_ioda_init_pe()
130 * PE is already in unfrozen state. in pnv_ioda_init_pe()
135 pr_warn("%s: Error %lld unfreezing PHB#%x-PE#%x\n", in pnv_ioda_init_pe()
144 pr_warn("%s: Invalid PE %x on PHB#%x\n", in pnv_ioda_reserve_pe()
151 pr_debug("%s: PE %x was reserved on PHB#%x\n", in pnv_ioda_reserve_pe()
161 int run = 0, pe, i; in pnv_ioda_alloc_pe() local
166 for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) { in pnv_ioda_alloc_pe()
167 if (test_bit(pe, phb->ioda.pe_alloc)) { in pnv_ioda_alloc_pe()
179 for (i = pe; i < pe + count; i++) { in pnv_ioda_alloc_pe()
183 ret = &phb->ioda.pe_array[pe]; in pnv_ioda_alloc_pe()
190 void pnv_ioda_free_pe(struct pnv_ioda_pe *pe) in pnv_ioda_free_pe() argument
192 struct pnv_phb *phb = pe->phb; in pnv_ioda_free_pe()
193 unsigned int pe_num = pe->pe_number; in pnv_ioda_free_pe()
195 WARN_ON(pe->pdev); in pnv_ioda_free_pe()
196 memset(pe, 0, sizeof(struct pnv_ioda_pe)); in pnv_ioda_free_pe()
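
Lines 161-183 show the core of pnv_ioda_alloc_pe(): walk the pe_alloc bitmap from the highest PE number downward, counting a run of free entries, and claim the run once it reaches count. A minimal sketch of that search, with test_bit() and the bitmap replaced by a plain bool array (find_free_run is my name, not the kernel's):

    #include <stdbool.h>

    /* Find `count` consecutive free entries, searching top-down the way
     * pnv_ioda_alloc_pe() walks phb->ioda.pe_alloc.  Returns the lowest
     * index of the run, or -1 if no such run exists. */
    static int find_free_run(const bool *allocated, int total, int count)
    {
        int run = 0;

        for (int pe = total - 1; pe >= 0; pe--) {
            if (allocated[pe]) {
                run = 0;          /* run broken by an allocated PE */
                continue;
            }
            if (++run == count)
                return pe;        /* run spans [pe, pe + count - 1] */
        }
        return -1;
    }

The caller would then mark indices [pe, pe + count) allocated and hand back &pe_array[pe], as lines 179-183 do.
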
233 * Exclude the segments for reserved and root bus PE, which in pnv_ioda2_init_m64()
242 pr_warn(" Cannot strip M64 segment for reserved PE#%x\n", in pnv_ioda2_init_m64()
323 * to one particular PE#. However, PHB3 has fixed mapping in pnv_ioda1_init_m64()
324 * between M64 segment and PE#. In order to have the same logic in pnv_ioda1_init_m64()
326 * segment and PE# on P7IOC. in pnv_ioda1_init_m64()
333 pr_warn("%s: Error %lld mapping M64 for PHB#%x-PE#%x\n", in pnv_ioda1_init_m64()
341 * Exclude the segments for reserved and root bus PE, which in pnv_ioda1_init_m64()
350 WARN(1, "Wrong reserved PE#%x on PHB#%x\n", in pnv_ioda1_init_m64()
381 struct pnv_ioda_pe *master_pe, *pe; in pnv_ioda_pick_m64_pe() local
398 /* Figure out reserved PE numbers by the PE */ in pnv_ioda_pick_m64_pe()
404 * pick M64 dependent PE#. in pnv_ioda_pick_m64_pe()
412 * Figure out the master PE and put all slave PEs to master in pnv_ioda_pick_m64_pe()
413 * PE's list to form compound PE. in pnv_ioda_pick_m64_pe()
419 pe = &phb->ioda.pe_array[i]; in pnv_ioda_pick_m64_pe()
421 phb->ioda.m64_segmap[pe->pe_number] = pe->pe_number; in pnv_ioda_pick_m64_pe()
423 pe->flags |= PNV_IODA_PE_MASTER; in pnv_ioda_pick_m64_pe()
424 INIT_LIST_HEAD(&pe->slaves); in pnv_ioda_pick_m64_pe()
425 master_pe = pe; in pnv_ioda_pick_m64_pe()
427 pe->flags |= PNV_IODA_PE_SLAVE; in pnv_ioda_pick_m64_pe()
428 pe->master = master_pe; in pnv_ioda_pick_m64_pe()
429 list_add_tail(&pe->list, &master_pe->slaves); in pnv_ioda_pick_m64_pe()
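
The tail of pnv_ioda_pick_m64_pe() (lines 412-429) forms a compound PE: the first PE in the window becomes the master, gets PNV_IODA_PE_MASTER and an initialized slaves list, and every later PE is flagged PNV_IODA_PE_SLAVE, pointed at the master, and appended to that list. A sketch of the grouping with the kernel's list_head replaced by a singly linked chain; struct pe and the flag values are illustrative, and the array is assumed zero-initialized:

    #define PE_MASTER 0x1
    #define PE_SLAVE  0x2

    struct pe {
        unsigned flags;
        struct pe *master;       /* NULL for the master itself */
        struct pe *next_slave;   /* simplified stand-in for list_head */
    };

    /* First PE becomes the master; later ones are appended as slaves,
     * mirroring the list_add_tail() calls in pnv_ioda_pick_m64_pe(). */
    static void form_compound(struct pe *pes, int n)
    {
        struct pe *master = NULL, *tail = NULL;

        for (int i = 0; i < n; i++) {
            struct pe *p = &pes[i];
            if (!master) {
                p->flags |= PE_MASTER;
                master = p;
            } else {
                p->flags |= PE_SLAVE;
                p->master = master;
                if (tail)
                    tail->next_slave = p;
                else
                    master->next_slave = p;
                tail = p;
            }
        }
    }
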
529 struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_no]; in pnv_ioda_freeze_pe() local
533 /* Fetch master PE */ in pnv_ioda_freeze_pe()
534 if (pe->flags & PNV_IODA_PE_SLAVE) { in pnv_ioda_freeze_pe()
535 pe = pe->master; in pnv_ioda_freeze_pe()
536 if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER))) in pnv_ioda_freeze_pe()
539 pe_no = pe->pe_number; in pnv_ioda_freeze_pe()
542 /* Freeze master PE */ in pnv_ioda_freeze_pe()
547 pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n", in pnv_ioda_freeze_pe()
553 if (!(pe->flags & PNV_IODA_PE_MASTER)) in pnv_ioda_freeze_pe()
556 list_for_each_entry(slave, &pe->slaves, list) { in pnv_ioda_freeze_pe()
561 pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n", in pnv_ioda_freeze_pe()
569 struct pnv_ioda_pe *pe, *slave; in pnv_ioda_unfreeze_pe() local
572 /* Find master PE */ in pnv_ioda_unfreeze_pe()
573 pe = &phb->ioda.pe_array[pe_no]; in pnv_ioda_unfreeze_pe()
574 if (pe->flags & PNV_IODA_PE_SLAVE) { in pnv_ioda_unfreeze_pe()
575 pe = pe->master; in pnv_ioda_unfreeze_pe()
576 WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)); in pnv_ioda_unfreeze_pe()
577 pe_no = pe->pe_number; in pnv_ioda_unfreeze_pe()
580 /* Clear frozen state for master PE */ in pnv_ioda_unfreeze_pe()
583 pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n", in pnv_ioda_unfreeze_pe()
588 if (!(pe->flags & PNV_IODA_PE_MASTER)) in pnv_ioda_unfreeze_pe()
592 list_for_each_entry(slave, &pe->slaves, list) { in pnv_ioda_unfreeze_pe()
597 pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n", in pnv_ioda_unfreeze_pe()
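
pnv_ioda_freeze_pe() and pnv_ioda_unfreeze_pe() share one control flow: if the PE passed in is a slave, redirect to its master; apply the OPAL call to the master; then, if the PE heads a compound, repeat the call for every slave on its list. A sketch of that flow with the opal_pci_eeh_freeze_* call abstracted into a hypothetical apply() callback (struct cpe and its fields are illustrative):

    #define CPE_MASTER 0x1
    #define CPE_SLAVE  0x2

    struct cpe {
        unsigned flags;
        struct cpe *master, *next_slave;
    };

    /* Resolve a possibly-slave PE to its compound master, as the freeze,
     * unfreeze and state-query paths all do before touching hardware. */
    static struct cpe *resolve_master(struct cpe *p)
    {
        if (p->flags & CPE_SLAVE)
            p = p->master;       /* the kernel WARN_ONs a missing master */
        return p;
    }

    /* Apply an operation to the master, then to each slave behind it. */
    static void for_compound(struct cpe *p, void (*apply)(struct cpe *))
    {
        p = resolve_master(p);
        apply(p);
        if (!(p->flags & CPE_MASTER))
            return;              /* plain PE: nothing chained behind it */
        for (struct cpe *s = p->next_slave; s; s = s->next_slave)
            apply(s);
    }
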
609 struct pnv_ioda_pe *slave, *pe; in pnv_ioda_get_pe_state() local
614 /* Sanity check on PE number */ in pnv_ioda_get_pe_state()
619 * Fetch the master PE and the PE instance might be in pnv_ioda_get_pe_state()
622 pe = &phb->ioda.pe_array[pe_no]; in pnv_ioda_get_pe_state()
623 if (pe->flags & PNV_IODA_PE_SLAVE) { in pnv_ioda_get_pe_state()
624 pe = pe->master; in pnv_ioda_get_pe_state()
625 WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)); in pnv_ioda_get_pe_state()
626 pe_no = pe->pe_number; in pnv_ioda_get_pe_state()
629 /* Check the master PE */ in pnv_ioda_get_pe_state()
634 "PHB#%x-PE#%x state\n", in pnv_ioda_get_pe_state()
640 /* Check the slave PE */ in pnv_ioda_get_pe_state()
641 if (!(pe->flags & PNV_IODA_PE_MASTER)) in pnv_ioda_get_pe_state()
644 list_for_each_entry(slave, &pe->slaves, list) { in pnv_ioda_get_pe_state()
652 "PHB#%x-PE#%x state\n", in pnv_ioda_get_pe_state()
702 /* Parent PE affects child PE */ in pnv_ioda_set_one_peltv()
714 /* Compound case: parent PE affects slave PEs */ in pnv_ioda_set_one_peltv()
729 struct pnv_ioda_pe *pe, in pnv_ioda_set_peltv() argument
737 * Clear PE frozen state. If it's a master PE, we need to in pnv_ioda_set_peltv()
738 * clear slave PE frozen state as well. in pnv_ioda_set_peltv()
741 opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number, in pnv_ioda_set_peltv()
743 if (pe->flags & PNV_IODA_PE_MASTER) { in pnv_ioda_set_peltv()
744 list_for_each_entry(slave, &pe->slaves, list) in pnv_ioda_set_peltv()
752 * Associate PE in PELT. We need to add the PE into the in pnv_ioda_set_peltv()
754 * originated from the PE might contribute to other in pnv_ioda_set_peltv()
757 ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add); in pnv_ioda_set_peltv()
762 if (pe->flags & PNV_IODA_PE_MASTER) { in pnv_ioda_set_peltv()
763 list_for_each_entry(slave, &pe->slaves, list) { in pnv_ioda_set_peltv()
764 ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add); in pnv_ioda_set_peltv()
770 if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS)) in pnv_ioda_set_peltv()
771 pdev = pe->pbus->self; in pnv_ioda_set_peltv()
772 else if (pe->flags & PNV_IODA_PE_DEV) in pnv_ioda_set_peltv()
773 pdev = pe->pdev->bus->self; in pnv_ioda_set_peltv()
775 else if (pe->flags & PNV_IODA_PE_VF) in pnv_ioda_set_peltv()
776 pdev = pe->parent_dev; in pnv_ioda_set_peltv()
784 ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add); in pnv_ioda_set_peltv()
796 struct pnv_ioda_pe *pe, in pnv_ioda_unset_peltv() argument
806 pe->pe_number, in pnv_ioda_unset_peltv()
813 opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number, in pnv_ioda_unset_peltv()
816 /* Disassociate PE in PELT */ in pnv_ioda_unset_peltv()
817 rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number, in pnv_ioda_unset_peltv()
818 pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN); in pnv_ioda_unset_peltv()
820 pe_warn(pe, "OPAL error %lld remove self from PELTV\n", rc); in pnv_ioda_unset_peltv()
823 int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) in pnv_ioda_deconfigure_pe() argument
830 /* Currently, we just deconfigure VF PE. Bus PE will always be there. */ in pnv_ioda_deconfigure_pe()
831 if (pe->pbus) { in pnv_ioda_deconfigure_pe()
836 parent = pe->pbus->self; in pnv_ioda_deconfigure_pe()
837 if (pe->flags & PNV_IODA_PE_BUS_ALL) in pnv_ioda_deconfigure_pe()
838 count = resource_size(&pe->pbus->busn_res); in pnv_ioda_deconfigure_pe()
850 dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n", in pnv_ioda_deconfigure_pe()
855 rid_end = pe->rid + (count << 8); in pnv_ioda_deconfigure_pe()
858 if (pe->flags & PNV_IODA_PE_VF) in pnv_ioda_deconfigure_pe()
859 parent = pe->parent_dev; in pnv_ioda_deconfigure_pe()
862 parent = pe->pdev->bus->self; in pnv_ioda_deconfigure_pe()
866 rid_end = pe->rid + 1; in pnv_ioda_deconfigure_pe()
870 for (rid = pe->rid; rid < rid_end; rid++) in pnv_ioda_deconfigure_pe()
878 pnv_ioda_unset_peltv(phb, pe, parent); in pnv_ioda_deconfigure_pe()
880 rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid, in pnv_ioda_deconfigure_pe()
883 pe_err(pe, "OPAL error %lld trying to setup PELT table\n", rc); in pnv_ioda_deconfigure_pe()
885 pe->pbus = NULL; in pnv_ioda_deconfigure_pe()
886 pe->pdev = NULL; in pnv_ioda_deconfigure_pe()
888 pe->parent_dev = NULL; in pnv_ioda_deconfigure_pe()
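
Both pnv_ioda_deconfigure_pe() above and pnv_ioda_configure_pe() below bracket the same RID arithmetic: a PE's rid packs bus << 8 | devfn, a single-device PE owns exactly one RID (rid_end = pe->rid + 1, line 866), and a bus PE owns count << 8 RIDs, where count comes from resource_size(&pe->pbus->busn_res) (lines 838 and 855). The configure path then writes pe->pe_number into pe_rmap[] for each RID in the span (lines 952-953), building the reverse map that pnv_pci_bdfn_to_pe() consults. Worked numerically, with illustrative values:

    #include <stdio.h>

    int main(void)
    {
        unsigned bus = 0x04, devfn = 0x28;       /* illustrative values */
        unsigned rid = bus << 8 | devfn;         /* pe->rid for a device PE */

        /* Device PE: exactly one RID. */
        unsigned dev_rid_end = rid + 1;

        /* Bus PE spanning `count` bus numbers: each bus contributes
         * 256 devfn values, hence count << 8. */
        unsigned count = 3;
        unsigned bus_rid = bus << 8;             /* devfn part is zero */
        unsigned bus_rid_end = bus_rid + (count << 8);

        printf("device PE: RIDs [%04x, %04x)\n", rid, dev_rid_end);
        printf("bus PE:    RIDs [%04x, %04x)\n", bus_rid, bus_rid_end);
        return 0;
    }
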
894 int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) in pnv_ioda_configure_pe() argument
900 if (pe->pbus) { in pnv_ioda_configure_pe()
905 if (pe->flags & PNV_IODA_PE_BUS_ALL) in pnv_ioda_configure_pe()
906 count = resource_size(&pe->pbus->busn_res); in pnv_ioda_configure_pe()
918 dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n", in pnv_ioda_configure_pe()
923 rid_end = pe->rid + (count << 8); in pnv_ioda_configure_pe()
928 rid_end = pe->rid + 1; in pnv_ioda_configure_pe()
932 * Associate PE in PELT. We need to add the PE into the in pnv_ioda_configure_pe()
934 * originated from the PE might contribute to other in pnv_ioda_configure_pe()
937 rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid, in pnv_ioda_configure_pe()
940 pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc); in pnv_ioda_configure_pe()
949 pnv_ioda_set_peltv(phb, pe, true); in pnv_ioda_configure_pe()
952 for (rid = pe->rid; rid < rid_end; rid++) in pnv_ioda_configure_pe()
953 phb->ioda.pe_rmap[rid] = pe->pe_number; in pnv_ioda_configure_pe()
957 pe->mve_number = 0; in pnv_ioda_configure_pe()
961 pe->mve_number = pe->pe_number; in pnv_ioda_configure_pe()
962 rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number); in pnv_ioda_configure_pe()
964 pe_err(pe, "OPAL error %ld setting up MVE %x\n", in pnv_ioda_configure_pe()
965 rc, pe->mve_number); in pnv_ioda_configure_pe()
966 pe->mve_number = -1; in pnv_ioda_configure_pe()
969 pe->mve_number, OPAL_ENABLE_MVE); in pnv_ioda_configure_pe()
971 pe_err(pe, "OPAL error %ld enabling MVE %x\n", in pnv_ioda_configure_pe()
972 rc, pe->mve_number); in pnv_ioda_configure_pe()
973 pe->mve_number = -1; in pnv_ioda_configure_pe()
985 struct pnv_ioda_pe *pe; in pnv_ioda_setup_dev_PE() local
995 pe = pnv_ioda_alloc_pe(phb, 1); in pnv_ioda_setup_dev_PE()
996 if (!pe) { in pnv_ioda_setup_dev_PE()
997 pr_warn("%s: Not enough PE# available, disabling device\n", in pnv_ioda_setup_dev_PE()
1002 /* NOTE: We don't get a reference for the pointer in the PE in pnv_ioda_setup_dev_PE()
1003 * data structure; both the device and PE structures should be in pnv_ioda_setup_dev_PE()
1008 pdn->pe_number = pe->pe_number; in pnv_ioda_setup_dev_PE()
1009 pe->flags = PNV_IODA_PE_DEV; in pnv_ioda_setup_dev_PE()
1010 pe->pdev = dev; in pnv_ioda_setup_dev_PE()
1011 pe->pbus = NULL; in pnv_ioda_setup_dev_PE()
1012 pe->mve_number = -1; in pnv_ioda_setup_dev_PE()
1013 pe->rid = dev->bus->number << 8 | pdn->devfn; in pnv_ioda_setup_dev_PE()
1014 pe->device_count++; in pnv_ioda_setup_dev_PE()
1016 pe_info(pe, "Associated device to PE\n"); in pnv_ioda_setup_dev_PE()
1018 if (pnv_ioda_configure_pe(phb, pe)) { in pnv_ioda_setup_dev_PE()
1020 pnv_ioda_free_pe(pe); in pnv_ioda_setup_dev_PE()
1022 pe->pdev = NULL; in pnv_ioda_setup_dev_PE()
1026 /* Put PE to the list */ in pnv_ioda_setup_dev_PE()
1028 list_add_tail(&pe->list, &phb->ioda.pe_list); in pnv_ioda_setup_dev_PE()
1030 return pe; in pnv_ioda_setup_dev_PE()
1036 * subordinate PCI devices and buses. The second type of PE is normally
1042 struct pnv_ioda_pe *pe = NULL; in pnv_ioda_setup_bus_PE() local
1046 * In partial hotplug case, the PE instance might be still alive. in pnv_ioda_setup_bus_PE()
1051 pe = &phb->ioda.pe_array[pe_num]; in pnv_ioda_setup_bus_PE()
1055 /* PE number for root bus should have been reserved */ in pnv_ioda_setup_bus_PE()
1057 pe = &phb->ioda.pe_array[phb->ioda.root_pe_idx]; in pnv_ioda_setup_bus_PE()
1059 /* Check if PE is determined by M64 */ in pnv_ioda_setup_bus_PE()
1060 if (!pe) in pnv_ioda_setup_bus_PE()
1061 pe = pnv_ioda_pick_m64_pe(bus, all); in pnv_ioda_setup_bus_PE()
1063 /* The PE number isn't pinned by M64 */ in pnv_ioda_setup_bus_PE()
1064 if (!pe) in pnv_ioda_setup_bus_PE()
1065 pe = pnv_ioda_alloc_pe(phb, 1); in pnv_ioda_setup_bus_PE()
1067 if (!pe) { in pnv_ioda_setup_bus_PE()
1068 pr_warn("%s: Not enough PE# available for PCI bus %04x:%02x\n", in pnv_ioda_setup_bus_PE()
1073 pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS); in pnv_ioda_setup_bus_PE()
1074 pe->pbus = bus; in pnv_ioda_setup_bus_PE()
1075 pe->pdev = NULL; in pnv_ioda_setup_bus_PE()
1076 pe->mve_number = -1; in pnv_ioda_setup_bus_PE()
1077 pe->rid = bus->busn_res.start << 8; in pnv_ioda_setup_bus_PE()
1080 pe_info(pe, "Secondary bus %pad..%pad associated with PE#%x\n", in pnv_ioda_setup_bus_PE()
1082 pe->pe_number); in pnv_ioda_setup_bus_PE()
1084 pe_info(pe, "Secondary bus %pad associated with PE#%x\n", in pnv_ioda_setup_bus_PE()
1085 &bus->busn_res.start, pe->pe_number); in pnv_ioda_setup_bus_PE()
1087 if (pnv_ioda_configure_pe(phb, pe)) { in pnv_ioda_setup_bus_PE()
1089 pnv_ioda_free_pe(pe); in pnv_ioda_setup_bus_PE()
1090 pe->pbus = NULL; in pnv_ioda_setup_bus_PE()
1094 /* Put PE to the list */ in pnv_ioda_setup_bus_PE()
1095 list_add_tail(&pe->list, &phb->ioda.pe_list); in pnv_ioda_setup_bus_PE()
1097 return pe; in pnv_ioda_setup_bus_PE()
1101 struct pnv_ioda_pe *pe);
1107 struct pnv_ioda_pe *pe; in pnv_pci_ioda_dma_dev_setup() local
1109 /* Check if the BDFN for this device is associated with a PE yet */ in pnv_pci_ioda_dma_dev_setup()
1110 pe = pnv_pci_bdfn_to_pe(phb, pdev->devfn | (pdev->bus->number << 8)); in pnv_pci_ioda_dma_dev_setup()
1111 if (!pe) { in pnv_pci_ioda_dma_dev_setup()
1117 pe = pnv_pci_bdfn_to_pe(phb, pdev->devfn | (pdev->bus->number << 8)); in pnv_pci_ioda_dma_dev_setup()
1118 pci_info(pdev, "Configured PE#%x\n", pe ? pe->pe_number : 0xfffff); in pnv_pci_ioda_dma_dev_setup()
1122 * If we can't setup the IODA PE something has gone horribly in pnv_pci_ioda_dma_dev_setup()
1125 if (WARN_ON(!pe)) in pnv_pci_ioda_dma_dev_setup()
1128 pci_info(pdev, "Added to existing PE#%x\n", pe->pe_number); in pnv_pci_ioda_dma_dev_setup()
1135 if (!pe->dma_setup_done && !pci_is_bridge(pdev)) { in pnv_pci_ioda_dma_dev_setup()
1138 pnv_pci_ioda1_setup_dma_pe(phb, pe); in pnv_pci_ioda_dma_dev_setup()
1141 pnv_pci_ioda2_setup_dma_pe(phb, pe); in pnv_pci_ioda_dma_dev_setup()
1150 pdn->pe_number = pe->pe_number; in pnv_pci_ioda_dma_dev_setup()
1151 pe->device_count++; in pnv_pci_ioda_dma_dev_setup()
1154 pdev->dev.archdata.dma_offset = pe->tce_bypass_base; in pnv_pci_ioda_dma_dev_setup()
1155 set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]); in pnv_pci_ioda_dma_dev_setup()
1158 if (pe->table_group.group) in pnv_pci_ioda_dma_dev_setup()
1159 iommu_add_device(&pe->table_group, &pdev->dev); in pnv_pci_ioda_dma_dev_setup()
1165 * The first 4GB of virtual memory for a PE is reserved for 32-bit accesses.
1179 static int pnv_pci_ioda_dma_64bit_bypass(struct pnv_ioda_pe *pe) in pnv_pci_ioda_dma_64bit_bypass() argument
1198 table_pages = alloc_pages_node(pe->phb->hose->node, GFP_KERNEL, in pnv_pci_ioda_dma_64bit_bypass()
1214 rc = opal_pci_map_pe_dma_window(pe->phb->opal_id, in pnv_pci_ioda_dma_64bit_bypass()
1215 pe->pe_number, in pnv_pci_ioda_dma_64bit_bypass()
1217 (pe->pe_number << 1) + 0, in pnv_pci_ioda_dma_64bit_bypass()
1223 pe_info(pe, "Using 64-bit DMA iommu bypass (through TVE#0)\n"); in pnv_pci_ioda_dma_64bit_bypass()
1227 pe_err(pe, "Error configuring 64-bit DMA bypass\n"); in pnv_pci_ioda_dma_64bit_bypass()
1236 struct pnv_ioda_pe *pe; in pnv_pci_ioda_iommu_bypass_supported() local
1241 pe = &phb->ioda.pe_array[pdn->pe_number]; in pnv_pci_ioda_iommu_bypass_supported()
1242 if (pe->tce_bypass_enabled) { in pnv_pci_ioda_iommu_bypass_supported()
1243 u64 top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1; in pnv_pci_ioda_iommu_bypass_supported()
1256 /* pe->pdev should be set if it's a single device, pe->pbus if not */ in pnv_pci_ioda_iommu_bypass_supported()
1257 (pe->device_count == 1 || !pe->pbus) && in pnv_pci_ioda_iommu_bypass_supported()
1260 s64 rc = pnv_pci_ioda_dma_64bit_bypass(pe); in pnv_pci_ioda_iommu_bypass_supported()
1282 struct pnv_ioda_pe *pe = container_of(tgl->table_group, in pnv_pci_p7ioc_tce_invalidate() local
1284 __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb); in pnv_pci_p7ioc_tce_invalidate()
1355 static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe) in pnv_pci_phb3_tce_invalidate_pe() argument
1357 /* 01xb - invalidate TCEs that match the specified PE# */ in pnv_pci_phb3_tce_invalidate_pe()
1358 __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb); in pnv_pci_phb3_tce_invalidate_pe()
1359 unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF); in pnv_pci_phb3_tce_invalidate_pe()
1365 static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, in pnv_pci_phb3_tce_invalidate() argument
1369 __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb); in pnv_pci_phb3_tce_invalidate()
1372 /* We'll invalidate DMA address in PE scope */ in pnv_pci_phb3_tce_invalidate()
1374 start |= (pe->pe_number & 0xFF); in pnv_pci_phb3_tce_invalidate()
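
The PHB3 helpers fold everything into one 64-bit store to the invalidation register: a PE-scoped kill is an opcode plus the low 8 bits of the PE number (line 1359), and an address-scoped kill ORs the PE number into the DMA address being invalidated (line 1374). A sketch of the encoding; the opcode values below are placeholders, not the real PHB3_TCE_KILL_* constants, and the kernel additionally shifts the address according to the TCE page size:

    #include <stdint.h>

    /* Placeholder opcodes; the real encodings live in the kernel headers. */
    #define KILL_INVAL_PE  (1ull << 62)   /* "01xb" per the comment at 1357 */
    #define KILL_INVAL_ONE (3ull << 62)   /* made-up value for illustration */

    /* Invalidate every cached TCE belonging to one PE. */
    static uint64_t kill_pe(unsigned pe_number)
    {
        return KILL_INVAL_PE | (pe_number & 0xFF);
    }

    /* Invalidate one DMA address within a PE's scope. */
    static uint64_t kill_addr(unsigned pe_number, uint64_t dma_addr)
    {
        return KILL_INVAL_ONE | dma_addr | (pe_number & 0xFF);
    }
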
1389 static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe) in pnv_pci_ioda2_tce_invalidate_pe() argument
1391 struct pnv_phb *phb = pe->phb; in pnv_pci_ioda2_tce_invalidate_pe()
1394 pnv_pci_phb3_tce_invalidate_pe(pe); in pnv_pci_ioda2_tce_invalidate_pe()
1397 pe->pe_number, 0, 0, 0); in pnv_pci_ioda2_tce_invalidate_pe()
1406 struct pnv_ioda_pe *pe = container_of(tgl->table_group, in pnv_pci_ioda2_tce_invalidate() local
1408 struct pnv_phb *phb = pe->phb; in pnv_pci_ioda2_tce_invalidate()
1412 pnv_pci_phb3_tce_invalidate(pe, shift, in pnv_pci_ioda2_tce_invalidate()
1417 pe->pe_number, 1u << shift, in pnv_pci_ioda2_tce_invalidate()
1478 static unsigned int pnv_pci_ioda_pe_dma_weight(struct pnv_ioda_pe *pe) in pnv_pci_ioda_pe_dma_weight() argument
1484 if ((pe->flags & PNV_IODA_PE_VF) && pe->parent_dev) { in pnv_pci_ioda_pe_dma_weight()
1485 pnv_pci_ioda_dev_dma_weight(pe->parent_dev, &weight); in pnv_pci_ioda_pe_dma_weight()
1490 if ((pe->flags & PNV_IODA_PE_DEV) && pe->pdev) { in pnv_pci_ioda_pe_dma_weight()
1491 pnv_pci_ioda_dev_dma_weight(pe->pdev, &weight); in pnv_pci_ioda_pe_dma_weight()
1492 } else if ((pe->flags & PNV_IODA_PE_BUS) && pe->pbus) { in pnv_pci_ioda_pe_dma_weight()
1495 list_for_each_entry(pdev, &pe->pbus->devices, bus_list) in pnv_pci_ioda_pe_dma_weight()
1497 } else if ((pe->flags & PNV_IODA_PE_BUS_ALL) && pe->pbus) { in pnv_pci_ioda_pe_dma_weight()
1498 pci_walk_bus(pe->pbus, pnv_pci_ioda_dev_dma_weight, &weight); in pnv_pci_ioda_pe_dma_weight()
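
pnv_pci_ioda_pe_dma_weight() dispatches on the PE type: a VF or single-device PE weighs one device, a bus PE iterates the bus's device list, and a BUS_ALL PE walks the whole subtree via pci_walk_bus() with the accumulator handed to the callback as its context pointer. The accumulator idiom in isolation, with all names illustrative:

    /* Callback-with-context accumulation, as pci_walk_bus() does with
     * pnv_pci_ioda_dev_dma_weight() and its `weight` pointer. */
    struct dev { int weight_hint; struct dev *next; };

    static int add_weight(struct dev *d, void *data)
    {
        *(unsigned *)data += d->weight_hint;
        return 0;              /* 0 = keep walking, like the kernel callback */
    }

    static unsigned total_weight(struct dev *head)
    {
        unsigned weight = 0;
        for (struct dev *d = head; d; d = d->next)
            add_weight(d, &weight);
        return weight;
    }
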
1505 struct pnv_ioda_pe *pe) in pnv_pci_ioda1_setup_dma_pe() argument
1518 weight = pnv_pci_ioda_pe_dma_weight(pe); in pnv_pci_ioda1_setup_dma_pe()
1548 pe_warn(pe, "No available DMA32 segments\n"); in pnv_pci_ioda1_setup_dma_pe()
1557 iommu_register_group(&pe->table_group, phb->hose->global_number, in pnv_pci_ioda1_setup_dma_pe()
1558 pe->pe_number); in pnv_pci_ioda1_setup_dma_pe()
1559 pnv_pci_link_table_and_group(phb->hose->node, 0, tbl, &pe->table_group); in pnv_pci_ioda1_setup_dma_pe()
1562 pe_info(pe, "DMA weight %d (%d), assigned (%d) %d DMA32 segments\n", in pnv_pci_ioda1_setup_dma_pe()
1564 pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n", in pnv_pci_ioda1_setup_dma_pe()
1580 pe_err(pe, " Failed to allocate a 32-bit TCE memory\n"); in pnv_pci_ioda1_setup_dma_pe()
1589 pe->pe_number, in pnv_pci_ioda1_setup_dma_pe()
1594 pe_err(pe, " Failed to configure 32-bit TCE table, err %lld\n", in pnv_pci_ioda1_setup_dma_pe()
1602 phb->ioda.dma32_segmap[i] = pe->pe_number; in pnv_pci_ioda1_setup_dma_pe()
1610 pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift; in pnv_pci_ioda1_setup_dma_pe()
1611 pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift; in pnv_pci_ioda1_setup_dma_pe()
1612 tbl->it_index = (phb->hose->global_number << 16) | pe->pe_number; in pnv_pci_ioda1_setup_dma_pe()
1616 pe->dma_setup_done = true; in pnv_pci_ioda1_setup_dma_pe()
1623 pnv_pci_unlink_table_and_group(tbl, &pe->table_group); in pnv_pci_ioda1_setup_dma_pe()
1631 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, in pnv_pci_ioda2_set_window() local
1633 struct pnv_phb *phb = pe->phb; in pnv_pci_ioda2_set_window()
1640 pe_info(pe, "Setting up window#%d %llx..%llx pg=%lx\n", in pnv_pci_ioda2_set_window()
1645 * Map TCE table through TVT. The TVE index is the PE number in pnv_pci_ioda2_set_window()
1649 pe->pe_number, in pnv_pci_ioda2_set_window()
1650 (pe->pe_number << 1) + num, in pnv_pci_ioda2_set_window()
1656 pe_err(pe, "Failed to configure TCE table, err %lld\n", rc); in pnv_pci_ioda2_set_window()
1661 tbl, &pe->table_group); in pnv_pci_ioda2_set_window()
1662 pnv_pci_ioda2_tce_invalidate_pe(pe); in pnv_pci_ioda2_set_window()
1667 static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable) in pnv_pci_ioda2_set_bypass() argument
1669 uint16_t window_id = (pe->pe_number << 1) + 1; in pnv_pci_ioda2_set_bypass()
1672 pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis"); in pnv_pci_ioda2_set_bypass()
1677 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, in pnv_pci_ioda2_set_bypass()
1678 pe->pe_number, in pnv_pci_ioda2_set_bypass()
1680 pe->tce_bypass_base, in pnv_pci_ioda2_set_bypass()
1683 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, in pnv_pci_ioda2_set_bypass()
1684 pe->pe_number, in pnv_pci_ioda2_set_bypass()
1686 pe->tce_bypass_base, in pnv_pci_ioda2_set_bypass()
1690 pe_err(pe, "OPAL error %lld configuring bypass window\n", rc); in pnv_pci_ioda2_set_bypass()
1692 pe->tce_bypass_enabled = enable; in pnv_pci_ioda2_set_bypass()
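
On IODA2 each PE owns a pair of TVEs indexed (pe_number << 1) + n: window 0 carries the 32-bit TCE table (see (pe->pe_number << 1) + num at line 1650) and window 1 the 64-bit bypass (window_id at line 1669). The indexing on its own:

    /* TVE index for a PE's DMA window on IODA2: two windows per PE. */
    static unsigned tve_index(unsigned pe_number, unsigned window)
    {
        return (pe_number << 1) + window;  /* window 0: TCE table, 1: bypass */
    }
    /* e.g. PE#5 -> TVE#10 for its 32-bit window, TVE#11 for bypass */
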
1699 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, in pnv_pci_ioda2_create_table() local
1701 int nid = pe->phb->hose->node; in pnv_pci_ioda2_create_table()
1702 __u64 bus_offset = num ? pe->tce_bypass_base : table_group->tce32_start; in pnv_pci_ioda2_create_table()
1725 static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe) in pnv_pci_ioda2_setup_default_config() argument
1767 rc = pnv_pci_ioda2_create_table(&pe->table_group, 0, PAGE_SHIFT, in pnv_pci_ioda2_setup_default_config()
1770 pe_err(pe, "Failed to create 32-bit TCE table, err %ld", in pnv_pci_ioda2_setup_default_config()
1778 if (window_size > pe->phb->ioda.m32_pci_base) { in pnv_pci_ioda2_setup_default_config()
1779 res_start = pe->phb->ioda.m32_pci_base >> tbl->it_page_shift; in pnv_pci_ioda2_setup_default_config()
1783 tbl->it_index = (pe->phb->hose->global_number << 16) | pe->pe_number; in pnv_pci_ioda2_setup_default_config()
1784 if (iommu_init_table(tbl, pe->phb->hose->node, res_start, res_end)) in pnv_pci_ioda2_setup_default_config()
1785 rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl); in pnv_pci_ioda2_setup_default_config()
1789 pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n", rc); in pnv_pci_ioda2_setup_default_config()
1794 pnv_pci_ioda2_set_bypass(pe, true); in pnv_pci_ioda2_setup_default_config()
1801 if (pe->pdev) in pnv_pci_ioda2_setup_default_config()
1802 set_iommu_table_base(&pe->pdev->dev, tbl); in pnv_pci_ioda2_setup_default_config()
1810 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, in pnv_pci_ioda2_unset_window() local
1812 struct pnv_phb *phb = pe->phb; in pnv_pci_ioda2_unset_window()
1815 pe_info(pe, "Removing DMA window #%d\n", num); in pnv_pci_ioda2_unset_window()
1817 ret = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number, in pnv_pci_ioda2_unset_window()
1818 (pe->pe_number << 1) + num, in pnv_pci_ioda2_unset_window()
1822 pe_warn(pe, "Unmapping failed, ret = %ld\n", ret); in pnv_pci_ioda2_unset_window()
1824 pnv_pci_ioda2_tce_invalidate_pe(pe); in pnv_pci_ioda2_unset_window()
1878 static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) in pnv_ioda_setup_bus_dma() argument
1883 set_iommu_table_base(&dev->dev, pe->table_group.tables[0]); in pnv_ioda_setup_bus_dma()
1884 dev->dev.archdata.dma_offset = pe->tce_bypass_base; in pnv_ioda_setup_bus_dma()
1886 if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) in pnv_ioda_setup_bus_dma()
1887 pnv_ioda_setup_bus_dma(pe, dev->subordinate); in pnv_ioda_setup_bus_dma()
1893 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, in pnv_ioda2_take_ownership() local
1896 struct iommu_table *tbl = pe->table_group.tables[0]; in pnv_ioda2_take_ownership()
1898 pnv_pci_ioda2_set_bypass(pe, false); in pnv_ioda2_take_ownership()
1899 pnv_pci_ioda2_unset_window(&pe->table_group, 0); in pnv_ioda2_take_ownership()
1900 if (pe->pbus) in pnv_ioda2_take_ownership()
1901 pnv_ioda_setup_bus_dma(pe, pe->pbus); in pnv_ioda2_take_ownership()
1902 else if (pe->pdev) in pnv_ioda2_take_ownership()
1903 set_iommu_table_base(&pe->pdev->dev, NULL); in pnv_ioda2_take_ownership()
1909 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, in pnv_ioda2_release_ownership() local
1912 pnv_pci_ioda2_setup_default_config(pe); in pnv_ioda2_release_ownership()
1913 if (pe->pbus) in pnv_ioda2_release_ownership()
1914 pnv_ioda_setup_bus_dma(pe, pe->pbus); in pnv_ioda2_release_ownership()
1928 struct pnv_ioda_pe *pe) in pnv_pci_ioda2_setup_dma_pe() argument
1933 pe->tce_bypass_base = 1ull << 59; in pnv_pci_ioda2_setup_dma_pe()
1935 /* The PE will reserve all possible 32-bit space */ in pnv_pci_ioda2_setup_dma_pe()
1936 pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n", in pnv_pci_ioda2_setup_dma_pe()
1940 pe->table_group.tce32_start = 0; in pnv_pci_ioda2_setup_dma_pe()
1941 pe->table_group.tce32_size = phb->ioda.m32_pci_base; in pnv_pci_ioda2_setup_dma_pe()
1942 pe->table_group.max_dynamic_windows_supported = in pnv_pci_ioda2_setup_dma_pe()
1944 pe->table_group.max_levels = POWERNV_IOMMU_MAX_LEVELS; in pnv_pci_ioda2_setup_dma_pe()
1945 pe->table_group.pgsizes = pnv_ioda_parse_tce_sizes(phb); in pnv_pci_ioda2_setup_dma_pe()
1947 rc = pnv_pci_ioda2_setup_default_config(pe); in pnv_pci_ioda2_setup_dma_pe()
1952 pe->table_group.ops = &pnv_pci_ioda2_ops; in pnv_pci_ioda2_setup_dma_pe()
1953 iommu_register_group(&pe->table_group, phb->hose->global_number, in pnv_pci_ioda2_setup_dma_pe()
1954 pe->pe_number); in pnv_pci_ioda2_setup_dma_pe()
1956 pe->dma_setup_done = true; in pnv_pci_ioda2_setup_dma_pe()
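
Once bypass is on, a device reaches memory by issuing the physical address offset by tce_bypass_base (1ull << 59, line 1933); that same value lands in dev.archdata.dma_offset at line 1154, so the DMA API translates by simple addition. Numerically, with an illustrative physical address:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t tce_bypass_base = 1ull << 59;
        uint64_t phys = 0x2000beef000ull;        /* illustrative address */
        uint64_t dma  = tce_bypass_base + phys;  /* address the device issues */

        printf("bypass DMA address: 0x%016llx\n", (unsigned long long)dma);
        return 0;
    }
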
2033 struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev); in __pnv_pci_ioda_msi_setup() local
2040 /* No PE assigned ? bail out ... no MSI for you ! */ in __pnv_pci_ioda_msi_setup()
2041 if (pe == NULL) in __pnv_pci_ioda_msi_setup()
2045 if (pe->mve_number < 0) in __pnv_pci_ioda_msi_setup()
2052 /* Assign XIVE to PE */ in __pnv_pci_ioda_msi_setup()
2053 rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num); in __pnv_pci_ioda_msi_setup()
2055 pr_warn("%s: OPAL error %d setting XIVE %d PE\n", in __pnv_pci_ioda_msi_setup()
2063 rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1, in __pnv_pci_ioda_msi_setup()
2075 rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1, in __pnv_pci_ioda_msi_setup()
2320 static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe, in pnv_ioda_setup_pe_res() argument
2323 struct pnv_phb *phb = pe->phb; in pnv_ioda_setup_pe_res()
2338 phb->ioda.io_segmap[index] = pe->pe_number; in pnv_ioda_setup_pe_res()
2340 pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index); in pnv_ioda_setup_pe_res()
2342 pr_err("%s: Error %lld mapping IO segment#%d to PE#%x\n", in pnv_ioda_setup_pe_res()
2343 __func__, rc, index, pe->pe_number); in pnv_ioda_setup_pe_res()
2362 phb->ioda.m32_segmap[index] = pe->pe_number; in pnv_ioda_setup_pe_res()
2364 pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index); in pnv_ioda_setup_pe_res()
2366 pr_err("%s: Error %lld mapping M32 segment#%d to PE#%x", in pnv_ioda_setup_pe_res()
2367 __func__, rc, index, pe->pe_number); in pnv_ioda_setup_pe_res()
2378 * This function is supposed to be called on the basis of PE from top
2380 * parent PE could be overridden by its child PEs if necessary.
2382 static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe) in pnv_ioda_setup_pe_seg() argument
2388 * NOTE: We only care about PCI bus based PEs for now. For PCI in pnv_ioda_setup_pe_seg()
2389 * device based PE, for example SRIOV sensitive VF should in pnv_ioda_setup_pe_seg()
2392 BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))); in pnv_ioda_setup_pe_seg()
2394 list_for_each_entry(pdev, &pe->pbus->devices, bus_list) { in pnv_ioda_setup_pe_seg()
2396 pnv_ioda_setup_pe_res(pe, &pdev->resource[i]); in pnv_ioda_setup_pe_seg()
2399 * If the PE contains all subordinate PCI buses, the in pnv_ioda_setup_pe_seg()
2401 * the PE as well. in pnv_ioda_setup_pe_seg()
2403 if (!(pe->flags & PNV_IODA_PE_BUS_ALL) || !pci_is_bridge(pdev)) in pnv_ioda_setup_pe_seg()
2406 pnv_ioda_setup_pe_res(pe, in pnv_ioda_setup_pe_seg()
2437 struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_num]; in pnv_pci_ioda_pe_dump() local
2442 pe_warn(pe, "rid: %04x dev count: %2d flags: %s%s%s%s%s%s\n", in pnv_pci_ioda_pe_dump()
2443 pe->rid, pe->device_count, in pnv_pci_ioda_pe_dump()
2444 (pe->flags & PNV_IODA_PE_DEV) ? "dev " : "", in pnv_pci_ioda_pe_dump()
2445 (pe->flags & PNV_IODA_PE_BUS) ? "bus " : "", in pnv_pci_ioda_pe_dump()
2446 (pe->flags & PNV_IODA_PE_BUS_ALL) ? "all " : "", in pnv_pci_ioda_pe_dump()
2447 (pe->flags & PNV_IODA_PE_MASTER) ? "master " : "", in pnv_pci_ioda_pe_dump()
2448 (pe->flags & PNV_IODA_PE_SLAVE) ? "slave " : "", in pnv_pci_ioda_pe_dump()
2449 (pe->flags & PNV_IODA_PE_VF) ? "vf " : ""); in pnv_pci_ioda_pe_dump()
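
pnv_pci_ioda_pe_dump() decodes the flag word by chaining one conditional string per bit rather than using a lookup table, which keeps the output line compact. The same idiom standalone; the flag values here are illustrative, not the kernel's:

    #include <stdio.h>

    #define PE_DEV     0x01
    #define PE_BUS     0x02
    #define PE_BUS_ALL 0x04
    #define PE_MASTER  0x08
    #define PE_SLAVE   0x10
    #define PE_VF      0x20

    static void dump_flags(unsigned flags)
    {
        printf("flags: %s%s%s%s%s%s\n",
               (flags & PE_DEV)     ? "dev "    : "",
               (flags & PE_BUS)     ? "bus "    : "",
               (flags & PE_BUS_ALL) ? "all "    : "",
               (flags & PE_MASTER)  ? "master " : "",
               (flags & PE_SLAVE)   ? "slave "  : "",
               (flags & PE_VF)      ? "vf "     : "");
    }
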
2530 * For now, we return I/O or M32 segment size for PE sensitive
2534 * The current PCI bus might be put into one PE, which was
2631 struct pnv_ioda_pe *pe; in pnv_pci_configure_bus() local
2634 dev_info(&bus->dev, "Configuring PE for bus\n"); in pnv_pci_configure_bus()
2636 /* Don't assign PE to PCI bus, which doesn't have subordinate devices */ in pnv_pci_configure_bus()
2644 * Assign PE. We might run here because of partial hotplug. in pnv_pci_configure_bus()
2645 * In that case, we just pick up the existing PE and should in pnv_pci_configure_bus()
2648 pe = pnv_ioda_setup_bus_PE(bus, all); in pnv_pci_configure_bus()
2649 if (!pe) in pnv_pci_configure_bus()
2652 pnv_ioda_setup_pe_seg(pe); in pnv_pci_configure_bus()
2661 * assign a PE
2669 pci_err(dev, "pci_enable_device() blocked, no PE assigned.\n"); in pnv_pci_enable_device_hook()
2679 struct pnv_ioda_pe *pe; in pnv_ocapi_enable_device_hook() local
2686 pe = pnv_ioda_setup_dev_PE(dev); in pnv_ocapi_enable_device_hook()
2687 if (!pe) in pnv_ocapi_enable_device_hook()
2696 struct pnv_ioda_pe *pe = container_of(table_group, in pnv_pci_ioda1_unset_window() local
2698 struct pnv_phb *phb = pe->phb; in pnv_pci_ioda1_unset_window()
2702 pe_info(pe, "Removing DMA window #%d\n", num); in pnv_pci_ioda1_unset_window()
2704 if (phb->ioda.dma32_segmap[idx] != pe->pe_number) in pnv_pci_ioda1_unset_window()
2707 rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number, in pnv_pci_ioda1_unset_window()
2710 pe_warn(pe, "Failure %ld unmapping DMA32 segment#%d\n", in pnv_pci_ioda1_unset_window()
2722 static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe) in pnv_pci_ioda1_release_pe_dma() argument
2724 struct iommu_table *tbl = pe->table_group.tables[0]; in pnv_pci_ioda1_release_pe_dma()
2727 if (!pe->dma_setup_done) in pnv_pci_ioda1_release_pe_dma()
2730 rc = pnv_pci_ioda1_unset_window(&pe->table_group, 0); in pnv_pci_ioda1_release_pe_dma()
2735 if (pe->table_group.group) { in pnv_pci_ioda1_release_pe_dma()
2736 iommu_group_put(pe->table_group.group); in pnv_pci_ioda1_release_pe_dma()
2737 WARN_ON(pe->table_group.group); in pnv_pci_ioda1_release_pe_dma()
2744 void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe) in pnv_pci_ioda2_release_pe_dma() argument
2746 struct iommu_table *tbl = pe->table_group.tables[0]; in pnv_pci_ioda2_release_pe_dma()
2749 if (!pe->dma_setup_done) in pnv_pci_ioda2_release_pe_dma()
2752 rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0); in pnv_pci_ioda2_release_pe_dma()
2754 pe_warn(pe, "OPAL error %lld release DMA window\n", rc); in pnv_pci_ioda2_release_pe_dma()
2756 pnv_pci_ioda2_set_bypass(pe, false); in pnv_pci_ioda2_release_pe_dma()
2757 if (pe->table_group.group) { in pnv_pci_ioda2_release_pe_dma()
2758 iommu_group_put(pe->table_group.group); in pnv_pci_ioda2_release_pe_dma()
2759 WARN_ON(pe->table_group.group); in pnv_pci_ioda2_release_pe_dma()
2765 static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe, in pnv_ioda_free_pe_seg() argument
2769 struct pnv_phb *phb = pe->phb; in pnv_ioda_free_pe_seg()
2774 if (map[idx] != pe->pe_number) in pnv_ioda_free_pe_seg()
2781 pe_warn(pe, "Error %lld unmapping (%d) segment#%d\n", in pnv_ioda_free_pe_seg()
2788 static void pnv_ioda_release_pe_seg(struct pnv_ioda_pe *pe) in pnv_ioda_release_pe_seg() argument
2790 struct pnv_phb *phb = pe->phb; in pnv_ioda_release_pe_seg()
2793 pnv_ioda_free_pe_seg(pe, OPAL_IO_WINDOW_TYPE, in pnv_ioda_release_pe_seg()
2795 pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE, in pnv_ioda_release_pe_seg()
2799 pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE, in pnv_ioda_release_pe_seg()
2804 static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe) in pnv_ioda_release_pe() argument
2806 struct pnv_phb *phb = pe->phb; in pnv_ioda_release_pe()
2809 pe_info(pe, "Releasing PE\n"); in pnv_ioda_release_pe()
2812 list_del(&pe->list); in pnv_ioda_release_pe()
2817 pnv_pci_ioda1_release_pe_dma(pe); in pnv_ioda_release_pe()
2820 pnv_pci_ioda2_release_pe_dma(pe); in pnv_ioda_release_pe()
2828 pnv_ioda_release_pe_seg(pe); in pnv_ioda_release_pe()
2829 pnv_ioda_deconfigure_pe(pe->phb, pe); in pnv_ioda_release_pe()
2831 /* Release slave PEs in the compound PE */ in pnv_ioda_release_pe()
2832 if (pe->flags & PNV_IODA_PE_MASTER) { in pnv_ioda_release_pe()
2833 list_for_each_entry_safe(slave, tmp, &pe->slaves, list) { in pnv_ioda_release_pe()
2840 * The PE for root bus can be removed because of hotplug in EEH in pnv_ioda_release_pe()
2841 * recovery for fenced PHB error. We need to mark the PE dead so in pnv_ioda_release_pe()
2842 * that it can be populated again in PCI hot add path. The PE in pnv_ioda_release_pe()
2845 if (phb->ioda.root_pe_idx == pe->pe_number) in pnv_ioda_release_pe()
2848 pnv_ioda_free_pe(pe); in pnv_ioda_release_pe()
2855 struct pnv_ioda_pe *pe; in pnv_pci_release_device() local
2857 /* The VF PE state is torn down when sriov_disable() is called */ in pnv_pci_release_device()
2877 * set the PE number in @pdn to an invalid one. Otherwise, the PE's in pnv_pci_release_device()
2879 * be increased on adding devices. It leads to unbalanced PE's device in pnv_pci_release_device()
2882 pe = &phb->ioda.pe_array[pdn->pe_number]; in pnv_pci_release_device()
2885 WARN_ON(--pe->device_count < 0); in pnv_pci_release_device()
2886 if (pe->device_count == 0) in pnv_pci_release_device()
2887 pnv_ioda_release_pe(pe); in pnv_pci_release_device()
2901 struct pnv_ioda_pe *pe; in pnv_pci_ioda_dma_bus_setup() local
2903 list_for_each_entry(pe, &phb->ioda.pe_list, list) { in pnv_pci_ioda_dma_bus_setup()
2904 if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))) in pnv_pci_ioda_dma_bus_setup()
2907 if (!pe->pbus) in pnv_pci_ioda_dma_bus_setup()
2910 if (bus->number == ((pe->rid >> 8) & 0xFF)) { in pnv_pci_ioda_dma_bus_setup()
2911 pe->pbus = bus; in pnv_pci_ioda_dma_bus_setup()
3033 prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL); in pnv_pci_init_ioda_phb()
3037 /* Invalidate RID to PE# mapping */ in pnv_pci_init_ioda_phb()
3097 * Choose PE number for root bus, which shouldn't have in pnv_pci_init_ioda_phb()
3099 * the PE number adjacent to the reserved one if possible. in pnv_pci_init_ioda_phb()
3130 pr_info(" %03d (%03d) PE's M32: 0x%x [segment=0x%x]\n", in pnv_pci_init_ioda_phb()
3154 * the child P2P bridges) can form individual PE. in pnv_pci_init_ioda_phb()