
Searched full:pe (Results 1 – 25 of 392) sorted by relevance


/Linux-v5.10/drivers/net/ethernet/marvell/mvpp2/
mvpp2_prs.c
22 static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) in mvpp2_prs_hw_write() argument
26 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) in mvpp2_prs_hw_write()
30 pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; in mvpp2_prs_hw_write()
33 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); in mvpp2_prs_hw_write()
35 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]); in mvpp2_prs_hw_write()
38 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); in mvpp2_prs_hw_write()
40 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]); in mvpp2_prs_hw_write()
46 int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, in mvpp2_prs_init_from_hw() argument
54 memset(pe, 0, sizeof(*pe)); in mvpp2_prs_init_from_hw()
55 pe->index = tid; in mvpp2_prs_init_from_hw()
[all …]
/Linux-v5.10/arch/powerpc/kernel/
eeh_pe.c
3 * The file intends to implement PE based on the information from
7 * PE is only meaningful in one PHB domain.
26 * eeh_set_pe_aux_size - Set PE auxiliary data size
27 * @size: PE auxiliary data size
29 * Set PE auxiliary data size
40 * eeh_pe_alloc - Allocate PE
42 * @type: PE type
44 * Allocate PE instance dynamically.
48 struct eeh_pe *pe; in eeh_pe_alloc() local
57 /* Allocate PHB PE */ in eeh_pe_alloc()
[all …]
eeh_driver.c
90 if (eeh_pe_passed(edev->pe)) in eeh_edev_actionable()
207 if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) in eeh_dev_save_state()
219 struct eeh_pe *pe; in eeh_set_channel_state() local
222 eeh_for_each_pe(root, pe) in eeh_set_channel_state()
223 eeh_pe_for_each_dev(pe, edev, tmp) in eeh_set_channel_state()
230 struct eeh_pe *pe; in eeh_set_irq_state() local
233 eeh_for_each_pe(root, pe) { in eeh_set_irq_state()
234 eeh_pe_for_each_dev(pe, edev, tmp) { in eeh_set_irq_state()
293 !eeh_dev_removed(edev), !eeh_pe_passed(edev->pe));
304 struct eeh_pe *pe; in eeh_pe_report() local
[all …]
eeh.c
89 * PE would be created there.
95 * EEH allowed maximal frozen times. If one particular PE's
96 * frozen count in last hour exceeds this limit, the PE will
137 u64 slot_resets; /* PE reset */
176 edev->pe->phb->global_number, edev->bdfn >> 8, in eeh_dump_dev_log()
179 edev->pe->phb->global_number, edev->bdfn >> 8, in eeh_dump_dev_log()
269 static void *eeh_dump_pe_log(struct eeh_pe *pe, void *flag) in eeh_dump_pe_log() argument
274 eeh_pe_for_each_dev(pe, edev, tmp) in eeh_dump_pe_log()
283 * @pe: EEH PE
291 void eeh_slot_error_detail(struct eeh_pe *pe, int severity) in eeh_slot_error_detail() argument
[all …]
eeh_event.c
60 /* We might have event without binding PE */ in eeh_event_handler()
61 if (event->pe) in eeh_event_handler()
62 eeh_handle_normal_event(event->pe); in eeh_event_handler()
96 * @pe: EEH PE
102 int __eeh_send_failure_event(struct eeh_pe *pe) in __eeh_send_failure_event() argument
112 event->pe = pe; in __eeh_send_failure_event()
115 * Mark the PE as recovering before inserting it in the queue. in __eeh_send_failure_event()
116 * This prevents the PE from being free()ed by a hotplug driver in __eeh_send_failure_event()
117 * while the PE is sitting in the event queue. in __eeh_send_failure_event()
119 if (pe) { in __eeh_send_failure_event()
[all …]
/Linux-v5.10/net/netfilter/ipvs/
ip_vs_pe.c
14 /* IPVS pe list */
20 /* Get pe in the pe list by name */
23 struct ip_vs_pe *pe; in __ip_vs_pe_getbyname() local
29 list_for_each_entry_rcu(pe, &ip_vs_pe, n_list) { in __ip_vs_pe_getbyname()
31 if (pe->module && in __ip_vs_pe_getbyname()
32 !try_module_get(pe->module)) { in __ip_vs_pe_getbyname()
33 /* This pe is just deleted */ in __ip_vs_pe_getbyname()
36 if (strcmp(pe_name, pe->name)==0) { in __ip_vs_pe_getbyname()
39 return pe; in __ip_vs_pe_getbyname()
41 module_put(pe->module); in __ip_vs_pe_getbyname()
[all …]
/Linux-v5.10/arch/powerpc/platforms/powernv/
pci-ioda.c
53 static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
56 void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level, in pe_level_printk() argument
68 if (pe->flags & PNV_IODA_PE_DEV) in pe_level_printk()
69 strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix)); in pe_level_printk()
70 else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) in pe_level_printk()
72 pci_domain_nr(pe->pbus), pe->pbus->number); in pe_level_printk()
74 else if (pe->flags & PNV_IODA_PE_VF) in pe_level_printk()
76 pci_domain_nr(pe->parent_dev->bus), in pe_level_printk()
77 (pe->rid & 0xff00) >> 8, in pe_level_printk()
78 PCI_SLOT(pe->rid), PCI_FUNC(pe->rid)); in pe_level_printk()
[all …]
eeh-powernv.c
69 struct eeh_pe *pe; in pnv_eeh_ei_write() local
89 /* Retrieve PE */ in pnv_eeh_ei_write()
90 pe = eeh_pe_get(hose, pe_no); in pnv_eeh_ei_write()
91 if (!pe) in pnv_eeh_ei_write()
95 ret = eeh_ops->err_inject(pe, type, func, addr, mask); in pnv_eeh_ei_write()
155 * to clear frozen PE during PCI config access. in pnv_eeh_enable_phbs()
300 /* for VFs we use the PF's PE as the upstream PE */ in pnv_eeh_get_upstream_pe()
305 /* otherwise use the PE of our parent bridge */ in pnv_eeh_get_upstream_pe()
338 if (!edev || edev->pe) in pnv_eeh_probe()
378 /* Create PE */ in pnv_eeh_probe()
[all …]
npu-dma.c
33 * the PCI device, but callers don't need that actually as the PE in get_pci_dev()
95 * Returns the PE associated with the PCI device of the given
104 struct pnv_ioda_pe *pe; in get_gpu_pci_dev_and_pe() local
117 pe = &phb->ioda.pe_array[pdn->pe_number]; in get_gpu_pci_dev_and_pe()
122 return pe; in get_gpu_pci_dev_and_pe()
265 struct pnv_ioda_pe *pe[NV_NPU_MAX_PE_NUM]; member
283 if (!npucomp->pe_num || !npucomp->pe[0] || in pnv_npu_peers_create_table_userspace()
284 !npucomp->pe[0]->table_group.ops || in pnv_npu_peers_create_table_userspace()
285 !npucomp->pe[0]->table_group.ops->create_table) in pnv_npu_peers_create_table_userspace()
288 return npucomp->pe[0]->table_group.ops->create_table( in pnv_npu_peers_create_table_userspace()
[all …]
pci.h
29 #define PNV_IODA_PE_DEV (1 << 0) /* PE has single PCI device */
30 #define PNV_IODA_PE_BUS (1 << 1) /* PE has primary PCI bus */
31 #define PNV_IODA_PE_BUS_ALL (1 << 2) /* PE has subordinate buses */
32 #define PNV_IODA_PE_MASTER (1 << 3) /* Master PE in compound case */
33 #define PNV_IODA_PE_SLAVE (1 << 4) /* Slave PE in compound case */
34 #define PNV_IODA_PE_VF (1 << 5) /* PE for one VF */
41 * (and PE) that initiated a DMA. In legacy PCI individual memory read/write
51 * bus of the bridge should go into the same PE.
54 /* Indicates operations are frozen for a PE: MMIO in PESTA & DMA in PESTB. */
57 /* Data associated with a PE, including IOMMU tracking etc.. */
[all …]
pci-sriov.c
17 * the need to put the MMIO space for each VF into a separate PE. Internally
18 * the PHB maps MMIO addresses to a specific PE using the "Memory BAR Table".
42 * segments. The n'th segment is mapped to the n'th PE.
43 * b) An un-segmented BAR that maps the whole address range to a specific PE.
84 * This is where we actually allocate PE numbers for each VF and setup the
88 * ability because the PE space is shared by all devices on the same PHB.
89 * When using mode a) described above segment 0 in maps to PE#0 which might
92 As a result we need to allocate a contiguous range of PE numbers, then shift
95 * PE number. This is handled in pnv_pci_vf_resource_shift().
100 * PE that we allocated for it rather than the PE associated with the bus.
[all …]
/Linux-v5.10/arch/powerpc/include/asm/
eeh.h
34 * Delay for PE reset, all in ms
44 * The struct is used to trace PE related EEH functionality.
46 * be created against particular PE. In nature, PEs correlate
49 * PE has EEH errors.
51 * Also, one particular PE might be composed of PCI device, PCI
53 the information. Furthermore, one particular PE is only meaningful
58 #define EEH_PE_PHB (1 << 1) /* PHB PE */
59 #define EEH_PE_DEVICE (1 << 2) /* Device PE */
60 #define EEH_PE_BUS (1 << 3) /* Bus PE */
61 #define EEH_PE_VF (1 << 4) /* VF PE */
[all …]
/Linux-v5.10/arch/powerpc/platforms/pseries/
eeh_pseries.c
72 * parent PE in pseries_eeh_init_edev(). in pseries_pcibios_bus_add_device()
74 struct eeh_pe *physfn_pe = pci_dev_to_eeh_dev(pdev->physfn)->pe; in pseries_pcibios_bus_add_device()
78 eeh_pe_tree_remove(edev); /* Remove as it is adding to bus pe */ in pseries_pcibios_bus_add_device()
79 eeh_pe_tree_insert(edev, physfn_pe); /* Add as VF PE type */ in pseries_pcibios_bus_add_device()
91 * pe_config_addr) as a handle to a given PE. This function finds the
108 * part of a PE or not. ret[0] being zero indicates it's not. in pseries_eeh_get_pe_config_addr()
116 /* Retrieve the associated PE config address with function 0 */ in pseries_eeh_get_pe_config_addr()
121 pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n", in pseries_eeh_get_pe_config_addr()
134 pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n", in pseries_eeh_get_pe_config_addr()
158 * Reset the specified PHB/PE
[all …]
/Linux-v5.10/arch/alpha/include/asm/
core_marvel.h
57 #define EV7_IPE(pe) ((~((long)(pe)) & EV7_PE_MASK) << 35) argument
59 #define EV7_CSR_PHYS(pe, off) (EV7_IPE(pe) | (0x7FFCUL << 20) | (off)) argument
60 #define EV7_CSRS_PHYS(pe) (EV7_CSR_PHYS(pe, 0UL)) argument
62 #define EV7_CSR_KERN(pe, off) (EV7_KERN_ADDR(EV7_CSR_PHYS(pe, off))) argument
63 #define EV7_CSRS_KERN(pe) (EV7_KERN_ADDR(EV7_CSRS_PHYS(pe))) argument
249 #define IO7_IPE(pe) (EV7_IPE(pe)) argument
252 #define IO7_HOSE(pe, port) (IO7_IPE(pe) | IO7_IPORT(port)) argument
254 #define IO7_MEM_PHYS(pe, port) (IO7_HOSE(pe, port) | 0x00000000UL) argument
255 #define IO7_CONF_PHYS(pe, port) (IO7_HOSE(pe, port) | 0xFE000000UL) argument
256 #define IO7_IO_PHYS(pe, port) (IO7_HOSE(pe, port) | 0xFF000000UL) argument
[all …]
/Linux-v5.10/drivers/misc/cxl/
trace.h
20 { CXL_PSL9_DSISR_An_PE, "PE" }, \
30 { CXL_PSL_DSISR_An_PE, "PE" }, \
71 __field(u16, pe)
77 __entry->pe = ctx->pe;
80 TP_printk("afu%i.%i pe=%i",
83 __entry->pe
96 __field(u16, pe)
106 __entry->pe = ctx->pe;
113 TP_printk("afu%i.%i pid=%i pe=%i wed=0x%016llx irqs=%i amr=0x%llx",
117 __entry->pe,
[all …]
/Linux-v5.10/drivers/gpu/drm/amd/amdgpu/
amdgpu_pmu.c
64 struct amdgpu_pmu_entry *pe = container_of(event->pmu, in amdgpu_perf_start() local
74 switch (pe->pmu_perf_type) { in amdgpu_perf_start()
77 pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, 1); in amdgpu_perf_start()
79 pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, 0); in amdgpu_perf_start()
93 struct amdgpu_pmu_entry *pe = container_of(event->pmu, in amdgpu_perf_read() local
102 switch (pe->pmu_perf_type) { in amdgpu_perf_read()
104 pe->adev->df.funcs->pmc_get_count(pe->adev, hwc->config, in amdgpu_perf_read()
120 struct amdgpu_pmu_entry *pe = container_of(event->pmu, in amdgpu_perf_stop() local
127 switch (pe->pmu_perf_type) { in amdgpu_perf_stop()
129 pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, 0); in amdgpu_perf_stop()
[all …]
/Linux-v5.10/tools/perf/arch/x86/tests/
intel-cqm.c
44 struct perf_event_attr pe; in test__intel_cqm_count_nmi_context() local
72 memset(&pe, 0, sizeof(pe)); in test__intel_cqm_count_nmi_context()
73 pe.size = sizeof(pe); in test__intel_cqm_count_nmi_context()
75 pe.type = PERF_TYPE_HARDWARE; in test__intel_cqm_count_nmi_context()
76 pe.config = PERF_COUNT_HW_CPU_CYCLES; in test__intel_cqm_count_nmi_context()
77 pe.read_format = PERF_FORMAT_GROUP; in test__intel_cqm_count_nmi_context()
79 pe.sample_period = 128; in test__intel_cqm_count_nmi_context()
80 pe.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_READ; in test__intel_cqm_count_nmi_context()
84 fd[0] = sys_perf_event_open(&pe, pid, -1, -1, flag); in test__intel_cqm_count_nmi_context()
90 memset(&pe, 0, sizeof(pe)); in test__intel_cqm_count_nmi_context()
[all …]
/Linux-v5.10/tools/perf/tests/
bp_signal_overflow.c
64 struct perf_event_attr pe; in test__bp_signal_overflow() local
79 memset(&pe, 0, sizeof(struct perf_event_attr)); in test__bp_signal_overflow()
80 pe.type = PERF_TYPE_BREAKPOINT; in test__bp_signal_overflow()
81 pe.size = sizeof(struct perf_event_attr); in test__bp_signal_overflow()
83 pe.config = 0; in test__bp_signal_overflow()
84 pe.bp_type = HW_BREAKPOINT_X; in test__bp_signal_overflow()
85 pe.bp_addr = (unsigned long) test_function; in test__bp_signal_overflow()
86 pe.bp_len = sizeof(long); in test__bp_signal_overflow()
88 pe.sample_period = THRESHOLD; in test__bp_signal_overflow()
89 pe.sample_type = PERF_SAMPLE_IP; in test__bp_signal_overflow()
[all …]
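
The two perf test hits above (intel-cqm.c and bp_signal_overflow.c) follow the same pattern: zero a struct perf_event_attr, fill in the event type, config, and sampling fields, then open the event through the perf_event_open() syscall. The sketch below is a minimal standalone userspace illustration of that pattern for the hardware-breakpoint case; it is not taken from the listing, the target() function and the sample_period value are placeholders, and the kernel tests use their own sys_perf_event_open() wrapper rather than the raw syscall shown here.

/* Minimal sketch: open an execute hardware breakpoint on one function. */
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static void target(void)
{
        /* Placeholder function whose address is used as the breakpoint target. */
}

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        /* glibc provides no wrapper for this syscall, so invoke it directly. */
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr pe;
        int fd;

        memset(&pe, 0, sizeof(pe));
        pe.type = PERF_TYPE_BREAKPOINT;   /* hardware breakpoint event */
        pe.size = sizeof(pe);
        pe.config = 0;                    /* must be 0 for breakpoint events */
        pe.bp_type = HW_BREAKPOINT_X;     /* break on instruction fetch */
        pe.bp_addr = (unsigned long) target;
        pe.bp_len = sizeof(long);         /* required length for execute breakpoints */
        pe.sample_period = 1;             /* placeholder: sample every hit */
        pe.sample_type = PERF_SAMPLE_IP;
        pe.exclude_kernel = 1;            /* count user-space hits only */
        pe.exclude_hv = 1;

        fd = perf_event_open(&pe, 0 /* this process */, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        target();                         /* trigger the breakpoint once */
        close(fd);
        return 0;
}
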
pe-file.c
3 // pe-file.exe and pe-file.exe.debug built with;
4 // x86_64-w64-mingw32-gcc -o pe-file.exe pe-file.c
7 // --compress-debug-sections pe-file.exe pe-file.exe.debug
9 // --add-gnu-debuglink=pe-file.exe.debug pe-file.exe
/Linux-v5.10/Documentation/powerpc/
Dpci_iov_resource_on_powernv.rst22 A Partitionable Endpoint (PE) is a way to group the various resources
28 There is thus, in HW, a table of PE states that contains a pair of "frozen"
30 cleared independently) for each PE.
32 When a PE is frozen, all stores in any direction are dropped and all loads
54 correspondence between a PCIe RID (bus/dev/fn) with a PE number.
57 - For DMA we then provide an entire address space for each PE that can
66 bridge being triggered. There's a PE# in the interrupt controller
67 descriptor table as well which is compared with the PE# obtained from
96 maps each segment to a PE#. That allows portions of the MMIO space
103 can be assigned to a PE.
[all …]
/Linux-v5.10/drivers/vfio/
vfio_spapr_eeh.c
34 struct eeh_pe *pe; in vfio_spapr_iommu_eeh_ioctl() local
47 pe = eeh_iommu_group_to_pe(group); in vfio_spapr_iommu_eeh_ioctl()
48 if (!pe) in vfio_spapr_iommu_eeh_ioctl()
59 ret = eeh_pe_set_option(pe, EEH_OPT_DISABLE); in vfio_spapr_iommu_eeh_ioctl()
62 ret = eeh_pe_set_option(pe, EEH_OPT_ENABLE); in vfio_spapr_iommu_eeh_ioctl()
65 ret = eeh_pe_set_option(pe, EEH_OPT_THAW_MMIO); in vfio_spapr_iommu_eeh_ioctl()
68 ret = eeh_pe_set_option(pe, EEH_OPT_THAW_DMA); in vfio_spapr_iommu_eeh_ioctl()
71 ret = eeh_pe_get_state(pe); in vfio_spapr_iommu_eeh_ioctl()
74 ret = eeh_pe_reset(pe, EEH_RESET_DEACTIVATE, true); in vfio_spapr_iommu_eeh_ioctl()
77 ret = eeh_pe_reset(pe, EEH_RESET_HOT, true); in vfio_spapr_iommu_eeh_ioctl()
[all …]
/Linux-v5.10/drivers/iommu/intel/
pasid.c
282 static inline void pasid_clear_entry(struct pasid_entry *pe) in pasid_clear_entry() argument
284 WRITE_ONCE(pe->val[0], 0); in pasid_clear_entry()
285 WRITE_ONCE(pe->val[1], 0); in pasid_clear_entry()
286 WRITE_ONCE(pe->val[2], 0); in pasid_clear_entry()
287 WRITE_ONCE(pe->val[3], 0); in pasid_clear_entry()
288 WRITE_ONCE(pe->val[4], 0); in pasid_clear_entry()
289 WRITE_ONCE(pe->val[5], 0); in pasid_clear_entry()
290 WRITE_ONCE(pe->val[6], 0); in pasid_clear_entry()
291 WRITE_ONCE(pe->val[7], 0); in pasid_clear_entry()
294 static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe) in pasid_clear_entry_with_fpd() argument
[all …]
/Linux-v5.10/drivers/pinctrl/freescale/
pinctrl-imx21.c
19 #define PE 4 macro
120 MX21_PAD_TEST_WB2 = PAD_ID(PE, 0),
121 MX21_PAD_TEST_WB1 = PAD_ID(PE, 1),
122 MX21_PAD_TEST_WB0 = PAD_ID(PE, 2),
123 MX21_PAD_UART2_CTS = PAD_ID(PE, 3),
124 MX21_PAD_UART2_RTS = PAD_ID(PE, 4),
125 MX21_PAD_PWMO = PAD_ID(PE, 5),
126 MX21_PAD_UART2_TXD = PAD_ID(PE, 6),
127 MX21_PAD_UART2_RXD = PAD_ID(PE, 7),
128 MX21_PAD_UART3_TXD = PAD_ID(PE, 8),
[all …]
pinctrl-imx27.c
23 #define PE 4 macro
150 MX27_PAD_USBOTG_NXT = PAD_ID(PE, 0),
151 MX27_PAD_USBOTG_STP = PAD_ID(PE, 1),
152 MX27_PAD_USBOTG_DIR = PAD_ID(PE, 2),
153 MX27_PAD_UART2_CTS = PAD_ID(PE, 3),
154 MX27_PAD_UART2_RTS = PAD_ID(PE, 4),
155 MX27_PAD_PWMO = PAD_ID(PE, 5),
156 MX27_PAD_UART2_TXD = PAD_ID(PE, 6),
157 MX27_PAD_UART2_RXD = PAD_ID(PE, 7),
158 MX27_PAD_UART3_TXD = PAD_ID(PE, 8),
[all …]
/Linux-v5.10/drivers/misc/ocxl/
link.c
50 struct radix_tree_root pe_tree; /* Maps PE handles to pe_data */
65 u64 pe; member
100 static void read_irq(struct spa *spa, u64 *dsisr, u64 *dar, u64 *pe) in read_irq() argument
107 *pe = reg & SPA_PE_MASK; in read_irq()
123 trace_ocxl_fault_ack(spa->spa_mem, spa->xsl_fault.pe, in ack_irq()
187 struct ocxl_process_element *pe; in xsl_fault_handler() local
195 pe = spa->spa_mem + pe_handle; in xsl_fault_handler()
196 pid = be32_to_cpu(pe->pid); in xsl_fault_handler()
197 /* We could be reading all null values here if the PE is being in xsl_fault_handler()
215 * AFU about PASID termination before removing the PE, in xsl_fault_handler()
[all …]
