
Searched refs:iop (Results 1 – 25 of 48) sorted by relevance

/Linux-v5.15/lib/
irq_poll.c
27 void irq_poll_sched(struct irq_poll *iop) in irq_poll_sched() argument
31 if (test_bit(IRQ_POLL_F_DISABLE, &iop->state)) in irq_poll_sched()
33 if (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state)) in irq_poll_sched()
37 list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll)); in irq_poll_sched()
51 static void __irq_poll_complete(struct irq_poll *iop) in __irq_poll_complete() argument
53 list_del(&iop->list); in __irq_poll_complete()
55 clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state); in __irq_poll_complete()
68 void irq_poll_complete(struct irq_poll *iop) in irq_poll_complete() argument
73 __irq_poll_complete(iop); in irq_poll_complete()
87 struct irq_poll *iop; in irq_poll_softirq() local
[all …]
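The lib/irq_poll.c hits above are the core of the kernel's IRQ-poll API: a hard-IRQ handler schedules a poll object, the softirq loop invokes the registered poll function with a budget, and the poll function calls irq_poll_complete() once its work is drained. The drivers/infiniband/core/cq.c hits at the end of this listing are a real consumer. What follows is only an illustrative sketch of that pattern, not part of the search output; struct my_dev, my_process_completions() and my_irq() are hypothetical names, while the irq_poll_* calls are the ones listed above.

#include <linux/interrupt.h>
#include <linux/irq_poll.h>

struct my_dev {				/* hypothetical driver context */
	struct irq_poll iop;
};

static int my_poll(struct irq_poll *iop, int budget)
{
	struct my_dev *dev = container_of(iop, struct my_dev, iop);
	int done = my_process_completions(dev, budget);	/* hypothetical helper */

	if (done < budget)
		irq_poll_complete(iop);		/* all work drained, stop polling */
	return done;				/* returning == budget keeps us scheduled */
}

static irqreturn_t my_irq(int irq, void *data)
{
	struct my_dev *dev = data;

	irq_poll_sched(&dev->iop);		/* defer the heavy lifting to softirq context */
	return IRQ_HANDLED;
}

/* at probe time: */
irq_poll_init(&dev->iop, 64, my_poll);

irq_poll_disable(), as used by ib_free_cq() in the cq.c hits below, is the matching teardown call.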
/Linux-v5.15/arch/m68k/mac/
iop.c
168 static __inline__ void iop_loadaddr(volatile struct mac_iop *iop, __u16 addr) in iop_loadaddr() argument
170 iop->ram_addr_lo = addr; in iop_loadaddr()
171 iop->ram_addr_hi = addr >> 8; in iop_loadaddr()
174 static __inline__ __u8 iop_readb(volatile struct mac_iop *iop, __u16 addr) in iop_readb() argument
176 iop->ram_addr_lo = addr; in iop_readb()
177 iop->ram_addr_hi = addr >> 8; in iop_readb()
178 return iop->ram_data; in iop_readb()
181 static __inline__ void iop_writeb(volatile struct mac_iop *iop, __u16 addr, __u8 data) in iop_writeb() argument
183 iop->ram_addr_lo = addr; in iop_writeb()
184 iop->ram_addr_hi = addr >> 8; in iop_writeb()
[all …]
Makefile
6 obj-y := config.o macints.o iop.o via.o oss.o psc.o \
/Linux-v5.15/drivers/iommu/
io-pgtable.c
37 struct io_pgtable *iop; in alloc_io_pgtable_ops() local
47 iop = fns->alloc(cfg, cookie); in alloc_io_pgtable_ops()
48 if (!iop) in alloc_io_pgtable_ops()
51 iop->fmt = fmt; in alloc_io_pgtable_ops()
52 iop->cookie = cookie; in alloc_io_pgtable_ops()
53 iop->cfg = *cfg; in alloc_io_pgtable_ops()
55 return &iop->ops; in alloc_io_pgtable_ops()
65 struct io_pgtable *iop; in free_io_pgtable_ops() local
70 iop = io_pgtable_ops_to_pgtable(ops); in free_io_pgtable_ops()
71 io_pgtable_tlb_flush_all(iop); in free_io_pgtable_ops()
[all …]
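io-pgtable.c is the format-independent allocator for IOMMU page-table walkers: alloc_io_pgtable_ops() picks the per-format init function, stores the cfg and cookie in struct io_pgtable, and returns the ops table; free_io_pgtable_ops() flushes the TLB and calls the format's free hook. The ipmmu-vmsa.c, mtk_iommu.c and msm_iommu.c hits below show the real call sites; the following is only a rough sketch of the same pattern, with placeholder cfg values and hypothetical my_flush_ops, my_domain, dev, iova and paddr.

#include <linux/io-pgtable.h>

struct io_pgtable_cfg cfg = {
	.pgsize_bitmap	= SZ_4K | SZ_2M,
	.ias		= 32,			/* input (IOVA) address bits */
	.oas		= 32,			/* output (physical) address bits */
	.coherent_walk	= true,
	.tlb		= &my_flush_ops,	/* hypothetical iommu_flush_ops, see io-pgtable.h below */
	.iommu_dev	= dev,
};
struct io_pgtable_ops *ops;

ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &cfg, my_domain /* cookie handed back to the TLB callbacks */);
if (!ops)
	return -ENOMEM;

/* all page-table manipulation then goes through the ops table */
ret  = ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
phys = ops->iova_to_phys(ops, iova);
ops->unmap(ops, iova, SZ_4K, NULL);

free_io_pgtable_ops(ops);		/* tears down the walker and its tables */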
io-pgtable-arm.c
31 container_of((x), struct arm_lpae_io_pgtable, iop)
145 struct io_pgtable iop; member
267 struct io_pgtable_cfg *cfg = &data->iop.cfg; in __arm_lpae_init_pte()
271 if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1) in __arm_lpae_init_pte()
291 if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) { in arm_lpae_init_pte()
354 struct io_pgtable_cfg *cfg = &data->iop.cfg; in __arm_lpae_map()
390 if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) { in __arm_lpae_map()
408 if (data->iop.fmt == APPLE_DART) { in arm_lpae_prot_to_pte()
417 if (data->iop.fmt == ARM_64_LPAE_S1 || in arm_lpae_prot_to_pte()
418 data->iop.fmt == ARM_32_LPAE_S1) { in arm_lpae_prot_to_pte()
[all …]
io-pgtable-arm-v7s.c
40 container_of((x), struct arm_v7s_io_pgtable, iop)
165 struct io_pgtable iop; member
231 return phys_to_virt(iopte_to_paddr(pte, lvl, &data->iop.cfg)); in iopte_deref()
237 struct io_pgtable_cfg *cfg = &data->iop.cfg; in __arm_v7s_alloc_table()
285 struct io_pgtable_cfg *cfg = &data->iop.cfg; in __arm_v7s_free_table()
418 struct io_pgtable_cfg *cfg = &data->iop.cfg; in arm_v7s_init_pte()
479 struct io_pgtable_cfg *cfg = &data->iop.cfg; in __arm_v7s_map()
529 if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) || in arm_v7s_map_pages()
530 paddr >= (1ULL << data->iop.cfg.oas))) in arm_v7s_map_pages()
563 static void arm_v7s_free_pgtable(struct io_pgtable *iop) in arm_v7s_free_pgtable() argument
[all …]
ipmmu-vmsa.c
76 struct io_pgtable_ops *iop; member
456 domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg, in ipmmu_domain_init_context()
458 if (!domain->iop) { in ipmmu_domain_init_context()
591 free_io_pgtable_ops(domain->iop); in ipmmu_domain_free()
667 return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp); in ipmmu_map()
675 return domain->iop->unmap(domain->iop, iova, size, gather); in ipmmu_unmap()
699 return domain->iop->iova_to_phys(domain->iop, iova); in ipmmu_iova_to_phys()
mtk_iommu.c
127 struct io_pgtable_ops *iop; member
394 dom->iop = data->m4u_dom->iop; in mtk_iommu_domain_finalise()
414 dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data); in mtk_iommu_domain_finalise()
415 if (!dom->iop) { in mtk_iommu_domain_finalise()
508 return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp); in mtk_iommu_map()
518 return dom->iop->unmap(dom->iop, iova, size, gather); in mtk_iommu_unmap()
552 pa = dom->iop->iova_to_phys(dom->iop, iova); in mtk_iommu_iova_to_phys()
msm_iommu.c
44 struct io_pgtable_ops *iop; member
352 priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv); in msm_iommu_domain_config()
353 if (!priv->iop) { in msm_iommu_domain_config()
459 free_io_pgtable_ops(priv->iop); in msm_iommu_detach_dev()
485 ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC); in msm_iommu_map()
506 len = priv->iop->unmap(priv->iop, iova, len, gather); in msm_iommu_unmap()
/Linux-v5.15/drivers/md/bcache/
request.c
482 struct data_insert_op iop; member
499 s->iop.status = bio->bi_status; in bch_cache_read_endio()
501 ptr_stale(s->iop.c, &b->key, 0)) { in bch_cache_read_endio()
502 atomic_long_inc(&s->iop.c->cache_read_races); in bch_cache_read_endio()
503 s->iop.status = BLK_STS_IOERR; in bch_cache_read_endio()
506 bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache"); in bch_cache_read_endio()
520 if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0) in cache_lookup_fn()
523 if (KEY_INODE(k) != s->iop.inode || in cache_lookup_fn()
526 unsigned int sectors = KEY_INODE(k) == s->iop.inode in cache_lookup_fn()
557 bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key); in cache_lookup_fn()
[all …]
/Linux-v5.15/include/linux/
io-pgtable.h
216 static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop) in io_pgtable_tlb_flush_all() argument
218 if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_all) in io_pgtable_tlb_flush_all()
219 iop->cfg.tlb->tlb_flush_all(iop->cookie); in io_pgtable_tlb_flush_all()
223 io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova, in io_pgtable_tlb_flush_walk() argument
226 if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_walk) in io_pgtable_tlb_flush_walk()
227 iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie); in io_pgtable_tlb_flush_walk()
231 io_pgtable_tlb_add_page(struct io_pgtable *iop, in io_pgtable_tlb_add_page() argument
235 if (iop->cfg.tlb && iop->cfg.tlb->tlb_add_page) in io_pgtable_tlb_add_page()
236 iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie); in io_pgtable_tlb_add_page()
248 void (*free)(struct io_pgtable *iop);
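These inline helpers dispatch to the optional TLB-maintenance callbacks a driver installs in io_pgtable_cfg.tlb; each call is guarded so a format can run with no callback present. As an illustration only, a minimal iommu_flush_ops that these helpers would end up invoking could look like the following sketch (the my_* names are hypothetical; the callback signatures match the guarded calls above).

#include <linux/io-pgtable.h>

static void my_tlb_flush_all(void *cookie)
{
	/* invalidate the whole TLB for the domain identified by cookie */
}

static void my_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, void *cookie)
{
	/* invalidate a range after intermediate (table) entries changed */
}

static void my_tlb_add_page(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie)
{
	/* queue a single leaf invalidation, to be flushed later by iotlb_sync */
}

static const struct iommu_flush_ops my_flush_ops = {
	.tlb_flush_all	= my_tlb_flush_all,
	.tlb_flush_walk	= my_tlb_flush_walk,
	.tlb_add_page	= my_tlb_add_page,
};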
/Linux-v5.15/arch/powerpc/platforms/8xx/
cpm1.c
299 struct cpm_ioport32e __iomem *iop; in cpm1_set_pin32() local
303 iop = (struct cpm_ioport32e __iomem *) in cpm1_set_pin32()
306 iop = (struct cpm_ioport32e __iomem *) in cpm1_set_pin32()
310 setbits32(&iop->dir, pin); in cpm1_set_pin32()
312 clrbits32(&iop->dir, pin); in cpm1_set_pin32()
315 setbits32(&iop->par, pin); in cpm1_set_pin32()
317 clrbits32(&iop->par, pin); in cpm1_set_pin32()
328 setbits32(&iop->sor, pin); in cpm1_set_pin32()
330 clrbits32(&iop->sor, pin); in cpm1_set_pin32()
341 struct cpm_ioport16 __iomem *iop = in cpm1_set_pin16() local
[all …]
/Linux-v5.15/fs/iomap/
buffered-io.c
54 struct iomap_page *iop = to_iomap_page(page); in iomap_page_create() local
57 if (iop || nr_blocks <= 1) in iomap_page_create()
58 return iop; in iomap_page_create()
60 iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)), in iomap_page_create()
62 spin_lock_init(&iop->uptodate_lock); in iomap_page_create()
64 bitmap_fill(iop->uptodate, nr_blocks); in iomap_page_create()
65 attach_page_private(page, iop); in iomap_page_create()
66 return iop; in iomap_page_create()
72 struct iomap_page *iop = detach_page_private(page); in iomap_page_release() local
75 if (!iop) in iomap_page_release()
[all …]
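iomap_page_create() hangs a small per-page tracking structure off page->private, sized with struct_size() so the trailing uptodate[] bitmap gets one bit per filesystem block in the page. A simplified sketch of that allocation idiom follows; struct my_page_state and my_page_state_create() are stand-ins, and the real struct iomap_page carries additional read/write byte counters.

#include <linux/pagemap.h>
#include <linux/slab.h>

struct my_page_state {				/* stand-in for struct iomap_page */
	spinlock_t	uptodate_lock;
	unsigned long	uptodate[];		/* one bit per block in the page */
};

static struct my_page_state *my_page_state_create(struct inode *inode, struct page *page)
{
	unsigned int nr_blocks = PAGE_SIZE >> inode->i_blkbits;
	struct my_page_state *state;

	/* struct_size() sizes the header plus the flexible bitmap tail in one allocation */
	state = kzalloc(struct_size(state, uptodate, BITS_TO_LONGS(nr_blocks)), GFP_NOFS);
	if (!state)
		return NULL;

	spin_lock_init(&state->uptodate_lock);
	if (PageUptodate(page))
		bitmap_fill(state->uptodate, nr_blocks);
	attach_page_private(page, state);	/* page->private now owns the state */
	return state;
}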
/Linux-v5.15/drivers/iommu/amd/
io_pgtable.c
191 if (address <= PM_LEVEL_SIZE(domain->iop.mode)) in increase_address_space()
195 if (WARN_ON_ONCE(domain->iop.mode == PAGE_MODE_6_LEVEL)) in increase_address_space()
198 *pte = PM_LEVEL_PDE(domain->iop.mode, iommu_virt_to_phys(domain->iop.root)); in increase_address_space()
200 domain->iop.root = pte; in increase_address_space()
201 domain->iop.mode += 1; in increase_address_space()
209 amd_iommu_domain_set_pgtable(domain, pte, domain->iop.mode); in increase_address_space()
233 while (address > PM_LEVEL_SIZE(domain->iop.mode)) { in alloc_pte()
243 level = domain->iop.mode - 1; in alloc_pte()
244 pte = &domain->iop.root[PM_LEVEL_INDEX(level, address)]; in alloc_pte()
510 static void v1_free_pgtable(struct io_pgtable *iop) in v1_free_pgtable() argument
[all …]
amd_iommu.h
106 atomic64_set(&domain->iop.pt_root, root); in amd_iommu_domain_set_pt_root()
107 domain->iop.root = (u64 *)(root & PAGE_MASK); in amd_iommu_domain_set_pt_root()
108 domain->iop.mode = root & 7; /* lowest 3 bits encode pgtable mode */ in amd_iommu_domain_set_pt_root()
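amd_iommu_domain_set_pt_root() shows how the AMD driver packs two values into one 64-bit pt_root word: the page-aligned root-table pointer in the upper bits and the paging mode in the three low bits that page alignment leaves free. Roughly, and only as an illustration of the encoding shown above (root_table and pg_mode are assumed inputs):

/* encode: the root table is page aligned, so bits 2:0 are free to carry the mode */
u64 pt_root = (u64)(unsigned long)root_table | (pg_mode & 7);

/* decode, mirroring amd_iommu_domain_set_pt_root() above */
u64 *root = (u64 *)(unsigned long)(pt_root & PAGE_MASK);
int  mode = pt_root & 7;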
iommu.c
1443 if (domain->iop.mode != PAGE_MODE_NONE) in set_dte_entry()
1444 pte_root = iommu_virt_to_phys(domain->iop.root); in set_dte_entry()
1446 pte_root |= (domain->iop.mode & DEV_ENTRY_MODE_MASK) in set_dte_entry()
1874 if (domain->iop.pgtbl_cfg.tlb) in protection_domain_free()
1875 free_io_pgtable_ops(&domain->iop.iop.ops); in protection_domain_free()
1937 pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain); in protection_domain_alloc()
2049 struct io_pgtable_ops *ops = &domain->iop.iop.ops; in amd_iommu_iotlb_sync_map()
2060 struct io_pgtable_ops *ops = &domain->iop.iop.ops; in amd_iommu_map()
2065 (domain->iop.mode == PAGE_MODE_NONE)) in amd_iommu_map()
2105 struct io_pgtable_ops *ops = &domain->iop.iop.ops; in amd_iommu_unmap()
[all …]
amd_iommu_types.h
488 container_of((x), struct amd_io_pgtable, iop)
495 struct protection_domain, iop)
502 struct io_pgtable iop; member
516 struct amd_io_pgtable iop; member
/Linux-v5.15/arch/powerpc/sysdev/
cpm_common.c
113 struct cpm2_ioports __iomem *iop = mm_gc->regs; in cpm2_gpio32_save_regs() local
115 cpm2_gc->cpdata = in_be32(&iop->dat); in cpm2_gpio32_save_regs()
121 struct cpm2_ioports __iomem *iop = mm_gc->regs; in cpm2_gpio32_get() local
126 return !!(in_be32(&iop->dat) & pin_mask); in cpm2_gpio32_get()
133 struct cpm2_ioports __iomem *iop = mm_gc->regs; in __cpm2_gpio32_set() local
140 out_be32(&iop->dat, cpm2_gc->cpdata); in __cpm2_gpio32_set()
161 struct cpm2_ioports __iomem *iop = mm_gc->regs; in cpm2_gpio32_dir_out() local
167 setbits32(&iop->dir, pin_mask); in cpm2_gpio32_dir_out()
179 struct cpm2_ioports __iomem *iop = mm_gc->regs; in cpm2_gpio32_dir_in() local
185 clrbits32(&iop->dir, pin_mask); in cpm2_gpio32_dir_in()
cpm2.c
331 struct cpm2_ioports __iomem *iop = in cpm2_set_pin() local
337 setbits32(&iop[port].dir, pin); in cpm2_set_pin()
339 clrbits32(&iop[port].dir, pin); in cpm2_set_pin()
342 setbits32(&iop[port].par, pin); in cpm2_set_pin()
344 clrbits32(&iop[port].par, pin); in cpm2_set_pin()
347 setbits32(&iop[port].sor, pin); in cpm2_set_pin()
349 clrbits32(&iop[port].sor, pin); in cpm2_set_pin()
352 setbits32(&iop[port].odr, pin); in cpm2_set_pin()
354 clrbits32(&iop[port].odr, pin); in cpm2_set_pin()
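cpm1_set_pin32() and cpm2_set_pin() configure a pin by flipping single bits in the per-port dir/par/sor/odr registers through the powerpc setbits32()/clrbits32() read-modify-write MMIO helpers. As a small illustration of the idiom only (iop, port, pinno and the output flag are placeholders):

#include <asm/io.h>			/* in_be32/out_be32-based setbits32()/clrbits32() */

u32 pin = 1 << (31 - pinno);		/* assumes CPM-style MSB-first pin numbering */

if (output)
	setbits32(&iop[port].dir, pin);	/* reg |= pin  */
else
	clrbits32(&iop[port].dir, pin);	/* reg &= ~pin */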
/Linux-v5.15/drivers/net/fddi/skfp/h/
skfbi.h
697 #define ADDR(a) (char far *) smc->hw.iop+(a)
698 #define ADDRS(smc,a) (char far *) (smc)->hw.iop+(a)
700 #define ADDR(a) (((a)>>7) ? (outp(smc->hw.iop+B0_RAP,(a)>>7), \
701 (smc->hw.iop+(((a)&0x7F)|((a)>>7 ? 0x80:0)))) : \
702 (smc->hw.iop+(((a)&0x7F)|((a)>>7 ? 0x80:0))))
703 #define ADDRS(smc,a) (((a)>>7) ? (outp((smc)->hw.iop+B0_RAP,(a)>>7), \
704 ((smc)->hw.iop+(((a)&0x7F)|((a)>>7 ? 0x80:0)))) : \
705 ((smc)->hw.iop+(((a)&0x7F)|((a)>>7 ? 0x80:0))))
742 #define GET_ISR_SMP(iop) inpd((iop)+B0_ISRC) argument
744 #define CHECK_ISR_SMP(iop) (inpd((iop)+B0_ISRC) & inpd((iop)+B0_IMSK)) argument
[all …]
targetos.h
51 #define ADDR(a) (smc->hw.iop+(a))
53 …ADDR(a) (((a)>>7) ? (outp(smc->hw.iop+B0_RAP,(a)>>7), (smc->hw.iop+( ((a)&0x7F) | ((a)>>7 ? 0x80:0…
/Linux-v5.15/include/uapi/linux/
i2o-dev.h
50 unsigned int iop; /* IOP unit number */ member
55 unsigned int iop; /* IOP unit number */ member
60 unsigned int iop; /* IOP unit number */ member
66 unsigned int iop; /* IOP unit number */ member
75 unsigned int iop; /* IOP unit number */ member
86 unsigned int iop; /* IOP unit number */ member
98 unsigned int iop; member
/Linux-v5.15/drivers/scsi/
hptiop.c
52 req = readl(&hba->u.itl.iop->inbound_queue); in iop_wait_ready_itl()
59 writel(req, &hba->u.itl.iop->outbound_queue); in iop_wait_ready_itl()
60 readl(&hba->u.itl.iop->outbound_intstatus); in iop_wait_ready_itl()
90 while ((req = readl(&hba->u.itl.iop->outbound_queue)) != in hptiop_drain_outbound_queue_itl()
99 ((char __iomem *)hba->u.itl.iop + req); in hptiop_drain_outbound_queue_itl()
115 struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop; in iop_intr_itl() local
123 status = readl(&iop->outbound_intstatus); in iop_intr_itl()
126 u32 msg = readl(&iop->outbound_msgaddr0); in iop_intr_itl()
129 writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus); in iop_intr_itl()
306 writel((unsigned long)req - (unsigned long)hba->u.itl.iop, in iop_send_sync_request_itl()
[all …]
/Linux-v5.15/arch/alpha/kernel/
core_wildfire.c
187 wildfire_iop *iop; in wildfire_hardware_probe() local
286 iop = WILDFIRE_iop(soft_qbb); in wildfire_hardware_probe()
290 if ((iop->iop_hose[i].init.csr & 1) == 1 && in wildfire_hardware_probe()
552 wildfire_iop *iop = WILDFIRE_iop(qbbno); in wildfire_dump_iop_regs() local
555 printk(KERN_ERR "IOP registers for QBB %d (%p)\n", qbbno, iop); in wildfire_dump_iop_regs()
557 printk(KERN_ERR " IOA_CONFIG: 0x%16lx\n", iop->ioa_config.csr); in wildfire_dump_iop_regs()
558 printk(KERN_ERR " IOD_CONFIG: 0x%16lx\n", iop->iod_config.csr); in wildfire_dump_iop_regs()
560 iop->iop_switch_credits.csr); in wildfire_dump_iop_regs()
562 iop->iop_hose_credits.csr); in wildfire_dump_iop_regs()
566 i, iop->iop_hose[i].init.csr); in wildfire_dump_iop_regs()
[all …]
/Linux-v5.15/drivers/infiniband/core/
cq.c
152 static int ib_poll_handler(struct irq_poll *iop, int budget) in ib_poll_handler() argument
154 struct ib_cq *cq = container_of(iop, struct ib_cq, iop); in ib_poll_handler()
160 irq_poll_complete(&cq->iop); in ib_poll_handler()
163 irq_poll_sched(&cq->iop); in ib_poll_handler()
176 irq_poll_sched(&cq->iop); in ib_cq_completion_softirq()
254 irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler); in __ib_alloc_cq()
332 irq_poll_disable(&cq->iop); in ib_free_cq()
