/Linux-v5.4/crypto/async_tx/

async_xor.c
     23  do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,    in do_async_xor() argument
     32  int src_cnt = unmap->to_cnt;    in do_async_xor()
     34  dma_addr_t dma_dest = unmap->addr[unmap->to_cnt];    in do_async_xor()
     35  dma_addr_t *src_list = unmap->addr;    in do_async_xor()
     63  if (src_list > unmap->addr)    in do_async_xor()
     66  xor_src_cnt, unmap->len,    in do_async_xor()
     77  xor_src_cnt, unmap->len,    in do_async_xor()
     82  dma_set_unmap(tx, unmap);    in do_async_xor()
    167  struct dmaengine_unmap_data *unmap = NULL;    in async_xor() local
    172  unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT);    in async_xor()
  [all …]

async_pq.c
     37  struct dmaengine_unmap_data *unmap,    in do_async_gen_syndrome() argument
     76  dma_dest[0] = unmap->addr[disks - 2];    in do_async_gen_syndrome()
     77  dma_dest[1] = unmap->addr[disks - 1];    in do_async_gen_syndrome()
     79  &unmap->addr[src_off],    in do_async_gen_syndrome()
     81  &scfs[src_off], unmap->len,    in do_async_gen_syndrome()
     89  dma_set_unmap(tx, unmap);    in do_async_gen_syndrome()
    171  struct dmaengine_unmap_data *unmap = NULL;    in async_gen_syndrome() local
    176  unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);    in async_gen_syndrome()
    179  if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&    in async_gen_syndrome()
    195  unmap->len = len;    in async_gen_syndrome()
  [all …]

async_memcpy.c
     40  struct dmaengine_unmap_data *unmap = NULL;    in async_memcpy() local
     43  unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);    in async_memcpy()
     45  if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {    in async_memcpy()
     53  unmap->to_cnt = 1;    in async_memcpy()
     54  unmap->addr[0] = dma_map_page(device->dev, src, src_offset, len,    in async_memcpy()
     56  unmap->from_cnt = 1;    in async_memcpy()
     57  unmap->addr[1] = dma_map_page(device->dev, dest, dest_offset, len,    in async_memcpy()
     59  unmap->len = len;    in async_memcpy()
     61  tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],    in async_memcpy()
     62  unmap->addr[0], len,    in async_memcpy()
  [all …]

async_raid6_recov.c
     24  struct dmaengine_unmap_data *unmap = NULL;    in async_sum_product() local
     30  unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);    in async_sum_product()
     32  if (unmap) {    in async_sum_product()
     40  unmap->addr[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE);    in async_sum_product()
     41  unmap->addr[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE);    in async_sum_product()
     42  unmap->to_cnt = 2;    in async_sum_product()
     44  unmap->addr[2] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);    in async_sum_product()
     45  unmap->bidi_cnt = 1;    in async_sum_product()
     47  pq[1] = unmap->addr[2];    in async_sum_product()
     49  unmap->len = len;    in async_sum_product()
  [all …]

/Linux-v5.4/drivers/xen/xenbus/

xenbus_client.c
     79  int (*unmap)(struct xenbus_device *dev, void *vaddr);    member
    467  struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];    in __xenbus_map_ring() local
    499  memset(&unmap[j], 0, sizeof(unmap[j]));    in __xenbus_map_ring()
    500  gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],    in __xenbus_map_ring()
    506  if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))    in __xenbus_map_ring()
    511  if (unmap[i].status != GNTST_okay) {    in __xenbus_map_ring()
    664  return ring_ops->unmap(dev, vaddr);    in xenbus_unmap_ring_vfree()
    730  struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];    in xenbus_unmap_ring_vfree_pv() local
    756  memset(&unmap[i], 0, sizeof(unmap[i]));    in xenbus_unmap_ring_vfree_pv()
    758  unmap[i].host_addr = arbitrary_virt_to_machine(    in xenbus_unmap_ring_vfree_pv()
  [all …]

/Linux-v5.4/tools/testing/selftests/vm/

mlock2-tests.c
    311  goto unmap;    in test_mlock_lock()
    315  goto unmap;    in test_mlock_lock()
    320  goto unmap;    in test_mlock_lock()
    325  unmap:    in test_mlock_lock()
    420  goto unmap;    in test_mlock_onfault()
    424  goto unmap;    in test_mlock_onfault()
    433  goto unmap;    in test_mlock_onfault()
    437  unmap:    in test_mlock_onfault()
    465  goto unmap;    in test_lock_onfault_of_present()
    476  goto unmap;    in test_lock_onfault_of_present()
  [all …]

/Linux-v5.4/drivers/dma/

dmaengine.c
   1206  struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);    in dmaengine_unmap() local
   1207  struct device *dev = unmap->dev;    in dmaengine_unmap()
   1210  cnt = unmap->to_cnt;    in dmaengine_unmap()
   1212  dma_unmap_page(dev, unmap->addr[i], unmap->len,    in dmaengine_unmap()
   1214  cnt += unmap->from_cnt;    in dmaengine_unmap()
   1216  dma_unmap_page(dev, unmap->addr[i], unmap->len,    in dmaengine_unmap()
   1218  cnt += unmap->bidi_cnt;    in dmaengine_unmap()
   1220  if (unmap->addr[i] == 0)    in dmaengine_unmap()
   1222  dma_unmap_page(dev, unmap->addr[i], unmap->len,    in dmaengine_unmap()
   1225  cnt = unmap->map_cnt;    in dmaengine_unmap()
  [all …]

mv_xor.c
    769  struct dmaengine_unmap_data *unmap;    in mv_chan_memcpy_self_test() local
    792  unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);    in mv_chan_memcpy_self_test()
    793  if (!unmap) {    in mv_chan_memcpy_self_test()
    801  unmap->addr[0] = src_dma;    in mv_chan_memcpy_self_test()
    808  unmap->to_cnt = 1;    in mv_chan_memcpy_self_test()
    813  unmap->addr[1] = dest_dma;    in mv_chan_memcpy_self_test()
    820  unmap->from_cnt = 1;    in mv_chan_memcpy_self_test()
    821  unmap->len = PAGE_SIZE;    in mv_chan_memcpy_self_test()
    862  dmaengine_unmap_put(unmap);    in mv_chan_memcpy_self_test()
    880  struct dmaengine_unmap_data *unmap;    in mv_chan_xor_self_test()
  [all …]

/Linux-v5.4/drivers/net/ethernet/brocade/bna/

bnad.c
     93  struct bnad_tx_unmap *unmap;    in bnad_tx_buff_unmap() local
     97  unmap = &unmap_q[index];    in bnad_tx_buff_unmap()
     98  nvecs = unmap->nvecs;    in bnad_tx_buff_unmap()
    100  skb = unmap->skb;    in bnad_tx_buff_unmap()
    101  unmap->skb = NULL;    in bnad_tx_buff_unmap()
    102  unmap->nvecs = 0;    in bnad_tx_buff_unmap()
    104  dma_unmap_addr(&unmap->vectors[0], dma_addr),    in bnad_tx_buff_unmap()
    106  dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);    in bnad_tx_buff_unmap()
    115  unmap = &unmap_q[index];    in bnad_tx_buff_unmap()
    119  dma_unmap_addr(&unmap->vectors[vector], dma_addr),    in bnad_tx_buff_unmap()
  [all …]

/Linux-v5.4/include/xen/

grant_table.h
    163  gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,    in gnttab_set_unmap_op() argument
    167  unmap->host_addr = addr;    in gnttab_set_unmap_op()
    169  unmap->host_addr = __pa(addr);    in gnttab_set_unmap_op()
    171  unmap->host_addr = addr;    in gnttab_set_unmap_op()
    173  unmap->handle = handle;    in gnttab_set_unmap_op()
    174  unmap->dev_bus_addr = 0;    in gnttab_set_unmap_op()

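gnttab_set_unmap_op() above only fills in a struct gnttab_unmap_grant_ref; the mapping is actually torn down by handing an array of these to the hypervisor via GNTTABOP_unmap_grant_ref and checking each per-op status, as the __xenbus_map_ring() hits earlier show. A minimal sketch of that unmap step, assuming the host addresses and grant handles were saved when the grants were mapped (my_unmap_grants and MY_MAX_GRANTS are illustrative names, not from the tree):

#include <linux/bug.h>
#include <linux/string.h>
#include <xen/grant_table.h>
#include <asm/xen/hypercall.h>

#define MY_MAX_GRANTS 8			/* illustrative bound on one batch */

/* Undo up to MY_MAX_GRANTS host-side mappings of granted pages.  addrs[]
 * and handles[] are assumed to have been recorded at map time. */
static int my_unmap_grants(phys_addr_t *addrs, grant_handle_t *handles,
			   unsigned int count)
{
	struct gnttab_unmap_grant_ref unmap[MY_MAX_GRANTS];
	unsigned int i;
	int err = 0;

	if (WARN_ON(count > MY_MAX_GRANTS))
		return -EINVAL;

	for (i = 0; i < count; i++) {
		memset(&unmap[i], 0, sizeof(unmap[i]));
		gnttab_set_unmap_op(&unmap[i], addrs[i],
				    GNTMAP_host_map, handles[i]);
	}

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, count))
		BUG();		/* the batch hypercall itself must not fail */

	for (i = 0; i < count; i++) {
		if (unmap[i].status != GNTST_okay) {
			err = -EINVAL;	/* page is still granted to the peer */
			break;
		}
	}

	return err;
}
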
/Linux-v5.4/drivers/acpi/

nvs.c
     78  bool unmap;    member
    137  if (entry->unmap) {    in suspend_nvs_free()
    139  entry->unmap = false;    in suspend_nvs_free()
    183  entry->unmap = !!entry->kaddr;    in suspend_nvs_save()

/Linux-v5.4/arch/um/kernel/

exec.c
     29  ret = unmap(&current->mm->context.id, 0, STUB_START, 0, &data);    in flush_thread()
     30  ret = ret || unmap(&current->mm->context.id, STUB_END,    in flush_thread()

/Linux-v5.4/drivers/ntb/

ntb_transport.c
   1529  struct dmaengine_unmap_data *unmap;    in ntb_async_rx_submit() local
   1541  unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);    in ntb_async_rx_submit()
   1542  if (!unmap)    in ntb_async_rx_submit()
   1545  unmap->len = len;    in ntb_async_rx_submit()
   1546  unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),    in ntb_async_rx_submit()
   1548  if (dma_mapping_error(device->dev, unmap->addr[0]))    in ntb_async_rx_submit()
   1551  unmap->to_cnt = 1;    in ntb_async_rx_submit()
   1553  unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),    in ntb_async_rx_submit()
   1555  if (dma_mapping_error(device->dev, unmap->addr[1]))    in ntb_async_rx_submit()
   1558  unmap->from_cnt = 1;    in ntb_async_rx_submit()
  [all …]

/Linux-v5.4/drivers/vfio/

vfio_iommu_type1.c
    872  struct vfio_iommu_type1_dma_unmap *unmap)    in vfio_dma_do_unmap() argument
    881  if (unmap->iova & mask)    in vfio_dma_do_unmap()
    883  if (!unmap->size || unmap->size & mask)    in vfio_dma_do_unmap()
    885  if (unmap->iova + unmap->size - 1 < unmap->iova ||    in vfio_dma_do_unmap()
    886  unmap->size > SIZE_MAX)    in vfio_dma_do_unmap()
    925  dma = vfio_find_dma(iommu, unmap->iova, 1);    in vfio_dma_do_unmap()
    926  if (dma && dma->iova != unmap->iova) {    in vfio_dma_do_unmap()
    930  dma = vfio_find_dma(iommu, unmap->iova + unmap->size - 1, 0);    in vfio_dma_do_unmap()
    931  if (dma && dma->iova + dma->size != unmap->iova + unmap->size) {    in vfio_dma_do_unmap()
    937  while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {    in vfio_dma_do_unmap()
  [all …]

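On the other side of the vfio_dma_do_unmap() checks above sits the VFIO_IOMMU_UNMAP_DMA ioctl: userspace hands the type1 IOMMU an iova/size pair, which must be aligned and must line up with the boundaries of earlier MAP_DMA calls. A small user-space sketch of issuing that ioctl, assuming container_fd is an already configured VFIO container (the surrounding setup is omitted and my_vfio_unmap is an illustrative name):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Ask the type1 IOMMU backend to unmap [iova, iova + size), mirroring the
 * alignment and whole-mapping checks done in vfio_dma_do_unmap(). */
static int my_vfio_unmap(int container_fd, __u64 iova, __u64 size)
{
	struct vfio_iommu_type1_dma_unmap unmap;

	memset(&unmap, 0, sizeof(unmap));
	unmap.argsz = sizeof(unmap);
	unmap.iova = iova;
	unmap.size = size;

	if (ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap) < 0) {
		perror("VFIO_IOMMU_UNMAP_DMA");
		return -errno;
	}

	/* On return, unmap.size holds the number of bytes actually unmapped. */
	return 0;
}
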
/Linux-v5.4/include/linux/

dmaengine.h
    506  struct dmaengine_unmap_data *unmap;    member
    516  struct dmaengine_unmap_data *unmap)    in dma_set_unmap() argument
    518  kref_get(&unmap->kref);    in dma_set_unmap()
    519  tx->unmap = unmap;    in dma_set_unmap()
    524  void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
    527  struct dmaengine_unmap_data *unmap)    in dma_set_unmap() argument
    535  static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)    in dmaengine_unmap_put() argument
    542  if (tx->unmap) {    in dma_descriptor_unmap()
    543  dmaengine_unmap_put(tx->unmap);    in dma_descriptor_unmap()
    544  tx->unmap = NULL;    in dma_descriptor_unmap()

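Taken together, the dmaengine.h and dmaengine.c hits above describe the dmaengine_unmap_data lifecycle: a client allocates the structure, records its mappings in addr[] along with to_cnt/from_cnt and len, attaches it to a descriptor with dma_set_unmap() (which takes a kref), and drops its own reference with dmaengine_unmap_put(); when the descriptor completes, dma_descriptor_unmap() releases the final reference and dmaengine_unmap() issues the dma_unmap_page() calls. A minimal client-side sketch of that pattern, modelled on the async_memcpy.c and ntb_perf.c hits (my_dma_copy_page is an illustrative name and the error handling is simplified):

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Illustrative helper (not a kernel function): copy one page through a DMA
 * channel and let the attached dmaengine_unmap_data issue the matching
 * dma_unmap_page() calls once the descriptor completes. */
static int my_dma_copy_page(struct dma_chan *chan, struct page *dst,
			    struct page *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dmaengine_unmap_data *unmap;
	struct dma_async_tx_descriptor *tx;
	int ret = -ENOMEM;

	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
	if (!unmap)
		return -ENOMEM;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(dev->dev, src, 0, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, unmap->addr[0]))
		goto out_put;
	unmap->to_cnt = 1;

	unmap->addr[1] = dma_map_page(dev->dev, dst, 0, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev->dev, unmap->addr[1]))
		goto out_put;
	unmap->from_cnt = 1;

	tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
					 len, DMA_PREP_INTERRUPT);
	if (!tx)
		goto out_put;

	dma_set_unmap(tx, unmap);	/* descriptor takes its own reference */
	ret = dma_submit_error(dmaengine_submit(tx)) ? -EIO : 0;

out_put:
	dmaengine_unmap_put(unmap);	/* drop our reference on every path */
	return ret;
}

Because the reference count, not the submitter, decides when pages are unmapped, the same dmaengine_unmap_put() call is safe on both the success and error paths; any mapping whose count field was never bumped is simply skipped by dmaengine_unmap().
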
/Linux-v5.4/drivers/ntb/test/

ntb_perf.c
    782  struct dmaengine_unmap_data *unmap;    in perf_copy_chunk() local
    797  unmap = dmaengine_get_unmap_data(dma_dev, 2, GFP_NOWAIT);    in perf_copy_chunk()
    798  if (!unmap)    in perf_copy_chunk()
    801  unmap->len = len;    in perf_copy_chunk()
    802  unmap->addr[0] = dma_map_page(dma_dev, virt_to_page(src),    in perf_copy_chunk()
    804  if (dma_mapping_error(dma_dev, unmap->addr[0])) {    in perf_copy_chunk()
    808  unmap->to_cnt = 1;    in perf_copy_chunk()
    810  unmap->addr[1] = dma_map_page(dma_dev, virt_to_page(dst),    in perf_copy_chunk()
    812  if (dma_mapping_error(dma_dev, unmap->addr[1])) {    in perf_copy_chunk()
    816  unmap->from_cnt = 1;    in perf_copy_chunk()
  [all …]

/Linux-v5.4/drivers/xen/

arm-device.c
     70  goto unmap;    in xen_map_device_mmio()
     94  goto unmap;    in xen_map_device_mmio()
     99  unmap:    in xen_map_device_mmio()

xen-front-pgdir-shbuf.c
     69  int (*unmap)(struct xen_front_pgdir_shbuf *buf);    member
    126  if (buf->ops && buf->ops->unmap)    in xen_front_pgdir_shbuf_unmap()
    127  return buf->ops->unmap(buf);    in xen_front_pgdir_shbuf_unmap()
    498  .unmap = backend_unmap

/Linux-v5.4/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/

vmmgm200.c
     37  .unmap = gf100_vmm_pgt_unmap,
     47  .unmap = gf100_vmm_pgt_unmap,
     62  .unmap = gf100_vmm_pgt_unmap,

/Linux-v5.4/arch/x86/platform/geode/

net5501.c
    112  goto unmap;    in net5501_present()
    126  unmap:    in net5501_present()

/Linux-v5.4/drivers/block/xen-blkback/

blkback.c
    307  struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];    in free_persistent_gnts() local
    315  unmap_data.unmap_ops = unmap;    in free_persistent_gnts()
    321  gnttab_set_unmap_op(&unmap[segs_to_unmap],    in free_persistent_gnts()
    348  struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];    in xen_blkbk_unmap_purged_grants() local
    356  unmap_data.unmap_ops = unmap;    in xen_blkbk_unmap_purged_grants()
    365  gnttab_set_unmap_op(&unmap[segs_to_unmap],    in xen_blkbk_unmap_purged_grants()
    760  req->unmap, req->unmap_pages);    in xen_blkbk_unmap_and_respond()
    764  work->unmap_ops = req->unmap;    in xen_blkbk_unmap_and_respond()
    784  struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];    in xen_blkbk_unmap() local
    793  unmap, unmap_pages);    in xen_blkbk_unmap()
  [all …]

/Linux-v5.4/Documentation/vm/

transhuge.rst
    125  - map/unmap of the pages with PTE entry increment/decrement ->_mapcount
    128  - map/unmap of the whole compound page is accounted for in compound_mapcount
    131  last unmap of subpages.
    137  get race-free detection of unmap of subpages when we have them mapped with
    142  map/unmap of the whole compound page.
    177  Partial unmap and deferred_split_huge_page()
    186  the place where we can detect partial unmap. It also might be
    187  counterproductive since in many cases partial unmap happens during exit(2) if

/Linux-v5.4/drivers/clk/sunxi/

clk-sun4i-display.c
    126  goto unmap;    in sun4i_a10_display_init()
    131  goto unmap;    in sun4i_a10_display_init()
    216  unmap:    in sun4i_a10_display_init()

/Linux-v5.4/drivers/firmware/efi/

memattr.c
     38  goto unmap;    in efi_memattr_init()
     45  unmap:    in efi_memattr_init()

/Linux-v5.4/arch/s390/pci/

pci_dma.c
    462  goto unmap;    in __s390_dma_map_sg()
    468  goto unmap;    in __s390_dma_map_sg()
    475  unmap:    in __s390_dma_map_sg()
    504  goto unmap;    in s390_dma_map_sg()
    517  goto unmap;    in s390_dma_map_sg()
    523  unmap:    in s390_dma_map_sg()