Lines matching refs:ops: all references to ops (a const struct dma_map_ops *) in the Linux kernel's DMA mapping core, kernel/dma/mapping.c. The leading number on each hit is the line number within that file; the trailing "argument" / "local" tag is the cross-referencer's classification of the symbol at its declaration.

112 const struct dma_map_ops *ops) in dma_go_direct() argument
114 if (likely(!ops)) in dma_go_direct()
131 const struct dma_map_ops *ops) in dma_alloc_direct() argument
133 return dma_go_direct(dev, dev->coherent_dma_mask, ops); in dma_alloc_direct()
137 const struct dma_map_ops *ops) in dma_map_direct() argument
139 return dma_go_direct(dev, *dev->dma_mask, ops); in dma_map_direct()
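
The hits at 112-139 are the three predicates every function below keys on. A minimal sketch of their shape, reconstructed around the matched fragments; all sketches in this section paraphrase upstream rather than quote it, and assume the headers mapping.c itself pulls in (roughly <linux/dma-map-ops.h> plus the file-local direct.h):

static bool dma_go_direct(struct device *dev, u64 mask,
		const struct dma_map_ops *ops)
{
	/* No ops installed: the device always takes the dma-direct path. */
	if (likely(!ops))
		return true;
	/* (Upstream adds a CONFIG_DMA_OPS_BYPASS escape hatch here; elided.) */
	return false;
}

/* Allocation paths test the coherent mask... */
static bool dma_alloc_direct(struct device *dev, const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}

/* ...while streaming-mapping paths test the streaming mask. */
static bool dma_map_direct(struct device *dev, const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, *dev->dma_mask, ops);
}
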
146 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_map_page_attrs() local
154 if (dma_map_direct(dev, ops) || in dma_map_page_attrs()
158 addr = ops->map_page(dev, page, offset, size, dir, attrs); in dma_map_page_attrs()
168 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_unmap_page_attrs() local
171 if (dma_map_direct(dev, ops) || in dma_unmap_page_attrs()
174 else if (ops->unmap_page) in dma_unmap_page_attrs()
175 ops->unmap_page(dev, addr, size, dir, attrs); in dma_unmap_page_attrs()
183 const struct dma_map_ops *ops = get_dma_ops(dev); in __dma_map_sg_attrs() local
191 if (dma_map_direct(dev, ops) || in __dma_map_sg_attrs()
195 ents = ops->map_sg(dev, sg, nents, dir, attrs); in __dma_map_sg_attrs()
276 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_unmap_sg_attrs() local
280 if (dma_map_direct(dev, ops) || in dma_unmap_sg_attrs()
283 else if (ops->unmap_sg) in dma_unmap_sg_attrs()
284 ops->unmap_sg(dev, sg, nents, dir, attrs); in dma_unmap_sg_attrs()
291 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_map_resource() local
303 if (dma_map_direct(dev, ops)) in dma_map_resource()
305 else if (ops->map_resource) in dma_map_resource()
306 addr = ops->map_resource(dev, phys_addr, size, dir, attrs); in dma_map_resource()
316 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_unmap_resource() local
319 if (!dma_map_direct(dev, ops) && ops->unmap_resource) in dma_unmap_resource()
320 ops->unmap_resource(dev, addr, size, dir, attrs); in dma_unmap_resource()
328 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_sync_single_for_cpu() local
331 if (dma_map_direct(dev, ops)) in dma_sync_single_for_cpu()
333 else if (ops->sync_single_for_cpu) in dma_sync_single_for_cpu()
334 ops->sync_single_for_cpu(dev, addr, size, dir); in dma_sync_single_for_cpu()
342 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_sync_single_for_device() local
345 if (dma_map_direct(dev, ops)) in dma_sync_single_for_device()
347 else if (ops->sync_single_for_device) in dma_sync_single_for_device()
348 ops->sync_single_for_device(dev, addr, size, dir); in dma_sync_single_for_device()
356 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_sync_sg_for_cpu() local
359 if (dma_map_direct(dev, ops)) in dma_sync_sg_for_cpu()
361 else if (ops->sync_sg_for_cpu) in dma_sync_sg_for_cpu()
362 ops->sync_sg_for_cpu(dev, sg, nelems, dir); in dma_sync_sg_for_cpu()
370 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_sync_sg_for_device() local
373 if (dma_map_direct(dev, ops)) in dma_sync_sg_for_device()
375 else if (ops->sync_sg_for_device) in dma_sync_sg_for_device()
376 ops->sync_sg_for_device(dev, sg, nelems, dir); in dma_sync_sg_for_device()
396 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_get_sgtable_attrs() local
398 if (dma_alloc_direct(dev, ops)) in dma_get_sgtable_attrs()
401 if (!ops->get_sgtable) in dma_get_sgtable_attrs()
403 return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs); in dma_get_sgtable_attrs()
435 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_can_mmap() local
437 if (dma_alloc_direct(dev, ops)) in dma_can_mmap()
439 return ops->mmap != NULL; in dma_can_mmap()
460 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_mmap_attrs() local
462 if (dma_alloc_direct(dev, ops)) in dma_mmap_attrs()
465 if (!ops->mmap) in dma_mmap_attrs()
467 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); in dma_mmap_attrs()
473 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_get_required_mask() local
475 if (dma_alloc_direct(dev, ops)) in dma_get_required_mask()
477 if (ops->get_required_mask) in dma_get_required_mask()
478 return ops->get_required_mask(dev); in dma_get_required_mask()
495 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_alloc_attrs() local
506 if (dma_alloc_direct(dev, ops)) in dma_alloc_attrs()
508 else if (ops->alloc) in dma_alloc_attrs()
509 cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); in dma_alloc_attrs()
521 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_free_attrs() local
538 if (dma_alloc_direct(dev, ops)) in dma_free_attrs()
540 else if (ops->free) in dma_free_attrs()
541 ops->free(dev, size, cpu_addr, dma_handle, attrs); in dma_free_attrs()
548 const struct dma_map_ops *ops = get_dma_ops(dev); in __dma_alloc_pages() local
556 if (dma_alloc_direct(dev, ops)) in __dma_alloc_pages()
558 if (!ops->alloc_pages) in __dma_alloc_pages()
560 return ops->alloc_pages(dev, size, dma_handle, dir, gfp); in __dma_alloc_pages()
577 const struct dma_map_ops *ops = get_dma_ops(dev); in __dma_free_pages() local
580 if (dma_alloc_direct(dev, ops)) in __dma_free_pages()
582 else if (ops->free_pages) in __dma_free_pages()
583 ops->free_pages(dev, size, page, dma_handle, dir); in __dma_free_pages()
634 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_alloc_noncontiguous() local
640 if (ops && ops->alloc_noncontiguous) in dma_alloc_noncontiguous()
641 sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs); in dma_alloc_noncontiguous()
665 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_free_noncontiguous() local
668 if (ops && ops->free_noncontiguous) in dma_free_noncontiguous()
669 ops->free_noncontiguous(dev, size, sgt, dir); in dma_free_noncontiguous()
678 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_vmap_noncontiguous() local
681 if (ops && ops->alloc_noncontiguous) in dma_vmap_noncontiguous()
689 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_vunmap_noncontiguous() local
691 if (ops && ops->alloc_noncontiguous) in dma_vunmap_noncontiguous()
699 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_mmap_noncontiguous() local
701 if (ops && ops->alloc_noncontiguous) { in dma_mmap_noncontiguous()
715 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_supported() local
721 if (!ops) in dma_supported()
723 if (!ops->dma_supported) in dma_supported()
725 return ops->dma_supported(dev, mask); in dma_supported()
772 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_max_mapping_size() local
775 if (dma_map_direct(dev, ops)) in dma_max_mapping_size()
777 else if (ops && ops->max_mapping_size) in dma_max_mapping_size()
778 size = ops->max_mapping_size(dev); in dma_max_mapping_size()
786 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_need_sync() local
788 if (dma_map_direct(dev, ops)) in dma_need_sync()
790 return ops->sync_single_for_cpu || ops->sync_single_for_device; in dma_need_sync()
796 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_get_merge_boundary() local
798 if (!ops || !ops->get_merge_boundary) in dma_get_merge_boundary()
801 return ops->get_merge_boundary(dev); in dma_get_merge_boundary()