/*
 * Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <metal/errno.h>
#include <string.h>
#include <metal/device.h>
#include <metal/log.h>
#include <metal/dma.h>
#include <metal/atomic.h>

int metal_dma_map(struct metal_device *dev,
                  uint32_t dir,
                  struct metal_sg *sg_in,
                  int nents_in,
                  struct metal_sg *sg_out)
{
        int nents_out;

        if (!dev || !sg_in || !sg_out)
                return -EINVAL;
        if (!dev->bus->ops.dev_dma_map)
                return -ENODEV;

        /* Memory barrier before handing the buffers over to the device. */
        if (dir == METAL_DMA_DEV_R)
                /* If it is device read, apply memory write fence. */
                atomic_thread_fence(memory_order_release);
        else
                /* If it is device write or r/w, apply memory r/w fence. */
                atomic_thread_fence(memory_order_acq_rel);
        nents_out = dev->bus->ops.dev_dma_map(dev->bus,
                                              dev, dir, sg_in, nents_in,
                                              sg_out);
        return nents_out;
}
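
/*
 * Illustrative usage sketch, not part of the original file: mapping a
 * buffer so the device can read it (memory-to-device). The metal_sg
 * field names (virt, io, len) and the METAL_DMA_DEV_R direction come
 * from <metal/dma.h>; dev is assumed to have been obtained earlier via
 * metal_device_open().
 */
#if 0 /* example only */
static int example_map_for_device_read(struct metal_device *dev,
                                       void *buf, int len)
{
        struct metal_sg sg_in, sg_out;
        int nents;

        memset(&sg_in, 0, sizeof(sg_in));       /* leaves sg_in.io NULL */
        sg_in.virt = buf;       /* CPU virtual address of the buffer */
        sg_in.len = len;        /* length in bytes */

        /* On success, returns the number of entries written to sg_out. */
        nents = metal_dma_map(dev, METAL_DMA_DEV_R, &sg_in, 1, &sg_out);
        if (nents < 0)
                return nents;   /* e.g. -EINVAL or -ENODEV from the checks above */

        /* sg_out now holds the device-visible view of the buffer. */
        return nents;
}
#endif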

void metal_dma_unmap(struct metal_device *dev,
                     uint32_t dir,
                     struct metal_sg *sg,
                     int nents)
{
        /* Memory barrier before the mapping is released. */
        if (dir == METAL_DMA_DEV_R)
                /* If it is device read, apply memory write fence. */
                atomic_thread_fence(memory_order_release);
        else
                /* If it is device write or r/w, apply memory r/w fence. */
                atomic_thread_fence(memory_order_acq_rel);

        if (!dev || !dev->bus->ops.dev_dma_unmap || !sg)
                return;
        dev->bus->ops.dev_dma_unmap(dev->bus,
                                    dev, dir, sg, nents);
}
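
/*
 * Illustrative usage sketch, not part of the original file: a complete
 * map -> transfer -> unmap cycle for a device-write (device-to-memory)
 * transfer. start_transfer_and_wait() is a hypothetical driver hook that
 * stands in for programming and polling the actual hardware; it is not a
 * libmetal API. METAL_DMA_DEV_W and the metal_sg layout are taken from
 * <metal/dma.h>.
 */
#if 0 /* example only */
/* Hypothetical driver hook, assumed to block until the DMA completes. */
extern void start_transfer_and_wait(struct metal_device *dev,
                                    struct metal_sg *sg, int nents);

static int example_device_write_cycle(struct metal_device *dev,
                                      void *buf, int len)
{
        struct metal_sg sg_in, sg_out;
        int nents;

        memset(&sg_in, 0, sizeof(sg_in));
        sg_in.virt = buf;
        sg_in.len = len;

        nents = metal_dma_map(dev, METAL_DMA_DEV_W, &sg_in, 1, &sg_out);
        if (nents < 0)
                return nents;

        start_transfer_and_wait(dev, &sg_out, nents);

        /*
         * The acquire/release fence inside metal_dma_unmap() orders the
         * device's writes before any CPU reads of buf that follow it.
         */
        metal_dma_unmap(dev, METAL_DMA_DEV_W, &sg_out, nents);
        return 0;
}
#endif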