/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT altr_msgdma

#include <zephyr/device.h>
#include <errno.h>
#include <zephyr/init.h>
#include <string.h>
#include <soc.h>
#include <zephyr/drivers/dma.h>
#include <altera_common.h>
#include "altera_msgdma_csr_regs.h"
#include "altera_msgdma_descriptor_regs.h"
#include "altera_msgdma.h"

#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(dma_nios2, CONFIG_DMA_LOG_LEVEL);

/* Per-device run-time data */
struct nios2_msgdma_dev_data {
	const struct device *dev;
	alt_msgdma_dev *msgdma_dev;		/* Altera HAL device handle */
	alt_msgdma_standard_descriptor desc;	/* single in-flight descriptor */
	uint32_t direction;
	struct k_sem sem_lock;			/* serializes descriptor access */
	void *user_data;
	dma_callback_t dma_callback;
};

static void nios2_msgdma_isr(void *arg)
{
	const struct device *dev = (const struct device *)arg;
	struct nios2_msgdma_dev_data *dev_data = (struct nios2_msgdma_dev_data *)dev->data;

	/* Call Altera HAL driver ISR */
	alt_handle_irq(dev_data->msgdma_dev, DT_INST_IRQN(0));
}

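/*
 * Completion callback invoked by the Altera HAL from alt_handle_irq();
 * it translates the CSR status into a Zephyr DMA status code before
 * calling the user's callback.
 */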
static void nios2_msgdma_callback(void *context)
{
	struct nios2_msgdma_dev_data *dev_data =
		(struct nios2_msgdma_dev_data *)context;
	int dma_status;
	uint32_t status;

	status = IORD_ALTERA_MSGDMA_CSR_STATUS(dev_data->msgdma_dev->csr_base);

	if (status & ALTERA_MSGDMA_CSR_STOPPED_ON_ERROR_MASK) {
		dma_status = -EIO;
	} else if (status & ALTERA_MSGDMA_CSR_BUSY_MASK) {
		dma_status = -EBUSY;
	} else {
		dma_status = DMA_STATUS_COMPLETE;
	}

	LOG_DBG("msgdma csr status Reg: 0x%x", status);

	dev_data->dma_callback(dev_data->dev, dev_data->user_data, 0, dma_status);
}

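/*
 * Configure channel 0 for a single-block transfer. The Zephyr channel
 * direction is mapped onto the matching Altera HAL standard descriptor
 * constructor (MM-to-MM, MM-to-ST or ST-to-MM).
 */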
static int nios2_msgdma_config(const struct device *dev, uint32_t channel,
			       struct dma_config *cfg)
{
	struct nios2_msgdma_dev_data *dev_data = (struct nios2_msgdma_dev_data *)dev->data;
	struct dma_block_config *dma_block;
	int status;
	uint32_t control;

	/* Nios-II MSGDMA supports only one channel per DMA core */
	if (channel != 0U) {
		LOG_ERR("invalid channel number");
		return -EINVAL;
	}

#if MSGDMA_0_CSR_PREFETCHER_ENABLE
	if (cfg->block_count > 1) {
		LOG_ERR("driver does not yet support multiple descriptors");
		return -EINVAL;
	}
#else
	if (cfg->block_count != 1U) {
		LOG_ERR("invalid block count");
		return -EINVAL;
	}
#endif

	if (cfg->head_block == NULL) {
		LOG_ERR("head_block ptr is NULL");
		return -EINVAL;
	}

	if (cfg->head_block->block_size > MSGDMA_0_DESCRIPTOR_SLAVE_MAX_BYTE) {
		LOG_ERR("DMA error: data size too big: %u",
			cfg->head_block->block_size);
		return -EINVAL;
	}

	k_sem_take(&dev_data->sem_lock, K_FOREVER);
	dev_data->dma_callback = cfg->dma_callback;
	dev_data->user_data = cfg->user_data;
	dev_data->direction = cfg->channel_direction;
	dma_block = cfg->head_block;
	/* Raise an IRQ on transfer completion and on early termination */
	control = ALTERA_MSGDMA_DESCRIPTOR_CONTROL_TRANSFER_COMPLETE_IRQ_MASK |
		  ALTERA_MSGDMA_DESCRIPTOR_CONTROL_EARLY_TERMINATION_IRQ_MASK;

	if (dev_data->direction == MEMORY_TO_MEMORY) {
		status = alt_msgdma_construct_standard_mm_to_mm_descriptor(
			dev_data->msgdma_dev, &dev_data->desc,
			(alt_u32 *)dma_block->source_address,
			(alt_u32 *)dma_block->dest_address,
			dma_block->block_size,
			control);
	} else if (dev_data->direction == MEMORY_TO_PERIPHERAL) {
		status = alt_msgdma_construct_standard_mm_to_st_descriptor(
			dev_data->msgdma_dev, &dev_data->desc,
			(alt_u32 *)dma_block->source_address,
			dma_block->block_size,
			control);
	} else if (dev_data->direction == PERIPHERAL_TO_MEMORY) {
		status = alt_msgdma_construct_standard_st_to_mm_descriptor(
			dev_data->msgdma_dev, &dev_data->desc,
			(alt_u32 *)dma_block->dest_address,
			dma_block->block_size,
			control);
	} else {
		LOG_ERR("invalid channel direction");
		status = -EINVAL;
	}

	/* Register msgdma callback */
	alt_msgdma_register_callback(dev_data->msgdma_dev,
			nios2_msgdma_callback,
			ALTERA_MSGDMA_CSR_GLOBAL_INTERRUPT_MASK |
			ALTERA_MSGDMA_CSR_STOP_ON_ERROR_MASK |
			ALTERA_MSGDMA_CSR_STOP_ON_EARLY_TERMINATION_MASK,
			dev_data);

	/* Clear the IRQ status */
	IOWR_ALTERA_MSGDMA_CSR_STATUS(dev_data->msgdma_dev->csr_base,
				      ALTERA_MSGDMA_CSR_IRQ_SET_MASK);
	k_sem_give(&dev_data->sem_lock);

	return status;
}

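/*
 * Queue the descriptor prepared by nios2_msgdma_config(); completion is
 * reported asynchronously through the registered callback.
 */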
static int nios2_msgdma_transfer_start(const struct device *dev,
				       uint32_t channel)
{
	struct nios2_msgdma_dev_data *cfg = (struct nios2_msgdma_dev_data *)dev->data;
	int status;

	/* Nios-II MSGDMA supports only one channel per DMA core */
	if (channel != 0U) {
		LOG_ERR("invalid channel number");
		return -EINVAL;
	}

	k_sem_take(&cfg->sem_lock, K_FOREVER);
	status = alt_msgdma_standard_descriptor_async_transfer(cfg->msgdma_dev,
								&cfg->desc);
	k_sem_give(&cfg->sem_lock);

	if (status < 0) {
		LOG_ERR("DMA transfer error (%d)", status);
	}

	return status;
}

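/*
 * Stop the dispatcher core and report whether it actually reached the
 * stopped state.
 */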
static int nios2_msgdma_transfer_stop(const struct device *dev,
				      uint32_t channel)
{
	struct nios2_msgdma_dev_data *cfg = (struct nios2_msgdma_dev_data *)dev->data;
	int ret = -EIO;
	uint32_t status;

	k_sem_take(&cfg->sem_lock, K_FOREVER);
	/* Stop the DMA Dispatcher */
	IOWR_ALTERA_MSGDMA_CSR_CONTROL(cfg->msgdma_dev->csr_base,
				       ALTERA_MSGDMA_CSR_STOP_MASK);

	status = IORD_ALTERA_MSGDMA_CSR_STATUS(cfg->msgdma_dev->csr_base);
	k_sem_give(&cfg->sem_lock);

	if (status & ALTERA_MSGDMA_CSR_STOP_STATE_MASK) {
		LOG_DBG("DMA Dispatcher stopped");
		ret = 0;
	}

	LOG_DBG("msgdma csr status Reg: 0x%x", status);

	return ret;
}

static DEVICE_API(dma, nios2_msgdma_driver_api) = {
	.config = nios2_msgdma_config,
	.start = nios2_msgdma_transfer_start,
	.stop = nios2_msgdma_transfer_stop,
};

static int nios2_msgdma0_initialize(const struct device *dev)
{
	struct nios2_msgdma_dev_data *dev_data = (struct nios2_msgdma_dev_data *)dev->data;

	dev_data->dev = dev;

	/* Initialize semaphore */
	k_sem_init(&dev_data->sem_lock, 1, 1);

	alt_msgdma_init(dev_data->msgdma_dev, 0, DT_INST_IRQN(0));

	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority),
		    nios2_msgdma_isr, DEVICE_DT_INST_GET(0), 0);

	irq_enable(DT_INST_IRQN(0));

	return 0;
}

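/*
 * Instantiate the Altera HAL device (msgdma_dev0) from the CSR and
 * descriptor-slave base addresses of instance 0.
 */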
ALTERA_MSGDMA_CSR_DESCRIPTOR_SLAVE_INSTANCE(MSGDMA_0, MSGDMA_0_CSR,
				MSGDMA_0_DESCRIPTOR_SLAVE, msgdma_dev0)

static struct nios2_msgdma_dev_data dma0_nios2_data = {
	.msgdma_dev = &msgdma_dev0,
};

DEVICE_DT_INST_DEFINE(0, &nios2_msgdma0_initialize,
		NULL, &dma0_nios2_data, NULL, POST_KERNEL,
		CONFIG_DMA_INIT_PRIORITY, &nios2_msgdma_driver_api);

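/*
 * Usage sketch (illustrative only, not compiled into the driver): driving
 * a single MEMORY_TO_MEMORY transfer through the generic Zephyr DMA API.
 * The devicetree node label, buffer names and callback below are
 * assumptions made for the example, not part of this driver.
 *
 *	static void xfer_done(const struct device *dma, void *user_data,
 *			      uint32_t channel, int status)
 *	{
 *		// status: DMA_STATUS_COMPLETE on success, -EIO/-EBUSY
 *		// as mapped in nios2_msgdma_callback() above
 *	}
 *
 *	const struct device *dma = DEVICE_DT_GET(DT_NODELABEL(msgdma0));
 *	static uint8_t src_buf[64], dst_buf[64];
 *
 *	struct dma_block_config blk = {
 *		.source_address = (uint32_t)src_buf,
 *		.dest_address = (uint32_t)dst_buf,
 *		.block_size = sizeof(src_buf),
 *	};
 *	struct dma_config cfg = {
 *		.channel_direction = MEMORY_TO_MEMORY,
 *		.block_count = 1,
 *		.head_block = &blk,
 *		.dma_callback = xfer_done,
 *	};
 *
 *	if (dma_config(dma, 0, &cfg) == 0) {
 *		dma_start(dma, 0);	// channel 0 is the only channel
 *	}
 */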