/** @file
 * @brief Driver for Xilinx AXI DMA.
 */
/*
 * Copyright (c) 2024 CISPA Helmholtz Center for Information Security gGmbH
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/barrier.h>
#include <zephyr/sys/sys_io.h>

#include <string.h>

#include "dma_xilinx_axi_dma.h"
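
/*
 * This driver is used through the generic Zephyr DMA API. A minimal
 * caller-side sketch for the TX channel (buffer, length and callback names
 * are illustrative only):
 *
 *	struct dma_block_config block = {
 *		.source_address = (uintptr_t)tx_buf,
 *		.block_size = tx_len,
 *		.source_addr_adj = DMA_ADDR_ADJ_INCREMENT,
 *	};
 *	struct dma_config cfg = {
 *		.channel_direction = MEMORY_TO_PERIPHERAL,
 *		.linked_channel = XILINX_AXI_DMA_LINKED_CHANNEL_NO_CSUM_OFFLOAD,
 *		.dma_callback = tx_done_cb,
 *		.head_block = &block,
 *	};
 *
 *	dma_config(dma_dev, XILINX_AXI_DMA_TX_CHANNEL_NUM, &cfg);
 *	dma_start(dma_dev, XILINX_AXI_DMA_TX_CHANNEL_NUM);
 */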

#define XILINX_AXI_DMA_SG_DESCRIPTOR_ADDRESS_MASK 0x3f

LOG_MODULE_REGISTER(dma_xilinx_axi_dma, CONFIG_DMA_LOG_LEVEL);

/* masks for control field in SG descriptor */
#define XILINX_AXI_DMA_SG_DESCRIPTOR_CTRL_RESERVED_MASK 0xF0000000
/* descriptor is for start of transfer */
#define XILINX_AXI_DMA_SG_DESCRIPTOR_CTRL_SOF_MASK 0x08000000
/* descriptor is for end of transfer */
#define XILINX_AXI_DMA_SG_DESCRIPTOR_CTRL_EOF_MASK 0x04000000
/* length of the associated buffer in main memory */
#define XILINX_AXI_DMA_SG_DESCRIPTOR_CTRL_LENGTH_MASK 0x03FFFFFF
#define XILINX_AXI_DMA_SG_DESCRIPTOR_STATUS_LENGTH_MASK 0x03FFFFFF

/* masks for status field in SG descriptor */
/* transfer completed */
#define XILINX_AXI_DMA_SG_DESCRIPTOR_STATUS_COMPLETE_MASK 0x80000000
/* decode error, i.e., DECERR on AXI bus from memory */
#define XILINX_AXI_DMA_SG_DESCRIPTOR_STATUS_DEC_ERR_MASK 0x40000000
/* slave error, i.e., SLVERR on AXI bus from memory */
#define XILINX_AXI_DMA_SG_DESCRIPTOR_STATUS_SLV_ERR_MASK 0x20000000
/* internal DMA error, e.g., 0-length transfer */
#define XILINX_AXI_DMA_SG_DESCRIPTOR_STATUS_INT_ERR_MASK 0x10000000
/* reserved */
#define XILINX_AXI_DMA_SG_DESCRIPTOR_STATUS_INT_RES_MASK 0x0C000000
/* number of transferred bytes */
#define XILINX_AXI_DMA_SG_DESCRIPTOR_STATUS_TRANSFERRED_MASK 0x03FFFFFF

#define XILINX_AXI_DMA_SG_DESCRIPTOR_APP0_CHECKSUM_OFFLOAD_FULL 0x00000002
#define XILINX_AXI_DMA_SG_DESCRIPTOR_APP0_CHECKSUM_OFFLOAD_NONE 0x00000000
#define XILINX_AXI_DMA_SG_DESCRIPTOR_APP2_FCS_ERR_MASK 0x00000100
#define XILINX_AXI_DMA_SG_DESCRIPTOR_APP2_IP_ERR_MASK 0x00000028
#define XILINX_AXI_DMA_SG_DESCRIPTOR_APP2_UDP_ERR_MASK 0x00000030
#define XILINX_AXI_DMA_SG_DESCRIPTOR_APP2_TCP_ERR_MASK 0x00000038
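
/* Note: the IP/UDP/TCP error indications above appear to be encodings of one */
/* shared status field (bits 5:3 of app2) rather than independent flag bits; */
/* see the matching logic in the descriptor cleanup routine below. */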

/* masks for DMA registers */

#define XILINX_AXI_DMA_REGS_DMACR_IRQTHRESH_SHIFT_BITS 16
#define XILINX_AXI_DMA_REGS_DMACR_IRQDELAY_SHIFT_BITS 24
/* masks for DMACR register */
/* interrupt timeout - trigger an interrupt after X cycles without a transfer; */
/* the unit is 125 * clock_period */
#define XILINX_AXI_DMA_REGS_DMACR_IRQDELAY 0xFF000000
/* irqthreshold - this can be used to generate interrupts after X completed packets */
/* instead of after every packet */
#define XILINX_AXI_DMA_REGS_DMACR_IRQTHRESH 0x00FF0000
#define XILINX_AXI_DMA_REGS_DMACR_RESERVED1 0x00008000
/* interrupt on error enable */
#define XILINX_AXI_DMA_REGS_DMACR_ERR_IRQEN 0x00004000
/* interrupt on delay timer interrupt enable */
#define XILINX_AXI_DMA_REGS_DMACR_DLY_IRQEN 0x00002000
/* interrupt on complete enable */
#define XILINX_AXI_DMA_REGS_DMACR_IOC_IRQEN 0x00001000
#define XILINX_AXI_DMA_REGS_DMACR_ALL_IRQEN                                                        \
	(XILINX_AXI_DMA_REGS_DMACR_ERR_IRQEN | XILINX_AXI_DMA_REGS_DMACR_DLY_IRQEN |               \
	 XILINX_AXI_DMA_REGS_DMACR_IOC_IRQEN)
#define XILINX_AXI_DMA_REGS_DMACR_RESERVED2 0x00000FE0
/* DMA ignores completed bit in SG descriptor and overwrites descriptors */
#define XILINX_AXI_DMA_REGS_DMACR_CYC_BD_EN 0x00000010
/* use AXI fixed burst instead of incrementing burst for TX transfers, e.g., useful for */
/* reading a FIFO */
#define XILINX_AXI_DMA_REGS_DMACR_KEYHOLE 0x00000008
/* soft reset */
#define XILINX_AXI_DMA_REGS_DMACR_RESET 0x00000004
#define XILINX_AXI_DMA_REGS_DMACR_RESERVED3 0x00000002
/* run-stop */
#define XILINX_AXI_DMA_REGS_DMACR_RS 0x00000001

/* masks for DMASR register */
/* interrupt delay time status */
#define XILINX_AXI_DMA_REGS_DMASR_IRQDELAYSTS 0xFF000000
/* interrupt threshold status */
#define XILINX_AXI_DMA_REGS_DMASR_IRQTHRESHSTS 0x00FF0000
#define XILINX_AXI_DMA_REGS_DMASR_RESERVED1 0x00008000
/* current interrupt was generated on error */
#define XILINX_AXI_DMA_REGS_DMASR_ERR_IRQ 0x00004000
/* current interrupt was generated by timeout */
#define XILINX_AXI_DMA_REGS_DMASR_DLY_IRQ 0x00002000
/* current interrupt was generated by completion of a transfer */
#define XILINX_AXI_DMA_REGS_DMASR_IOC_IRQ 0x00001000
#define XILINX_AXI_DMA_REGS_DMASR_RESERVED2 0x00000800
/* scatter gather decode error */
#define XILINX_AXI_DMA_REGS_DMASR_SGDECERR 0x00000400
/* scatter gather slave error */
#define XILINX_AXI_DMA_REGS_DMASR_SGSLVERR 0x00000200
/* scatter gather internal error, i.e., fetched a descriptor with complete bit already set */
#define XILINX_AXI_DMA_REGS_DMASR_SGINTERR 0x00000100
#define XILINX_AXI_DMA_REGS_DMASR_RESERVED3 0x00000080
/* DMA decode error */
#define XILINX_AXI_DMA_REGS_DMASR_DMADECERR 0x00000040
/* DMA slave error */
#define XILINX_AXI_DMA_REGS_DMASR_SLVERR 0x00000020
/* DMA internal error */
#define XILINX_AXI_DMA_REGS_DMASR_INTERR 0x00000010
/* scatter/gather support enabled at build time */
#define XILINX_AXI_DMA_REGS_DMASR_SGINCL 0x00000008
#define XILINX_AXI_DMA_REGS_DMASR_RESERVED4 0x00000004
/* DMA channel is idle, i.e., DMA operations completed; writing tail restarts operation */
#define XILINX_AXI_DMA_REGS_DMASR_IDLE 0x00000002
/* RS (run-stop) in DMACR is 0 and operations completed; writing tail does nothing */
#define XILINX_AXI_DMA_REGS_DMASR_HALTED 0x00000001

#define XILINX_AXI_DMA_REGS_SG_CTRL_CACHE_MASK 0x0000000F
#define XILINX_AXI_DMA_REGS_SG_CTRL_RES1_MASK 0x000000F0
#define XILINX_AXI_DMA_REGS_SG_CTRL_USER_MASK 0x00000F00
#define XILINX_AXI_DMA_REGS_SG_CTRL_RES2_MASK 0xFFFFF000

#ifdef CONFIG_DMA_XILINX_AXI_DMA_DISABLE_CACHE_WHEN_ACCESSING_SG_DESCRIPTORS
#include <zephyr/arch/cache.h>
static inline void dma_xilinx_axi_dma_disable_cache(void)
{
	cache_data_disable();
}
static inline void dma_xilinx_axi_dma_enable_cache(void)
{
	cache_data_enable();
}
#else
static inline void dma_xilinx_axi_dma_disable_cache(void)
{
	/* do nothing */
}
static inline void dma_xilinx_axi_dma_enable_cache(void)
{
	/* do nothing */
}
#endif

/* in-memory descriptor, read by the DMA, that instructs it how many bytes to transfer from */
/* which buffer */
struct __attribute__((__packed__)) dma_xilinx_axi_dma_sg_descriptor {
	/* next descriptor[31:6], bits 5-0 reserved */
	uint32_t nxtdesc;
	/* next descriptor[63:32] */
	uint32_t nxtdesc_msb;
	/* address of buffer to transfer[31:0] */
	uint32_t buffer_address;
	/* address of buffer to transfer[63:32] */
	uint32_t buffer_address_msb;
	uint32_t reserved1;
	uint32_t reserved2;

	/* bitfield, masks for access defined above */
	uint32_t control;
	/* bitfield, masks for access defined above */
	uint32_t status;

	/* application-specific fields used, e.g., to enable checksum offloading */
	/* for the Ethernet Subsystem */
	uint32_t app0;
	uint32_t app1;
	uint32_t app2;
	uint32_t app3;
	uint32_t app4;
} __aligned(64);

__aligned(64) static struct dma_xilinx_axi_dma_sg_descriptor
	descriptors_tx[CONFIG_DMA_XILINX_AXI_DMA_SG_DESCRIPTOR_NUM_TX] = {0};
__aligned(64) static struct dma_xilinx_axi_dma_sg_descriptor
	descriptors_rx[CONFIG_DMA_XILINX_AXI_DMA_SG_DESCRIPTOR_NUM_RX] = {0};
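
/* The descriptor arrays are wired into circular rings in */
/* dma_xilinx_axi_dma_configure(): each nxtdesc points to the next array */
/* element, and the last descriptor points back to the first. */
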
/* the registers are the same for both directions, with different names (MM2S/S2MM) */
struct __attribute__((__packed__)) dma_xilinx_axi_dma_mm2s_s2mm_registers {
	/* DMA control register */
	/* bitfield, masks defined above */
	uint32_t dmacr;
	/* DMA status register */
	/* bitfield, masks defined above */
	uint32_t dmasr;
	/* current descriptor address[31:0] */
	uint32_t curdesc;
	/* current descriptor address[63:32] */
	uint32_t curdesc_msb;
	/* tail descriptor address[31:0] */
	uint32_t taildesc;
	/* tail descriptor address[63:32] */
	uint32_t taildesc_msb;
	/* transfer source address for "direct register mode"[31:0] */
	uint32_t sa;
	/* transfer source address for "direct register mode"[63:32] */
	uint32_t sa_msb;
	uint32_t reserved1;
	uint32_t reserved2;
	/* transfer length for "direct register mode" */
	uint32_t length;
};

struct __attribute__((__packed__)) dma_xilinx_axi_dma_register_space {
	struct dma_xilinx_axi_dma_mm2s_s2mm_registers mm2s_registers;
	/* scatter/gather user and cache register or reserved */
	/* controls arcache/awcache and aruser/awuser of generated transactions */
	uint32_t sg_ctl;
	struct dma_xilinx_axi_dma_mm2s_s2mm_registers s2mm_registers;
};

/* global configuration per DMA device */
struct dma_xilinx_axi_dma_config {
	void *reg;
	/* this should always be 2 - one channel for TX, one for RX */
	uint32_t channels;
	void (*irq_configure)(void);
	uint32_t *irq0_channels;
	size_t irq0_channels_size;
};

typedef void (*dma_xilinx_axi_dma_isr_t)(const struct device *dev);

/* parameters for polling timer */
struct dma_xilinx_axi_dma_timer_params {
	/* back reference for the device */
	const struct device *dev;
	/* number of this channel's IRQ */
	unsigned int irq_number;
	/* ISR that normally handles the channel's interrupts */
	dma_xilinx_axi_dma_isr_t isr;
};

/* per-channel state */
struct dma_xilinx_axi_dma_channel {
	volatile struct dma_xilinx_axi_dma_sg_descriptor *descriptors;

	struct k_timer polling_timer;

	struct dma_xilinx_axi_dma_timer_params polling_timer_params;

	size_t num_descriptors;

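	/* ring bookkeeping: current_transfer_start_index is the next descriptor */
	/* to queue, current_transfer_end_index the next one the cleanup path */
	/* will inspect for completion */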
	size_t current_transfer_start_index, current_transfer_end_index;

	volatile struct dma_xilinx_axi_dma_mm2s_s2mm_registers *channel_regs;

	enum dma_channel_direction last_transfer_direction;

	/* call this when the transfer is complete */
	dma_callback_t completion_callback;
	void *completion_callback_user_data;

	uint32_t last_rx_size;

	uint32_t sg_desc_app0;
	bool check_csum_in_isr;
};

/* global state for device and array of per-channel states */
struct dma_xilinx_axi_dma_data {
	struct dma_context ctx;
	struct dma_xilinx_axi_dma_channel *channels;
	bool device_has_been_reset;
};

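/* The lock helpers below mask IRQs while channel metadata is manipulated. */
/* Kconfig selects how broadly to lock: all IRQs, both DMA channels' IRQs, */
/* or only the affected channel's IRQ. */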
#ifdef CONFIG_DMA_XILINX_AXI_DMA_LOCK_ALL_IRQS
static inline int dma_xilinx_axi_dma_lock_irq(const struct dma_xilinx_axi_dma_config *cfg,
					      const uint32_t channel_num)
{
	(void)cfg;
	(void)channel_num;
	return irq_lock();
}

static inline void dma_xilinx_axi_dma_unlock_irq(const struct dma_xilinx_axi_dma_config *cfg,
						 const uint32_t channel_num, int key)
{
	(void)cfg;
	(void)channel_num;
	irq_unlock(key);
}
#elif defined(CONFIG_DMA_XILINX_AXI_DMA_LOCK_DMA_IRQS)
static inline int dma_xilinx_axi_dma_lock_irq(const struct dma_xilinx_axi_dma_config *cfg,
					      const uint32_t channel_num)
{
	int ret;
	(void)channel_num;

	/* TX is 0, RX is 1 */
	ret = irq_is_enabled(cfg->irq0_channels[0]) ? 1 : 0;
	ret |= (irq_is_enabled(cfg->irq0_channels[1]) ? 1 : 0) << 1;

	LOG_DBG("DMA IRQ state: %x TX IRQN: %" PRIu32 " RX IRQN: %" PRIu32, ret,
		cfg->irq0_channels[0], cfg->irq0_channels[1]);

	irq_disable(cfg->irq0_channels[0]);
	irq_disable(cfg->irq0_channels[1]);

	return ret;
}

static inline void dma_xilinx_axi_dma_unlock_irq(const struct dma_xilinx_axi_dma_config *cfg,
						 const uint32_t channel_num, int key)
{
	(void)channel_num;

	if (key & 0x1) {
		/* TX was enabled */
		irq_enable(cfg->irq0_channels[0]);
	}
	if (key & 0x2) {
		/* RX was enabled */
		irq_enable(cfg->irq0_channels[1]);
	}
}
#elif defined(CONFIG_DMA_XILINX_AXI_DMA_LOCK_CHANNEL_IRQ)
static inline int dma_xilinx_axi_dma_lock_irq(const struct dma_xilinx_axi_dma_config *cfg,
					      const uint32_t channel_num)
{
	int ret;

	ret = irq_is_enabled(cfg->irq0_channels[channel_num]);

	LOG_DBG("DMA IRQ state: %x ", ret);

	irq_disable(cfg->irq0_channels[channel_num]);

	return ret;
}

static inline void dma_xilinx_axi_dma_unlock_irq(const struct dma_xilinx_axi_dma_config *cfg,
						 const uint32_t channel_num, int key)
{
	if (key) {
		/* was enabled */
		irq_enable(cfg->irq0_channels[channel_num]);
	}
}
#else
#error "No IRQ strategy selected in Kconfig!"
#endif

static inline void dma_xilinx_axi_dma_write_reg(volatile uint32_t *reg, uint32_t val)
{
	sys_write32(val, (mem_addr_t)(uintptr_t)reg);
}

static inline uint32_t dma_xilinx_axi_dma_read_reg(volatile uint32_t *reg)
{
	return sys_read32((mem_addr_t)(uintptr_t)reg);
}

uint32_t dma_xilinx_axi_dma_last_received_frame_length(const struct device *dev)
{
	const struct dma_xilinx_axi_dma_data *data = dev->data;

	return data->channels[XILINX_AXI_DMA_RX_CHANNEL_NUM].last_rx_size;
}

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
static inline void
dma_xilinx_axi_dma_acknowledge_interrupt(struct dma_xilinx_axi_dma_channel *channel_data)
{
	/* the interrupt handler might have called dma_start, */
	/* which overwrites the DMA control register, */
	/* so we cannot just write the old value back */
	uint32_t dmacr = dma_xilinx_axi_dma_read_reg(&channel_data->channel_regs->dmacr);

	dma_xilinx_axi_dma_write_reg(&channel_data->channel_regs->dmacr, dmacr);
}
#pragma GCC diagnostic pop

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
static bool dma_xilinx_axi_dma_channel_has_error(
	const struct dma_xilinx_axi_dma_channel *channel_data,
	volatile const struct dma_xilinx_axi_dma_sg_descriptor *descriptor)
{
	bool error = false;

	/* check register errors first, as the SG descriptor might not be valid */
	if (dma_xilinx_axi_dma_read_reg(&channel_data->channel_regs->dmasr) &
	    XILINX_AXI_DMA_REGS_DMASR_INTERR) {
		LOG_ERR("DMA has internal error, DMASR = %" PRIx32,
			dma_xilinx_axi_dma_read_reg(&channel_data->channel_regs->dmasr));
		error = true;
	}

	if (dma_xilinx_axi_dma_read_reg(&channel_data->channel_regs->dmasr) &
	    XILINX_AXI_DMA_REGS_DMASR_SLVERR) {
		LOG_ERR("DMA has slave error, DMASR = %" PRIx32,
			dma_xilinx_axi_dma_read_reg(&channel_data->channel_regs->dmasr));
		error = true;
	}

	if (dma_xilinx_axi_dma_read_reg(&channel_data->channel_regs->dmasr) &
	    XILINX_AXI_DMA_REGS_DMASR_DMADECERR) {
		LOG_ERR("DMA has decode error, DMASR = %" PRIx32,
			dma_xilinx_axi_dma_read_reg(&channel_data->channel_regs->dmasr));
		error = true;
	}

	if (dma_xilinx_axi_dma_read_reg(&channel_data->channel_regs->dmasr) &
	    XILINX_AXI_DMA_REGS_DMASR_SGINTERR) {
		LOG_ERR("DMA has SG internal error, DMASR = %" PRIx32,
			dma_xilinx_axi_dma_read_reg(&channel_data->channel_regs->dmasr));
		error = true;
	}

	if (dma_xilinx_axi_dma_read_reg(&channel_data->channel_regs->dmasr) &
	    XILINX_AXI_DMA_REGS_DMASR_SGSLVERR) {
		LOG_ERR("DMA has SG slave error, DMASR = %" PRIx32,
			dma_xilinx_axi_dma_read_reg(&channel_data->channel_regs->dmasr));
		error = true;
	}

	if (dma_xilinx_axi_dma_read_reg(&channel_data->channel_regs->dmasr) &
	    XILINX_AXI_DMA_REGS_DMASR_SGDECERR) {
		LOG_ERR("DMA has SG decode error, DMASR = %" PRIx32,
			dma_xilinx_axi_dma_read_reg(&channel_data->channel_regs->dmasr));
		error = true;
	}

	if (descriptor->status & XILINX_AXI_DMA_SG_DESCRIPTOR_STATUS_DEC_ERR_MASK) {
		LOG_ERR("Descriptor has SG decode error, status=%" PRIx32, descriptor->status);
		error = true;
	}

	if (descriptor->status & XILINX_AXI_DMA_SG_DESCRIPTOR_STATUS_SLV_ERR_MASK) {
		LOG_ERR("Descriptor has SG slave error, status=%" PRIx32, descriptor->status);
		error = true;
	}

	if (descriptor->status & XILINX_AXI_DMA_SG_DESCRIPTOR_STATUS_INT_ERR_MASK) {
		LOG_ERR("Descriptor has SG internal error, status=%" PRIx32, descriptor->status);
		error = true;
	}

	return error;
}
#pragma GCC diagnostic pop

static int
dma_xilinx_axi_dma_clean_up_sg_descriptors(const struct device *dev,
					   struct dma_xilinx_axi_dma_channel *channel_data,
					   const uint32_t channel, const char *chan_name)
{
	volatile struct dma_xilinx_axi_dma_sg_descriptor *current_descriptor =
		&channel_data->descriptors[channel_data->current_transfer_end_index];
	unsigned int processed_packets = 0;

	while (current_descriptor->status & XILINX_AXI_DMA_SG_DESCRIPTOR_STATUS_COMPLETE_MASK ||
	       current_descriptor->status & ~XILINX_AXI_DMA_SG_DESCRIPTOR_STATUS_TRANSFERRED_MASK) {
		/* descriptor completed or errored out - need to call the callback */
		int retval = DMA_STATUS_COMPLETE;

		/* this is meaningless / ignored for the TX channel */
		channel_data->last_rx_size = current_descriptor->status &
					     XILINX_AXI_DMA_SG_DESCRIPTOR_STATUS_LENGTH_MASK;

		if (dma_xilinx_axi_dma_channel_has_error(channel_data, current_descriptor)) {
			LOG_ERR("Channel / descriptor error on %s chan!", chan_name);
			retval = -EFAULT;
		}

		if (channel_data->check_csum_in_isr) {
			uint32_t checksum_status = current_descriptor->app2;

			if (checksum_status & XILINX_AXI_DMA_SG_DESCRIPTOR_APP2_FCS_ERR_MASK) {
				LOG_ERR("Checksum offloading has FCS error status %" PRIx32 "!",
					checksum_status);
				retval = -EFAULT;
			}

			if ((checksum_status & XILINX_AXI_DMA_SG_DESCRIPTOR_APP2_IP_ERR_MASK) ==
			    XILINX_AXI_DMA_SG_DESCRIPTOR_APP2_IP_ERR_MASK) {
				LOG_ERR("Checksum offloading has IP error status %" PRIx32 "!",
					checksum_status);
				retval = -EFAULT;
			}

			if ((checksum_status & XILINX_AXI_DMA_SG_DESCRIPTOR_APP2_UDP_ERR_MASK) ==
			    XILINX_AXI_DMA_SG_DESCRIPTOR_APP2_UDP_ERR_MASK) {
				LOG_ERR("Checksum offloading has UDP error status %" PRIx32 "!",
					checksum_status);
				retval = -EFAULT;
			}

			if ((checksum_status & XILINX_AXI_DMA_SG_DESCRIPTOR_APP2_TCP_ERR_MASK) ==
			    XILINX_AXI_DMA_SG_DESCRIPTOR_APP2_TCP_ERR_MASK) {
				LOG_ERR("Checksum offloading has TCP error status %" PRIx32 "!",
					checksum_status);
				retval = -EFAULT;
			}
			/* FIXME in some corner cases, the hardware cannot check the checksum; */
			/* in this case, we cannot let the Zephyr network stack know, */
			/* as we do not have per-skb flags for checksum status */
		}

		/* clear the flags so that the DMA does not transfer the descriptor twice */
		/* or report errors */
		current_descriptor->control = current_descriptor->status = 0;

		/* the callback might start a new transfer, */
		/* hence the transfer end needs to be updated first */
		channel_data->current_transfer_end_index++;
		if (channel_data->current_transfer_end_index >= channel_data->num_descriptors) {
			channel_data->current_transfer_end_index = 0;
		}

		if (channel_data->completion_callback) {
			LOG_DBG("Completed transfer with %u bytes!", channel_data->last_rx_size);
			channel_data->completion_callback(
				dev, channel_data->completion_callback_user_data, channel, retval);
		}

		current_descriptor =
			&channel_data->descriptors[channel_data->current_transfer_end_index];
		processed_packets++;
	}

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
	/* this clears the IRQ */
	/* FIXME write the same value back... */
	dma_xilinx_axi_dma_write_reg(&channel_data->channel_regs->dmasr, 0xffffffff);
#pragma GCC diagnostic pop

	/* writes must commit before returning from the ISR */
	barrier_dmem_fence_full();

	return processed_packets;
}

static void dma_xilinx_axi_dma_tx_isr(const struct device *dev)
{
	struct dma_xilinx_axi_dma_data *data = dev->data;
	struct dma_xilinx_axi_dma_channel *channel_data =
		&data->channels[XILINX_AXI_DMA_TX_CHANNEL_NUM];
	int processed_packets;

	dma_xilinx_axi_dma_disable_cache();

	processed_packets = dma_xilinx_axi_dma_clean_up_sg_descriptors(
		dev, channel_data, XILINX_AXI_DMA_TX_CHANNEL_NUM, "TX");

	dma_xilinx_axi_dma_enable_cache();

	LOG_DBG("Cleaned up %u TX packets in this ISR!", processed_packets);

	dma_xilinx_axi_dma_acknowledge_interrupt(channel_data);
}

static void dma_xilinx_axi_dma_rx_isr(const struct device *dev)
{
	struct dma_xilinx_axi_dma_data *data = dev->data;
	struct dma_xilinx_axi_dma_channel *channel_data =
		&data->channels[XILINX_AXI_DMA_RX_CHANNEL_NUM];
	int processed_packets;

	dma_xilinx_axi_dma_disable_cache();

	processed_packets = dma_xilinx_axi_dma_clean_up_sg_descriptors(
		dev, channel_data, XILINX_AXI_DMA_RX_CHANNEL_NUM, "RX");

	dma_xilinx_axi_dma_enable_cache();

	LOG_DBG("Received %u RX packets in this ISR!", processed_packets);

	dma_xilinx_axi_dma_acknowledge_interrupt(channel_data);
}

#ifdef CONFIG_DMA_64BIT
typedef uint64_t dma_addr_t;
#else
typedef uint32_t dma_addr_t;
#endif

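/*
 * Submits all descriptors queued since the last call by advancing the
 * channel's tail pointer; if the channel was halted, DMACR and the
 * current-descriptor pointer are programmed first to restart operation.
 */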
static int dma_xilinx_axi_dma_start(const struct device *dev, uint32_t channel)
{
	const struct dma_xilinx_axi_dma_config *cfg = dev->config;
	struct dma_xilinx_axi_dma_data *data = dev->data;
	struct dma_xilinx_axi_dma_channel *channel_data = &data->channels[channel];
	volatile struct dma_xilinx_axi_dma_sg_descriptor *current_descriptor;
	volatile struct dma_xilinx_axi_dma_sg_descriptor *first_unprocessed_descriptor;
	size_t tail_descriptor;

	bool halted = false;

	/* running the ISR in parallel could cause issues with the metadata */
	const int irq_key = dma_xilinx_axi_dma_lock_irq(cfg, channel);

	if (channel >= cfg->channels) {
		LOG_ERR("Invalid channel %" PRIu32 " - must be < %" PRIu32 "!", channel,
			cfg->channels);
		dma_xilinx_axi_dma_unlock_irq(cfg, channel, irq_key);
		return -EINVAL;
	}

	tail_descriptor = channel_data->current_transfer_start_index++;

	if (channel_data->current_transfer_start_index >= channel_data->num_descriptors) {
		LOG_DBG("Wrapping tail descriptor on %s chan!",
			channel == XILINX_AXI_DMA_TX_CHANNEL_NUM ? "TX" : "RX");
		channel_data->current_transfer_start_index = 0;
	}

	dma_xilinx_axi_dma_disable_cache();
	current_descriptor = &channel_data->descriptors[tail_descriptor];
	first_unprocessed_descriptor =
		&channel_data->descriptors[channel_data->current_transfer_end_index];

	LOG_DBG("Starting DMA on %s channel with tail ptr %zu start ptr %zu",
		channel == XILINX_AXI_DMA_TX_CHANNEL_NUM ? "TX" : "RX", tail_descriptor,
		channel_data->current_transfer_end_index);

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
	if (dma_xilinx_axi_dma_read_reg(&channel_data->channel_regs->dmasr) &
	    XILINX_AXI_DMA_REGS_DMASR_HALTED) {

		halted = true;

		LOG_DBG("AXI DMA is halted - restart operation!");

#ifdef CONFIG_DMA_64BIT
		dma_xilinx_axi_dma_write_reg(
			&channel_data->channel_regs->curdesc,
			(uint32_t)(((uintptr_t)first_unprocessed_descriptor) & 0xffffffff));
		dma_xilinx_axi_dma_write_reg(
			&channel_data->channel_regs->curdesc_msb,
			(uint32_t)(((uintptr_t)first_unprocessed_descriptor) >> 32));
#else
		dma_xilinx_axi_dma_write_reg(&channel_data->channel_regs->curdesc,
					     (uint32_t)(uintptr_t)first_unprocessed_descriptor);
#endif
	}
#pragma GCC diagnostic pop

	/* the current descriptor MUST be set before the tail descriptor */
	barrier_dmem_fence_full();

	if (halted) {
		uint32_t new_control = 0;

		new_control |= XILINX_AXI_DMA_REGS_DMACR_RS;
		/* no reset */
		new_control &= ~XILINX_AXI_DMA_REGS_DMACR_RESET;
		/* TODO make this a DT parameter */
		/* for the Ethernet DMA, this should never be used */
		new_control &= ~XILINX_AXI_DMA_REGS_DMACR_KEYHOLE;
		/* no cyclic mode - we use the completed bit to track which */
		/* transfers were completed */
		new_control &= ~XILINX_AXI_DMA_REGS_DMACR_CYC_BD_EN;
		/* we want interrupts on complete */
		new_control |= XILINX_AXI_DMA_REGS_DMACR_IOC_IRQEN;
		/* we do want timeout IRQs - */
		/* they are used to catch cases where we missed interrupts */
		new_control |= XILINX_AXI_DMA_REGS_DMACR_DLY_IRQEN;
		/* we want IRQs on error */
		new_control |= XILINX_AXI_DMA_REGS_DMACR_ERR_IRQEN;
		/* interrupt after every completed transfer */
		new_control |= CONFIG_DMA_XILINX_AXI_DMA_INTERRUPT_THRESHOLD
			       << XILINX_AXI_DMA_REGS_DMACR_IRQTHRESH_SHIFT_BITS;
		/* timeout after config * 125 * clock period */
		new_control |= CONFIG_DMA_XILINX_AXI_DMA_INTERRUPT_TIMEOUT
			       << XILINX_AXI_DMA_REGS_DMACR_IRQDELAY_SHIFT_BITS;

		LOG_DBG("New DMACR value: %" PRIx32, new_control);

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
		dma_xilinx_axi_dma_write_reg(&channel_data->channel_regs->dmacr, new_control);
		/* need to make sure the start was committed before writing the tail */
		barrier_dmem_fence_full();
	}

#ifdef CONFIG_DMA_64BIT
	dma_xilinx_axi_dma_write_reg(&channel_data->channel_regs->taildesc,
				     (uint32_t)(((uintptr_t)current_descriptor) & 0xffffffff));
	dma_xilinx_axi_dma_write_reg(&channel_data->channel_regs->taildesc_msb,
				     (uint32_t)(((uintptr_t)current_descriptor) >> 32));
#else
	dma_xilinx_axi_dma_write_reg(&channel_data->channel_regs->taildesc,
				     (uint32_t)(uintptr_t)current_descriptor);
#endif
#pragma GCC diagnostic pop

	dma_xilinx_axi_dma_enable_cache();

	dma_xilinx_axi_dma_unlock_irq(cfg, channel, irq_key);

	/* commit stores before returning to the caller */
	barrier_dmem_fence_full();

	return 0;
}

static int dma_xilinx_axi_dma_stop(const struct device *dev, uint32_t channel)
{
	const struct dma_xilinx_axi_dma_config *cfg = dev->config;
	struct dma_xilinx_axi_dma_data *data = dev->data;
	struct dma_xilinx_axi_dma_channel *channel_data = &data->channels[channel];

	uint32_t new_control;

	if (channel >= cfg->channels) {
		LOG_ERR("Invalid channel %" PRIu32 " - must be < %" PRIu32 "!", channel,
			cfg->channels);
		return -EINVAL;
	}

	k_timer_stop(&channel_data->polling_timer);

	new_control = channel_data->channel_regs->dmacr;
	/* RS = 0 --> DMA will complete ongoing transactions and then go into hold */
	new_control = new_control & ~XILINX_AXI_DMA_REGS_DMACR_RS;

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
	dma_xilinx_axi_dma_write_reg(&channel_data->channel_regs->dmacr, new_control);
#pragma GCC diagnostic pop

	/* commit before returning to the caller */
	barrier_dmem_fence_full();

	return 0;
}

static int dma_xilinx_axi_dma_get_status(const struct device *dev, uint32_t channel,
					 struct dma_status *stat)
{
	const struct dma_xilinx_axi_dma_config *cfg = dev->config;
	struct dma_xilinx_axi_dma_data *data = dev->data;
	struct dma_xilinx_axi_dma_channel *channel_data = &data->channels[channel];

	if (channel >= cfg->channels) {
		LOG_ERR("Invalid channel %" PRIu32 " - must be < %" PRIu32 "!", channel,
			cfg->channels);
		return -EINVAL;
	}

	memset(stat, 0, sizeof(*stat));

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
	stat->busy = !(dma_xilinx_axi_dma_read_reg(&channel_data->channel_regs->dmasr) &
		       XILINX_AXI_DMA_REGS_DMASR_IDLE) &&
		     !(dma_xilinx_axi_dma_read_reg(&channel_data->channel_regs->dmasr) &
		       XILINX_AXI_DMA_REGS_DMASR_HALTED);
#pragma GCC diagnostic pop
	stat->dir = channel_data->last_transfer_direction;

	/* FIXME fill hardware-specific fields */

	return 0;
}
/**
 * Transfers a single buffer through the DMA.
 * If is_first or is_last is NOT set, the buffer is considered part of a SG transfer consisting of
 * multiple blocks. Otherwise, the block is one transfer.
 */
static inline int dma_xilinx_axi_dma_transfer_block(const struct dma_xilinx_axi_dma_config *cfg,
						    uint32_t channel,
						    struct dma_xilinx_axi_dma_channel *channel_data,
						    dma_addr_t buffer_addr, size_t block_size,
						    bool is_first, bool is_last)
{
	volatile struct dma_xilinx_axi_dma_sg_descriptor *current_descriptor;

	/* running the ISR in parallel could cause issues with the metadata */
	const int irq_key = dma_xilinx_axi_dma_lock_irq(cfg, channel);

	current_descriptor =
		&channel_data->descriptors[channel_data->current_transfer_start_index];

	dma_xilinx_axi_dma_disable_cache();

#ifdef CONFIG_DMA_64BIT
	current_descriptor->buffer_address = (uint32_t)buffer_addr & 0xffffffff;
	current_descriptor->buffer_address_msb = (uint32_t)(buffer_addr >> 32);
#else
	current_descriptor->buffer_address = buffer_addr;
#endif
	current_descriptor->app0 = channel_data->sg_desc_app0;

	/* the descriptor length field is only 26 bits wide */
	if (block_size > XILINX_AXI_DMA_SG_DESCRIPTOR_CTRL_LENGTH_MASK) {
		LOG_ERR("Too large block: %zu bytes!", block_size);

		dma_xilinx_axi_dma_enable_cache();
		dma_xilinx_axi_dma_unlock_irq(cfg, channel, irq_key);

		return -EINVAL;
	}
	/* clears the start of frame / end of frame flags as well */
	current_descriptor->control = (uint32_t)block_size;

	if (is_first) {
		current_descriptor->control =
			current_descriptor->control | XILINX_AXI_DMA_SG_DESCRIPTOR_CTRL_SOF_MASK;
	}
	if (is_last) {
		current_descriptor->control =
			current_descriptor->control | XILINX_AXI_DMA_SG_DESCRIPTOR_CTRL_EOF_MASK;
	}

	/* the SG descriptor must be completed BEFORE the hardware is made aware of it */
	barrier_dmem_fence_full();

	dma_xilinx_axi_dma_enable_cache();

	dma_xilinx_axi_dma_unlock_irq(cfg, channel, irq_key);

	return 0;
}

#ifdef CONFIG_DMA_64BIT
static inline int dma_xilinx_axi_dma_config_reload(const struct device *dev, uint32_t channel,
						   uint64_t src, uint64_t dst, size_t size)
#else
static inline int dma_xilinx_axi_dma_config_reload(const struct device *dev, uint32_t channel,
						   uint32_t src, uint32_t dst, size_t size)
#endif
{
	const struct dma_xilinx_axi_dma_config *cfg = dev->config;
	struct dma_xilinx_axi_dma_data *data = dev->data;
	struct dma_xilinx_axi_dma_channel *channel_data = &data->channels[channel];

	if (channel >= cfg->channels) {
		LOG_ERR("Invalid channel %" PRIu32 " - must be < %" PRIu32 "!", channel,
			cfg->channels);
		return -EINVAL;
	}
	/* one-block-at-a-time transfer */
	return dma_xilinx_axi_dma_transfer_block(
		cfg, channel, channel_data, channel == XILINX_AXI_DMA_TX_CHANNEL_NUM ? src : dst,
		size, true, true);
}

/* regularly check if we missed an interrupt from the device */
/* as interrupts are level-sensitive, this can happen on certain platforms */
static void polling_timer_handler(struct k_timer *timer)
{
	struct dma_xilinx_axi_dma_channel *channel =
		CONTAINER_OF(timer, struct dma_xilinx_axi_dma_channel, polling_timer);
	const struct device *dev = channel->polling_timer_params.dev;
	const unsigned int irq_number = channel->polling_timer_params.irq_number;
	const int was_enabled = irq_is_enabled(irq_number);

	irq_disable(irq_number);

	LOG_DBG("Polling ISR!");

	channel->polling_timer_params.isr(dev);

	if (was_enabled) {
		irq_enable(irq_number);
	}
}

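/*
 * Configures a channel and queues SG descriptors for all blocks in the
 * head_block chain; the transfer is only submitted to the hardware once
 * dma_xilinx_axi_dma_start() writes the tail pointer.
 */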
static int dma_xilinx_axi_dma_configure(const struct device *dev, uint32_t channel,
					struct dma_config *dma_cfg)
{
	const struct dma_xilinx_axi_dma_config *cfg = dev->config;
	struct dma_xilinx_axi_dma_data *data = dev->data;
	struct dma_block_config *current_block = dma_cfg->head_block;
	int ret = 0;
	int block_count = 0;

	struct dma_xilinx_axi_dma_register_space *regs =
		(struct dma_xilinx_axi_dma_register_space *)cfg->reg;

	if (channel >= cfg->channels) {
		LOG_ERR("Invalid channel %" PRIu32 " - must be < %" PRIu32 "!", channel,
			cfg->channels);
		return -EINVAL;
	}

	if (cfg->channels != XILINX_AXI_DMA_NUM_CHANNELS) {
		LOG_ERR("Invalid number of configured channels (%" PRIu32
			") - Xilinx AXI DMA must have %" PRIu32 " channels!",
			cfg->channels, XILINX_AXI_DMA_NUM_CHANNELS);
		return -EINVAL;
	}

	if (dma_cfg->head_block->source_addr_adj == DMA_ADDR_ADJ_DECREMENT) {
		LOG_ERR("Xilinx AXI DMA only supports incrementing addresses!");
		return -ENOTSUP;
	}

	if (dma_cfg->head_block->dest_addr_adj == DMA_ADDR_ADJ_DECREMENT) {
		LOG_ERR("Xilinx AXI DMA only supports incrementing addresses!");
		return -ENOTSUP;
	}

	if (dma_cfg->head_block->source_addr_adj != DMA_ADDR_ADJ_INCREMENT &&
	    dma_cfg->head_block->source_addr_adj != DMA_ADDR_ADJ_NO_CHANGE) {
		LOG_ERR("invalid source_addr_adj %" PRIu16, dma_cfg->head_block->source_addr_adj);
		return -ENOTSUP;
	}
	if (dma_cfg->head_block->dest_addr_adj != DMA_ADDR_ADJ_INCREMENT &&
	    dma_cfg->head_block->dest_addr_adj != DMA_ADDR_ADJ_NO_CHANGE) {
		LOG_ERR("invalid dest_addr_adj %" PRIu16, dma_cfg->head_block->dest_addr_adj);
		return -ENOTSUP;
	}

	if (channel == XILINX_AXI_DMA_TX_CHANNEL_NUM &&
	    dma_cfg->channel_direction != MEMORY_TO_PERIPHERAL) {
		LOG_ERR("TX channel must be used with MEMORY_TO_PERIPHERAL!");
		return -ENOTSUP;
	}

	if (channel == XILINX_AXI_DMA_RX_CHANNEL_NUM &&
	    dma_cfg->channel_direction != PERIPHERAL_TO_MEMORY) {
		LOG_ERR("RX channel must be used with PERIPHERAL_TO_MEMORY!");
		return -ENOTSUP;
	}

	k_timer_init(&data->channels[channel].polling_timer, polling_timer_handler, NULL);

	data->channels[channel].polling_timer_params.dev = dev;
	data->channels[channel].polling_timer_params.irq_number = cfg->irq0_channels[channel];
	data->channels[channel].polling_timer_params.isr =
		(channel == XILINX_AXI_DMA_TX_CHANNEL_NUM) ? dma_xilinx_axi_dma_tx_isr
							   : dma_xilinx_axi_dma_rx_isr;

	data->channels[channel].last_transfer_direction = dma_cfg->channel_direction;

	dma_xilinx_axi_dma_disable_cache();

	if (channel == XILINX_AXI_DMA_TX_CHANNEL_NUM) {
		data->channels[channel].descriptors = descriptors_tx;
		data->channels[channel].num_descriptors = ARRAY_SIZE(descriptors_tx);

		data->channels[channel].channel_regs = &regs->mm2s_registers;
	} else {
		data->channels[channel].descriptors = descriptors_rx;
		data->channels[channel].num_descriptors = ARRAY_SIZE(descriptors_rx);

		data->channels[channel].channel_regs = &regs->s2mm_registers;
	}

	LOG_DBG("Resetting DMA channel!");

	if (!data->device_has_been_reset) {
		LOG_INF("Soft-resetting the DMA core!");
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
		/* this resets BOTH the RX and TX channels, although it is triggered in the
		 * per-channel DMACR
		 */
		dma_xilinx_axi_dma_write_reg(&data->channels[channel].channel_regs->dmacr,
					     XILINX_AXI_DMA_REGS_DMACR_RESET);
#pragma GCC diagnostic pop
		data->device_has_been_reset = true;
	}

	LOG_DBG("Configuring %zu DMA descriptors for %s", data->channels[channel].num_descriptors,
		channel == XILINX_AXI_DMA_TX_CHANNEL_NUM ? "TX" : "RX");

	/* only configure fields whose default is not 0, as the descriptors are in a */
	/* zero-initialized segment */
	data->channels[channel].current_transfer_start_index =
		data->channels[channel].current_transfer_end_index = 0;
	for (size_t i = 0; i < data->channels[channel].num_descriptors; i++) {
		uintptr_t nextdesc;
		uint32_t low_bytes;
#ifdef CONFIG_DMA_64BIT
		uint32_t high_bytes;
#endif
		if (i + 1 < data->channels[channel].num_descriptors) {
			nextdesc = (uintptr_t)&data->channels[channel].descriptors[i + 1];
		} else {
			nextdesc = (uintptr_t)&data->channels[channel].descriptors[0];
		}
		/* SG descriptors have 64-byte alignment requirements; */
		/* we check this here, for each descriptor */
		__ASSERT(
			(nextdesc & XILINX_AXI_DMA_SG_DESCRIPTOR_ADDRESS_MASK) == 0,
			"SG descriptor address %p (offset %zu) was not aligned to 64-byte boundary!",
			(void *)nextdesc, i);

		low_bytes = (uint32_t)(((uint64_t)nextdesc) & 0xffffffff);
		data->channels[channel].descriptors[i].nxtdesc = low_bytes;

#ifdef CONFIG_DMA_64BIT
		high_bytes = (uint32_t)(((uint64_t)nextdesc >> 32) & 0xffffffff);
		data->channels[channel].descriptors[i].nxtdesc_msb = high_bytes;
#endif
	}

	dma_xilinx_axi_dma_enable_cache();

	data->channels[channel].check_csum_in_isr = false;

	/* the DMA passes the app fields through to the AXIStream-connected device; */
	/* whether the connected device understands these flags needs to be determined by the */
	/* caller! */
	switch (dma_cfg->linked_channel) {
	case XILINX_AXI_DMA_LINKED_CHANNEL_FULL_CSUM_OFFLOAD:
		if (channel == XILINX_AXI_DMA_TX_CHANNEL_NUM) {
			/* for the TX channel, we need to indicate that we would like to use */
			/* checksum offloading */
			data->channels[channel].sg_desc_app0 =
				XILINX_AXI_DMA_SG_DESCRIPTOR_APP0_CHECKSUM_OFFLOAD_FULL;
		} else {
			/* for the RX channel, the Ethernet core will indicate to us that it has */
			/* computed a checksum and whether it is valid; we need to check this in */
			/* the ISR and report it upstream */
			data->channels[channel].check_csum_in_isr = true;
		}
		break;
	case XILINX_AXI_DMA_LINKED_CHANNEL_NO_CSUM_OFFLOAD:
		data->channels[channel].sg_desc_app0 =
			XILINX_AXI_DMA_SG_DESCRIPTOR_APP0_CHECKSUM_OFFLOAD_NONE;
		break;
	default:
		LOG_ERR("Linked channel invalid! Valid values: %u for full Ethernet checksum "
			"offloading, %u for no checksum offloading!",
			XILINX_AXI_DMA_LINKED_CHANNEL_FULL_CSUM_OFFLOAD,
			XILINX_AXI_DMA_LINKED_CHANNEL_NO_CSUM_OFFLOAD);
		return -EINVAL;
	}

	data->channels[channel].completion_callback = dma_cfg->dma_callback;
	data->channels[channel].completion_callback_user_data = dma_cfg->user_data;

	LOG_INF("Completed configuration of AXI DMA - Starting transfer!");

	do {
		ret = dma_xilinx_axi_dma_transfer_block(cfg, channel, &data->channels[channel],
							channel == XILINX_AXI_DMA_TX_CHANNEL_NUM
								? current_block->source_address
								: current_block->dest_address,
							current_block->block_size, block_count == 0,
							current_block->next_block == NULL);
		block_count++;
	} while ((current_block = current_block->next_block) && ret == 0);

	k_timer_start(&data->channels[channel].polling_timer,
		      K_MSEC(CONFIG_DMA_XILINX_AXI_DMA_POLL_INTERVAL),
		      K_MSEC(CONFIG_DMA_XILINX_AXI_DMA_POLL_INTERVAL));

	return ret;
}

static bool dma_xilinx_axi_dma_chan_filter(const struct device *dev, int channel,
					   void *filter_param)
{
	const char *filter_str = (const char *)filter_param;

	if (strcmp(filter_str, "tx") == 0) {
		return channel == XILINX_AXI_DMA_TX_CHANNEL_NUM;
	}
	if (strcmp(filter_str, "rx") == 0) {
		return channel == XILINX_AXI_DMA_RX_CHANNEL_NUM;
	}

	return false;
}

/* DMA API callbacks */
static DEVICE_API(dma, dma_xilinx_axi_dma_driver_api) = {
	.config = dma_xilinx_axi_dma_configure,
	.reload = dma_xilinx_axi_dma_config_reload,
	.start = dma_xilinx_axi_dma_start,
	.stop = dma_xilinx_axi_dma_stop,
	.suspend = NULL,
	.resume = NULL,
	.get_status = dma_xilinx_axi_dma_get_status,
	.chan_filter = dma_xilinx_axi_dma_chan_filter,
};

static int dma_xilinx_axi_dma_init(const struct device *dev)
{
	const struct dma_xilinx_axi_dma_config *cfg = dev->config;

	cfg->irq_configure();
	return 0;
}

/* first IRQ is TX */
#define TX_IRQ_CONFIGURE(inst)                                                                     \
	IRQ_CONNECT(DT_INST_IRQN_BY_IDX(inst, 0), DT_INST_IRQ_BY_IDX(inst, 0, priority),           \
		    dma_xilinx_axi_dma_tx_isr, DEVICE_DT_INST_GET(inst), 0);                       \
	irq_enable(DT_INST_IRQN_BY_IDX(inst, 0));
/* second IRQ is RX */
#define RX_IRQ_CONFIGURE(inst)                                                                     \
	IRQ_CONNECT(DT_INST_IRQN_BY_IDX(inst, 1), DT_INST_IRQ_BY_IDX(inst, 1, priority),           \
		    dma_xilinx_axi_dma_rx_isr, DEVICE_DT_INST_GET(inst), 0);                       \
	irq_enable(DT_INST_IRQN_BY_IDX(inst, 1));

#define CONFIGURE_ALL_IRQS(inst)                                                                   \
	TX_IRQ_CONFIGURE(inst);                                                                    \
	RX_IRQ_CONFIGURE(inst);

#define XILINX_AXI_DMA_INIT(inst)                                                                  \
	static void dma_xilinx_axi_dma##inst##_irq_configure(void)                                 \
	{                                                                                          \
		CONFIGURE_ALL_IRQS(inst);                                                          \
	}                                                                                          \
	static uint32_t dma_xilinx_axi_dma##inst##_irq0_channels[] =                               \
		DT_INST_PROP_OR(inst, interrupts, {0});                                            \
	static const struct dma_xilinx_axi_dma_config dma_xilinx_axi_dma##inst##_config = {        \
		.reg = (void *)(uintptr_t)DT_INST_REG_ADDR(inst),                                  \
		.channels = DT_INST_PROP(inst, dma_channels),                                      \
		.irq_configure = dma_xilinx_axi_dma##inst##_irq_configure,                         \
		.irq0_channels = dma_xilinx_axi_dma##inst##_irq0_channels,                         \
		.irq0_channels_size = ARRAY_SIZE(dma_xilinx_axi_dma##inst##_irq0_channels),        \
	};                                                                                         \
	static struct dma_xilinx_axi_dma_channel                                                   \
		dma_xilinx_axi_dma##inst##_channels[DT_INST_PROP(inst, dma_channels)];             \
	ATOMIC_DEFINE(dma_xilinx_axi_dma_atomic##inst, DT_INST_PROP(inst, dma_channels));          \
	static struct dma_xilinx_axi_dma_data dma_xilinx_axi_dma##inst##_data = {                  \
		.ctx = {.magic = DMA_MAGIC, .atomic = NULL},                                       \
		.channels = dma_xilinx_axi_dma##inst##_channels,                                   \
	};                                                                                         \
                                                                                                   \
	DEVICE_DT_INST_DEFINE(inst, &dma_xilinx_axi_dma_init, NULL,                                \
			      &dma_xilinx_axi_dma##inst##_data,                                    \
			      &dma_xilinx_axi_dma##inst##_config, POST_KERNEL,                     \
			      CONFIG_DMA_INIT_PRIORITY, &dma_xilinx_axi_dma_driver_api);

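/*
 * Illustrative devicetree fragment matched by the instantiation macro above
 * (register address and interrupt specifiers are placeholders and depend on
 * the platform's interrupt controller):
 *
 *	axi_dma0: dma@40400000 {
 *		compatible = "xlnx,axi-dma-1.00.a";
 *		reg = <0x40400000 0x10000>;
 *		interrupts = <29 2>, <30 2>;
 *		dma-channels = <2>;
 *	};
 */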
/* two different compatibles match the very same Xilinx AXI DMA, */
/* depending on whether it is used in the AXI Ethernet subsystem or not */
#define DT_DRV_COMPAT xlnx_eth_dma
DT_INST_FOREACH_STATUS_OKAY(XILINX_AXI_DMA_INIT)

#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT xlnx_axi_dma_1_00_a
DT_INST_FOREACH_STATUS_OKAY(XILINX_AXI_DMA_INIT)