/*
 * Copyright (c) 2022 Andriy Gelman
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT infineon_xmc4xxx_dma

#include <soc.h>
#include <stdint.h>
#include <xmc_dma.h>
#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/dt-bindings/dma/infineon-xmc4xxx-dma.h>
#include <zephyr/irq.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(dma_xmc4xxx, CONFIG_DMA_LOG_LEVEL);

#define MAX_PRIORITY	  7
#define DMA_MAX_BLOCK_LEN 4095
#define DLR_LINE_UNSET	  0xff

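/* Each DLR line's request source select occupies a 4-bit field in DLR->SRSEL0/1 */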
#define DLR_SRSEL_RS_BITSIZE 4
#define DLR_SRSEL_RS_MSK     0xf

#define ALL_EVENTS                                                                                 \
	(XMC_DMA_CH_EVENT_TRANSFER_COMPLETE | XMC_DMA_CH_EVENT_BLOCK_TRANSFER_COMPLETE |           \
	 XMC_DMA_CH_EVENT_SRC_TRANSACTION_COMPLETE | XMC_DMA_CH_EVENT_DST_TRANSACTION_COMPLETE |   \
	 XMC_DMA_CH_EVENT_ERROR)

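/*
 * Per-channel bookkeeping: block_ts caches the block length in transactions
 * (bytes / source_data_size) for get_status(); dlr_line records the DMA line
 * router (DLR) line used for hardware handshaking, or DLR_LINE_UNSET for
 * memory-to-memory transfers.
 */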
struct dma_xmc4xxx_channel {
	dma_callback_t cb;
	void *user_data;
	uint16_t block_ts;
	uint8_t source_data_size;
	uint8_t dlr_line;
};

struct dma_xmc4xxx_config {
	XMC_DMA_t *dma;
	void (*irq_configure)(void);
};

struct dma_xmc4xxx_data {
	struct dma_context ctx;
	struct dma_xmc4xxx_channel *channels;
};

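/*
 * Dispatch one pending event type: find the lowest-numbered channel with the
 * event pending, clear its status and invoke the registered callback. Expects
 * dev, dev_data, dma and event to be in scope at the expansion site.
 */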
#define HANDLE_EVENT(event_test, get_channels_event, ret)                                  \
do {                                                                                       \
	if (event & (XMC_DMA_CH_##event_test)) {                                           \
		uint32_t channels_event = get_channels_event(dma);                         \
		int channel = find_lsb_set(channels_event) - 1;                            \
		struct dma_xmc4xxx_channel *dma_channel;                                   \
											   \
		__ASSERT_NO_MSG(channel >= 0);                                             \
		dma_channel = &dev_data->channels[channel];                                \
		/* The event must be cleared before the callback, since the callback */    \
		/* may call dma_start() and re-enable the event */                         \
		XMC_DMA_CH_ClearEventStatus(dma, channel, XMC_DMA_CH_##event_test);        \
		if (dma_channel->cb) {                                                     \
			dma_channel->cb(dev, dma_channel->user_data, channel, (ret));      \
		}                                                                          \
	}                                                                                  \
} while (0)

/* The ISR is level triggered, so we don't have to loop over all the channels */
/* in a single call */
static void dma_xmc4xxx_isr(const struct device *dev)
{
	struct dma_xmc4xxx_data *dev_data = dev->data;
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;
	int num_dma_channels = dev_data->ctx.dma_channels;
	XMC_DMA_t *dma = dev_cfg->dma;
	uint32_t event;
	uint32_t sr_overruns;

	/* There are two types of possible DMA error events: */
	/* 1. Error response from an AHB slave on the HRESP bus during a DMA transfer. */
	/*    Treat this as an EPERM error. */
	/* 2. Service request overruns on the DLR line. */
	/*    Treat this as an EIO error. */

	event = XMC_DMA_GetEventStatus(dma);
	HANDLE_EVENT(EVENT_ERROR, XMC_DMA_GetChannelsErrorStatus, -EPERM);
	HANDLE_EVENT(EVENT_BLOCK_TRANSFER_COMPLETE, XMC_DMA_GetChannelsBlockCompleteStatus, 0);
	HANDLE_EVENT(EVENT_TRANSFER_COMPLETE, XMC_DMA_GetChannelsTransferCompleteStatus, 0);

	sr_overruns = DLR->OVRSTAT;

	if (sr_overruns == 0) {
		return;
	}

	/* clear the overruns */
	DLR->OVRCLR = sr_overruns;

	/* notify about overruns */
	for (int i = 0; i < num_dma_channels; i++) {
		struct dma_xmc4xxx_channel *dma_channel;

		dma_channel = &dev_data->channels[i];
		if (dma_channel->dlr_line != DLR_LINE_UNSET &&
		    sr_overruns & BIT(dma_channel->dlr_line)) {

			/* From the XMC4700/4800 reference manual, Section 4.4.1: */
			/* once the overrun condition is entered, the user can clear the */
			/* overrun status bits by writing to the DLR_OVRCLR register. */
			/* Additionally, the pending request must be reset by successively */
			/* disabling and enabling the respective line. */
			DLR->LNEN &= ~BIT(dma_channel->dlr_line);
			DLR->LNEN |= BIT(dma_channel->dlr_line);

			LOG_ERR("Overrun detected on channel %d", i);
			if (dma_channel->cb != NULL) {
				dma_channel->cb(dev, dma_channel->user_data, i, -EIO);
			}
		}
	}
}

static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct dma_config *config)
{
	struct dma_xmc4xxx_data *dev_data = dev->data;
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;
	struct dma_block_config *block = config->head_block;
	XMC_DMA_t *dma = dev_cfg->dma;
	uint8_t dlr_line = DLR_LINE_UNSET;

	if (channel >= dev_data->ctx.dma_channels) {
		LOG_ERR("Invalid channel number");
		return -EINVAL;
	}

	if (config->channel_priority > MAX_PRIORITY) {
		LOG_ERR("Invalid priority");
		return -EINVAL;
	}

	if (config->source_chaining_en || config->dest_chaining_en) {
		LOG_ERR("Channel chaining is not supported");
		return -EINVAL;
	}

	if (config->channel_direction != MEMORY_TO_MEMORY &&
	    config->channel_direction != MEMORY_TO_PERIPHERAL &&
	    config->channel_direction != PERIPHERAL_TO_MEMORY) {
		LOG_ERR("Unsupported channel direction");
		return -EINVAL;
	}

	if (config->block_count != 1) {
		LOG_ERR("Invalid block count");
		return -EINVAL;
	}

	if (block->source_gather_en || block->dest_scatter_en) {
		if (dma != XMC_DMA0 || channel >= 2) {
			LOG_ERR("Gather/scatter is only supported on DMA0 ch0 and ch1");
			return -EINVAL;
		}
	}

	if (config->dest_data_size != 1 && config->dest_data_size != 2 &&
	    config->dest_data_size != 4) {
		LOG_ERR("Invalid dest size. Only 1, 2 and 4 bytes are supported");
		return -EINVAL;
	}

	if (config->source_data_size != 1 && config->source_data_size != 2 &&
	    config->source_data_size != 4) {
		LOG_ERR("Invalid source size. Only 1, 2 and 4 bytes are supported");
		return -EINVAL;
	}

	if (config->source_burst_length != 1 && config->source_burst_length != 4 &&
	    config->source_burst_length != 8) {
		LOG_ERR("Invalid src burst length (data size units). Only 1, 4 and 8 units supported");
		return -EINVAL;
	}

	if (config->dest_burst_length != 1 && config->dest_burst_length != 4 &&
	    config->dest_burst_length != 8) {
		LOG_ERR("Invalid dest burst length (data size units). Only 1, 4 and 8 units supported");
		return -EINVAL;
	}

	if (block->block_size / config->source_data_size > DMA_MAX_BLOCK_LEN) {
		LOG_ERR("Block transactions must be <= 4095");
		return -EINVAL;
	}

	if (XMC_DMA_CH_IsEnabled(dma, channel)) {
		LOG_ERR("Channel is still active");
		return -EINVAL;
	}

	XMC_DMA_CH_ClearEventStatus(dma, channel, ALL_EVENTS);

	dma->CH[channel].SAR = block->source_address;
	dma->CH[channel].DAR = block->dest_address;
	dma->CH[channel].LLP = 0;

	/* set the number of transactions in the block */
	dma->CH[channel].CTLH = block->block_size / config->source_data_size;
	/* set the priority and software handshaking for src/dst. If hardware handshaking */
	/* is used, it will be enabled later in the code */
	dma->CH[channel].CFGL = (config->channel_priority << GPDMA0_CH_CFGL_CH_PRIOR_Pos) |
				GPDMA0_CH_CFGL_HS_SEL_SRC_Msk | GPDMA0_CH_CFGL_HS_SEL_DST_Msk;

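	/*
	 * Note on the CTLL encodings (Synopsys GPDMA): TR_WIDTH uses 0/1/2 for
	 * 8/16/32-bit transfers, so a data size of 1/2/4 bytes maps via size / 2.
	 * MSIZE uses 0/1/2 for bursts of 1/4/8 items, so the burst length maps
	 * via length / 4. Both mappings rely on the validation above.
	 */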
	dma->CH[channel].CTLL = config->dest_data_size / 2 << GPDMA0_CH_CTLL_DST_TR_WIDTH_Pos |
				config->source_data_size / 2 << GPDMA0_CH_CTLL_SRC_TR_WIDTH_Pos |
				block->dest_addr_adj << GPDMA0_CH_CTLL_DINC_Pos |
				block->source_addr_adj << GPDMA0_CH_CTLL_SINC_Pos |
				config->dest_burst_length / 4 << GPDMA0_CH_CTLL_DEST_MSIZE_Pos |
				config->source_burst_length / 4 << GPDMA0_CH_CTLL_SRC_MSIZE_Pos |
				BIT(GPDMA0_CH_CTLL_INT_EN_Pos);

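	/*
	 * For peripheral transfers, config->dma_slot packs the service request
	 * source and the DLR line (see the XMC4XXX_DMA_GET_* macros in
	 * dt-bindings/dma/infineon-xmc4xxx-dma.h). DMA0 may use lines 0-7,
	 * DMA1 lines 8-11.
	 */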
	if (config->channel_direction == MEMORY_TO_PERIPHERAL ||
	    config->channel_direction == PERIPHERAL_TO_MEMORY) {
		uint8_t request_source = XMC4XXX_DMA_GET_REQUEST_SOURCE(config->dma_slot);
		uint8_t dlr_line_reg = XMC4XXX_DMA_GET_LINE(config->dma_slot);

		dlr_line = dlr_line_reg;
		if (dma == XMC_DMA0 && dlr_line > 7) {
			LOG_ERR("Unsupported request line %d for DMA0. "
				"Should be in range [0,7]", dlr_line);
			return -EINVAL;
		}

		if (dma == XMC_DMA1 && (dlr_line < 8 || dlr_line > 11)) {
			LOG_ERR("Unsupported request line %d for DMA1. "
				"Should be in range [8,11]", dlr_line);
			return -EINVAL;
		}

		/* clear any overruns */
		DLR->OVRCLR = BIT(dlr_line);
		/* enable the DLR line; toggling it also resets any pending request */
		DLR->LNEN &= ~BIT(dlr_line);
		DLR->LNEN |= BIT(dlr_line);

		/* connect the DLR line to the service request (SR) */
		if (dma == XMC_DMA0) {
			DLR->SRSEL0 &= ~(DLR_SRSEL_RS_MSK << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE));
			DLR->SRSEL0 |= request_source << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE);
		}

		if (dma == XMC_DMA1) {
			dlr_line_reg -= 8;
			DLR->SRSEL1 &= ~(DLR_SRSEL_RS_MSK << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE));
			DLR->SRSEL1 |= request_source << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE);
		}

		/* connect the DMA channel to the DMA line. TT_FC = 1 selects */
		/* memory-to-peripheral, TT_FC = 2 peripheral-to-memory */
		if (config->channel_direction == MEMORY_TO_PERIPHERAL) {
			dma->CH[channel].CFGH = (dlr_line_reg << GPDMA0_CH_CFGH_DEST_PER_Pos) | 4;
			dma->CH[channel].CFGL &= ~BIT(GPDMA0_CH_CFGL_HS_SEL_DST_Pos);
			dma->CH[channel].CTLL |= 1 << GPDMA0_CH_CTLL_TT_FC_Pos;
		}

		if (config->channel_direction == PERIPHERAL_TO_MEMORY) {
			dma->CH[channel].CFGH = (dlr_line_reg << GPDMA0_CH_CFGH_SRC_PER_Pos) | 4;
			dma->CH[channel].CFGL &= ~BIT(GPDMA0_CH_CFGL_HS_SEL_SRC_Pos);
			dma->CH[channel].CTLL |= 2 << GPDMA0_CH_CTLL_TT_FC_Pos;
		}
	}

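	/*
	 * Gather/scatter: the interval is masked to the SGI/DSI field width and
	 * the count is shifted into SGC/DSC, so out-of-range values are
	 * silently truncated rather than rejected.
	 */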
	if (block->source_gather_en) {
		dma->CH[channel].CTLL |= BIT(GPDMA0_CH_CTLL_SRC_GATHER_EN_Pos);
		/* truncate if we are out of range */
		dma->CH[channel].SGR = (block->source_gather_interval & GPDMA0_CH_SGR_SGI_Msk) |
				       block->source_gather_count << GPDMA0_CH_SGR_SGC_Pos;
	}

	if (block->dest_scatter_en) {
		dma->CH[channel].CTLL |= BIT(GPDMA0_CH_CTLL_DST_SCATTER_EN_Pos);
		/* truncate if we are out of range */
		dma->CH[channel].DSR = (block->dest_scatter_interval & GPDMA0_CH_DSR_DSI_Msk) |
				       block->dest_scatter_count << GPDMA0_CH_DSR_DSC_Pos;
	}

	dev_data->channels[channel].cb = config->dma_callback;
	dev_data->channels[channel].user_data = config->user_data;
	dev_data->channels[channel].block_ts = block->block_size / config->source_data_size;
	dev_data->channels[channel].source_data_size = config->source_data_size;
	dev_data->channels[channel].dlr_line = dlr_line;

	XMC_DMA_CH_DisableEvent(dma, channel, ALL_EVENTS);
	XMC_DMA_CH_EnableEvent(dma, channel, XMC_DMA_CH_EVENT_TRANSFER_COMPLETE);

	/* also trigger the callback on every block transfer complete event */
	if (config->complete_callback_en) {
		XMC_DMA_CH_EnableEvent(dma, channel, XMC_DMA_CH_EVENT_BLOCK_TRANSFER_COMPLETE);
	}

	if (!config->error_callback_dis) {
		XMC_DMA_CH_EnableEvent(dma, channel, XMC_DMA_CH_EVENT_ERROR);
	}

	LOG_DBG("Configured channel %d for %08X to %08X (%u)", channel, block->source_address,
		block->dest_address, block->block_size);

	return 0;
}

static int dma_xmc4xxx_start(const struct device *dev, uint32_t channel)
{
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;

	LOG_DBG("Starting channel %d", channel);
	XMC_DMA_CH_Enable(dev_cfg->dma, channel);
	return 0;
}

static int dma_xmc4xxx_stop(const struct device *dev, uint32_t channel)
{
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;
	struct dma_xmc4xxx_data *dev_data = dev->data;
	struct dma_xmc4xxx_channel *dma_channel;
	XMC_DMA_t *dma = dev_cfg->dma;

	dma_channel = &dev_data->channels[channel];
	XMC_DMA_CH_Suspend(dma, channel);

	/* wait until the ongoing transfer finishes */
	while (XMC_DMA_CH_IsEnabled(dma, channel) &&
	      (dma->CH[channel].CFGL & GPDMA0_CH_CFGL_FIFO_EMPTY_Msk) == 0) {
	}

	/* disconnect the DLR line to stop overruns */
	if (dma_channel->dlr_line != DLR_LINE_UNSET) {
		DLR->LNEN &= ~BIT(dma_channel->dlr_line);
	}

	dma_channel->dlr_line = DLR_LINE_UNSET;
	dma_channel->cb = NULL;

	XMC_DMA_CH_Disable(dma, channel);
	return 0;
}

static int dma_xmc4xxx_reload(const struct device *dev, uint32_t channel, uint32_t src,
			      uint32_t dst, size_t size)
{
	struct dma_xmc4xxx_data *dev_data = dev->data;
	size_t block_ts;
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;
	XMC_DMA_t *dma = dev_cfg->dma;
	struct dma_xmc4xxx_channel *dma_channel;

	if (channel >= dev_data->ctx.dma_channels) {
		LOG_ERR("Invalid channel number");
		return -EINVAL;
	}

	if (XMC_DMA_CH_IsEnabled(dma, channel)) {
		LOG_ERR("Channel is still active");
		return -EINVAL;
	}

	dma_channel = &dev_data->channels[channel];
	block_ts = size / dma_channel->source_data_size;
	if (block_ts > DMA_MAX_BLOCK_LEN) {
		LOG_ERR("Block transactions must be <= 4095");
		return -EINVAL;
	}
	dma_channel->block_ts = block_ts;

	/* do we need to clear any errors? */
	dma->CH[channel].SAR = src;
	dma->CH[channel].DAR = dst;
	dma->CH[channel].CTLH = block_ts;

	return 0;
}

static int dma_xmc4xxx_get_status(const struct device *dev, uint32_t channel,
				  struct dma_status *stat)
{
	struct dma_xmc4xxx_data *dev_data = dev->data;
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;
	XMC_DMA_t *dma = dev_cfg->dma;
	struct dma_xmc4xxx_channel *dma_channel;

	if (channel >= dev_data->ctx.dma_channels) {
		LOG_ERR("Invalid channel number");
		return -EINVAL;
	}
	dma_channel = &dev_data->channels[channel];

	stat->busy = XMC_DMA_CH_IsEnabled(dma, channel);

	stat->pending_length  = dma_channel->block_ts - XMC_DMA_CH_GetTransferredData(dma, channel);
	stat->pending_length *= dma_channel->source_data_size;
	/* stat->dir and the other remaining fields are not set. They are not */
	/* useful for xmc4xxx peripheral drivers. */

	return 0;
}

static bool dma_xmc4xxx_chan_filter(const struct device *dev, int channel, void *filter_param)
{
	uint32_t requested_channel;

	if (!filter_param) {
		return true;
	}

	requested_channel = *(uint32_t *)filter_param;

	if (channel == requested_channel) {
		return true;
	}

	return false;
}

static int dma_xmc4xxx_suspend(const struct device *dev, uint32_t channel)
{
	struct dma_xmc4xxx_data *dev_data = dev->data;
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;
	XMC_DMA_t *dma = dev_cfg->dma;

	if (channel >= dev_data->ctx.dma_channels) {
		LOG_ERR("Invalid channel number");
		return -EINVAL;
	}

	XMC_DMA_CH_Suspend(dma, channel);
	return 0;
}

static int dma_xmc4xxx_resume(const struct device *dev, uint32_t channel)
{
	struct dma_xmc4xxx_data *dev_data = dev->data;
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;
	XMC_DMA_t *dma = dev_cfg->dma;

	if (channel >= dev_data->ctx.dma_channels) {
		LOG_ERR("Invalid channel number");
		return -EINVAL;
	}

	XMC_DMA_CH_Resume(dma, channel);
	return 0;
}

static int dma_xmc4xxx_init(const struct device *dev)
{
	const struct dma_xmc4xxx_config *dev_cfg = dev->config;

	XMC_DMA_Enable(dev_cfg->dma);
	dev_cfg->irq_configure();
	return 0;
}

static DEVICE_API(dma, dma_xmc4xxx_driver_api) = {
	.config = dma_xmc4xxx_config,
	.reload = dma_xmc4xxx_reload,
	.start = dma_xmc4xxx_start,
	.stop = dma_xmc4xxx_stop,
	.get_status = dma_xmc4xxx_get_status,
	.chan_filter = dma_xmc4xxx_chan_filter,
	.suspend = dma_xmc4xxx_suspend,
	.resume = dma_xmc4xxx_resume,
};
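
/*
 * Example usage (a minimal sketch, not part of the driver): how a caller
 * might set up a memory-to-memory transfer on this controller through the
 * generic Zephyr DMA API. The "dma0" node label and the buffers are
 * hypothetical.
 *
 *	static const struct device *dma_dev = DEVICE_DT_GET(DT_NODELABEL(dma0));
 *	static uint8_t src_buf[64], dst_buf[64];
 *
 *	static void transfer_done(const struct device *dev, void *user_data,
 *				  uint32_t channel, int status)
 *	{
 *		// status is 0 on transfer complete, -EPERM/-EIO on errors
 *	}
 *
 *	int start_m2m_transfer(void)
 *	{
 *		struct dma_block_config block = {
 *			.source_address = (uint32_t)src_buf,
 *			.dest_address = (uint32_t)dst_buf,
 *			.block_size = sizeof(src_buf),
 *		};
 *		struct dma_config cfg = {
 *			.channel_direction = MEMORY_TO_MEMORY,
 *			.source_data_size = 1,
 *			.dest_data_size = 1,
 *			.source_burst_length = 1,
 *			.dest_burst_length = 1,
 *			.block_count = 1,
 *			.head_block = &block,
 *			.dma_callback = transfer_done,
 *		};
 *		int channel = dma_request_channel(dma_dev, NULL);
 *
 *		if (channel < 0 || dma_config(dma_dev, channel, &cfg) < 0) {
 *			return -EIO;
 *		}
 *		return dma_start(dma_dev, channel);
 *	}
 */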

#define XMC4XXX_DMA_INIT(inst)                                                  \
	static void dma_xmc4xxx##inst##_irq_configure(void)                     \
	{                                                                       \
		IRQ_CONNECT(DT_INST_IRQ_BY_IDX(inst, 0, irq),                   \
			    DT_INST_IRQ_BY_IDX(inst, 0, priority),              \
			    dma_xmc4xxx_isr,                                    \
			    DEVICE_DT_INST_GET(inst), 0);                       \
		irq_enable(DT_INST_IRQ_BY_IDX(inst, 0, irq));                   \
	}                                                                       \
	static const struct dma_xmc4xxx_config dma_xmc4xxx##inst##_config = {   \
		.dma = (XMC_DMA_t *)DT_INST_REG_ADDR(inst),                     \
		.irq_configure = dma_xmc4xxx##inst##_irq_configure,             \
	};                                                                      \
										\
	static struct dma_xmc4xxx_channel                                       \
		dma_xmc4xxx##inst##_channels[DT_INST_PROP(inst, dma_channels)]; \
	ATOMIC_DEFINE(dma_xmc4xxx_atomic##inst,                                 \
		      DT_INST_PROP(inst, dma_channels));                        \
	static struct dma_xmc4xxx_data dma_xmc4xxx##inst##_data = {             \
		.ctx =  {                                                       \
			.magic = DMA_MAGIC,                                     \
			.atomic = dma_xmc4xxx_atomic##inst,                     \
			.dma_channels = DT_INST_PROP(inst, dma_channels),       \
		},                                                              \
		.channels = dma_xmc4xxx##inst##_channels,                       \
	};                                                                      \
										\
	DEVICE_DT_INST_DEFINE(inst, &dma_xmc4xxx_init, NULL,                    \
			      &dma_xmc4xxx##inst##_data,                        \
			      &dma_xmc4xxx##inst##_config, PRE_KERNEL_1,        \
			      CONFIG_DMA_INIT_PRIORITY, &dma_xmc4xxx_driver_api);

DT_INST_FOREACH_STATUS_OKAY(XMC4XXX_DMA_INIT)