/*
 * Copyright (c) 2023 Antmicro <www.antmicro.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT ambiq_spi

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(spi_ambiq, CONFIG_SPI_LOG_LEVEL);

#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/spi/rtio.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/policy.h>
#include <zephyr/pm/device_runtime.h>

#include <stdlib.h>
#include <errno.h>
#include "spi_context.h"
#include <am_mcu_apollo.h>

#define PWRCTRL_MAX_WAIT_US 5

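/* Per-instance hook that powers on the IOM block during driver init. */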
typedef int (*ambiq_spi_pwr_func_t)(void);

struct spi_ambiq_config {
        uint32_t base;
        int size;
        uint32_t clock_freq;
        const struct pinctrl_dev_config *pcfg;
        ambiq_spi_pwr_func_t pwr_func;
        void (*irq_config_func)(void);
};

struct spi_ambiq_data {
        struct spi_context ctx;
        am_hal_iom_config_t iom_cfg;
        void *iom_handler;
        int inst_idx;
        bool cont;
        bool pm_policy_state_on;
};

typedef void (*spi_context_update_trx)(struct spi_context *ctx, uint8_t dfs, uint32_t len);

#define SPI_WORD_SIZE 8

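/*
 * While a transfer is in flight, block SUSPEND_TO_RAM and keep the device
 * runtime-active so the IOM retains its clock and register state.
 */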
static void spi_ambiq_pm_policy_state_lock_get(const struct device *dev)
{
        if (IS_ENABLED(CONFIG_PM)) {
                struct spi_ambiq_data *data = dev->data;

                if (!data->pm_policy_state_on) {
                        data->pm_policy_state_on = true;
                        pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_RAM, PM_ALL_SUBSTATES);
                        pm_device_runtime_get(dev);
                }
        }
}

static void spi_ambiq_pm_policy_state_lock_put(const struct device *dev)
{
        if (IS_ENABLED(CONFIG_PM)) {
                struct spi_ambiq_data *data = dev->data;

                if (data->pm_policy_state_on) {
                        data->pm_policy_state_on = false;
                        pm_device_runtime_put(dev);
                        pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_RAM, PM_ALL_SUBSTATES);
                }
        }
}

#ifdef CONFIG_SPI_AMBIQ_DMA
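/*
 * Command-queue buffer for non-blocking (DMA) transfers, placed in the
 * non-cacheable section so the CPU and the IOM DMA engine stay coherent.
 */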
static __aligned(32) struct {
        __aligned(32) uint32_t buf[CONFIG_SPI_DMA_TCB_BUFFER_SIZE];
} spi_dma_tcb_buf[DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT)] __attribute__((__section__(".nocache")));

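/* Completion callback for non-blocking (DMA) transfers, invoked from the IOM ISR. */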
static void spi_ambiq_callback(void *callback_ctxt, uint32_t status)
{
        const struct device *dev = callback_ctxt;
        struct spi_ambiq_data *data = dev->data;
        struct spi_context *ctx = &data->ctx;

        /* De-assert CS once the transfer has finished, unless it must stay held */
        if (!data->cont) {
                spi_context_cs_control(ctx, false);
        }
        spi_context_complete(ctx, dev, (status == AM_HAL_STATUS_SUCCESS) ? 0 : -EIO);
}
#endif

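/* Recover the controller after a timed-out transaction. */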
static void spi_ambiq_reset(const struct device *dev)
{
        struct spi_ambiq_data *data = dev->data;
        struct spi_context *ctx = &data->ctx;

        /* Cancel the timed-out transaction */
        am_hal_iom_disable(data->iom_handler);
        /* NULL config to trigger a reconfigure on the next xfer */
        ctx->config = NULL;
        spi_context_cs_control(ctx, false);
        /* Signal any thread waiting on the sync semaphore */
        spi_context_complete(ctx, dev, -ETIMEDOUT);
        /* Clean up for the next xfer */
        k_sem_reset(&ctx->sync);
}

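/* Read, clear and service IOM interrupts; the HAL dispatches any DMA callbacks. */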
static void spi_ambiq_isr(const struct device *dev)
{
        uint32_t ui32Status;
        struct spi_ambiq_data *data = dev->data;

        am_hal_iom_interrupt_status_get(data->iom_handler, false, &ui32Status);
        am_hal_iom_interrupt_clear(data->iom_handler, ui32Status);
        am_hal_iom_interrupt_service(data->iom_handler, ui32Status);
}

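/* Validate the requested spi_config and program the IOM with it. */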
static int spi_config(const struct device *dev, const struct spi_config *config)
{
        struct spi_ambiq_data *data = dev->data;
        const struct spi_ambiq_config *cfg = dev->config;
        struct spi_context *ctx = &(data->ctx);
        int ret;

        data->iom_cfg.eInterfaceMode = AM_HAL_IOM_SPI_MODE;

        if (spi_context_configured(ctx, config)) {
                /* Already configured. No need to do it again. */
                return 0;
        }

        if (SPI_WORD_SIZE_GET(config->operation) != SPI_WORD_SIZE) {
                LOG_ERR("Word size must be %d", SPI_WORD_SIZE);
                return -ENOTSUP;
        }

        if ((config->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) {
                LOG_ERR("Only single line mode is supported");
                return -ENOTSUP;
        }

        if (config->operation & SPI_LOCK_ON) {
                LOG_ERR("Lock On not supported");
                return -ENOTSUP;
        }

        if (config->operation & SPI_TRANSFER_LSB) {
                LOG_ERR("LSB first not supported");
                return -ENOTSUP;
        }

        if (config->operation & SPI_MODE_CPOL) {
                if (config->operation & SPI_MODE_CPHA) {
                        data->iom_cfg.eSpiMode = AM_HAL_IOM_SPI_MODE_3;
                } else {
                        data->iom_cfg.eSpiMode = AM_HAL_IOM_SPI_MODE_2;
                }
        } else {
                if (config->operation & SPI_MODE_CPHA) {
                        data->iom_cfg.eSpiMode = AM_HAL_IOM_SPI_MODE_1;
                } else {
                        data->iom_cfg.eSpiMode = AM_HAL_IOM_SPI_MODE_0;
                }
        }

        if (config->operation & SPI_OP_MODE_SLAVE) {
                LOG_ERR("Device mode not supported");
                return -ENOTSUP;
        }
        if (config->operation & SPI_MODE_LOOP) {
                LOG_ERR("Loopback mode not supported");
                return -ENOTSUP;
        }

        if (cfg->clock_freq > AM_HAL_IOM_MAX_FREQ) {
                LOG_ERR("Clock frequency too high");
                return -ENOTSUP;
        }

        /*
         * Run the bus at the slower of the two limits: the frequency requested
         * for this SPI device, or the SPI controller's own clock frequency.
         */
        data->iom_cfg.ui32ClockFreq =
                (config->frequency ? MIN(config->frequency, cfg->clock_freq) : cfg->clock_freq);

#ifdef CONFIG_SPI_AMBIQ_DMA
        data->iom_cfg.pNBTxnBuf = spi_dma_tcb_buf[data->inst_idx].buf;
        data->iom_cfg.ui32NBTxnBufLength = CONFIG_SPI_DMA_TCB_BUFFER_SIZE;
#endif

        /* Disable the IOM instance, as it cannot be configured while enabled */
        ret = am_hal_iom_disable(data->iom_handler);
        if (ret == AM_HAL_STATUS_SUCCESS) {
                ret = am_hal_iom_configure(data->iom_handler, &data->iom_cfg);
        }
        if (ret == AM_HAL_STATUS_SUCCESS) {
                ret = am_hal_iom_enable(data->iom_handler);
        }
        if (ret != AM_HAL_STATUS_SUCCESS) {
                return -EIO;
        }

        /* Record the active config only once the controller has accepted it */
        ctx->config = config;

        return 0;
}

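/*
 * Run a TX-only or RX-only transfer, splitting it into chunks of at most
 * AM_HAL_IOM_MAX_TXNSIZE_SPI bytes per HAL transaction.
 */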
static int spi_ambiq_xfer_half_duplex(const struct device *dev, am_hal_iom_dir_e dir)
{
        am_hal_iom_transfer_t trans = {0};
        struct spi_ambiq_data *data = dev->data;
        struct spi_context *ctx = &data->ctx;
        bool is_last = false;
        uint32_t rem_num, cur_num = 0;
        int ret = 0;
        spi_context_update_trx ctx_update;

        if (dir == AM_HAL_IOM_FULLDUPLEX) {
                return -EINVAL;
        } else if (dir == AM_HAL_IOM_RX) {
                trans.eDirection = AM_HAL_IOM_RX;
                ctx_update = spi_context_update_rx;
                rem_num = ctx->rx_len;
        } else {
                trans.eDirection = AM_HAL_IOM_TX;
                ctx_update = spi_context_update_tx;
                rem_num = ctx->tx_len;
        }

        while (rem_num) {
                cur_num = (rem_num > AM_HAL_IOM_MAX_TXNSIZE_SPI) ? AM_HAL_IOM_MAX_TXNSIZE_SPI
                                                                 : rem_num;
                trans.ui32NumBytes = cur_num;
                trans.pui32TxBuffer = (uint32_t *)ctx->tx_buf;
                trans.pui32RxBuffer = (uint32_t *)ctx->rx_buf;
                ctx_update(ctx, 1, cur_num);
                if ((!spi_context_tx_buf_on(ctx)) && (!spi_context_rx_buf_on(ctx))) {
                        is_last = true;
                }
#ifdef CONFIG_SPI_AMBIQ_DMA
                if (AM_HAL_STATUS_SUCCESS !=
                    am_hal_iom_nonblocking_transfer(data->iom_handler, &trans,
                                                    (is_last ? spi_ambiq_callback : NULL),
                                                    (void *)dev)) {
                        return -EIO;
                }
                if (is_last) {
                        ret = spi_context_wait_for_completion(ctx);
                }
#else
                ret = am_hal_iom_blocking_transfer(data->iom_handler, &trans);
#endif
                rem_num -= cur_num;
                if (ret != 0) {
                        return -EIO;
                }
        }

        return 0;
}

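/*
 * Run TX and RX simultaneously. The HAL full-duplex call requires equal
 * lengths, so any excess on the longer side is finished half-duplex.
 */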
static int spi_ambiq_xfer_full_duplex(const struct device *dev)
{
        am_hal_iom_transfer_t trans = {0};
        struct spi_ambiq_data *data = dev->data;
        struct spi_context *ctx = &data->ctx;
        bool trx_once = (ctx->tx_len == ctx->rx_len);
        int ret = 0;

        /* TX and RX length must be the same for am_hal_iom_spi_blocking_fullduplex */
        trans.eDirection = AM_HAL_IOM_FULLDUPLEX;
        trans.ui32NumBytes = MIN(ctx->rx_len, ctx->tx_len);
        trans.pui32RxBuffer = (uint32_t *)ctx->rx_buf;
        trans.pui32TxBuffer = (uint32_t *)ctx->tx_buf;
        spi_context_update_tx(ctx, 1, trans.ui32NumBytes);
        spi_context_update_rx(ctx, 1, trans.ui32NumBytes);

        ret = am_hal_iom_spi_blocking_fullduplex(data->iom_handler, &trans);
        if (ret != 0) {
                return -EIO;
        }

        /* Transfer the remaining bytes on the longer side */
        if (!trx_once) {
                spi_context_update_trx ctx_update;

                if (ctx->tx_len) {
                        trans.eDirection = AM_HAL_IOM_TX;
                        trans.ui32NumBytes = ctx->tx_len;
                        trans.pui32TxBuffer = (uint32_t *)ctx->tx_buf;
                        ctx_update = spi_context_update_tx;
                } else {
                        trans.eDirection = AM_HAL_IOM_RX;
                        trans.ui32NumBytes = ctx->rx_len;
                        trans.pui32RxBuffer = (uint32_t *)ctx->rx_buf;
                        ctx_update = spi_context_update_rx;
                }
                ret = am_hal_iom_blocking_transfer(data->iom_handler, &trans);
                ctx_update(ctx, 1, trans.ui32NumBytes);
                if (ret != 0) {
                        return -EIO;
                }
        }

        return 0;
}

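/* Walk the TX/RX buffer sets and dispatch the matching HAL transfers. */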
static int spi_ambiq_xfer(const struct device *dev, const struct spi_config *config)
{
        struct spi_ambiq_data *data = dev->data;
        struct spi_context *ctx = &data->ctx;
        int ret = 0;

        data->cont = (config->operation & SPI_HOLD_ON_CS) != 0;

        spi_context_cs_control(ctx, true);

        while (1) {
                if (spi_context_tx_buf_on(ctx) && spi_context_rx_buf_on(ctx)) {
                        if (ctx->rx_buf == ctx->tx_buf) {
                                spi_context_update_rx(ctx, 1, ctx->rx_len);
                        } else if (!(config->operation & SPI_HALF_DUPLEX)) {
                                ret = spi_ambiq_xfer_full_duplex(dev);
                                if (ret != 0) {
                                        spi_ambiq_reset(dev);
                                        LOG_ERR("SPI full-duplex comm error: %d", ret);
                                        return ret;
                                }
                        }
                }
                if (spi_context_tx_on(ctx)) {
                        if (ctx->tx_buf == NULL) {
                                spi_context_update_tx(ctx, 1, ctx->tx_len);
                        } else {
                                ret = spi_ambiq_xfer_half_duplex(dev, AM_HAL_IOM_TX);
                                if (ret != 0) {
                                        spi_ambiq_reset(dev);
                                        LOG_ERR("SPI TX comm error: %d", ret);
                                        return ret;
                                }
                        }
                } else if (spi_context_rx_on(ctx)) {
                        if (ctx->rx_buf == NULL) {
                                spi_context_update_rx(ctx, 1, ctx->rx_len);
                        } else {
                                ret = spi_ambiq_xfer_half_duplex(dev, AM_HAL_IOM_RX);
                                if (ret != 0) {
                                        spi_ambiq_reset(dev);
                                        LOG_ERR("SPI RX comm error: %d", ret);
                                        return ret;
                                }
                        }
                } else {
                        break;
                }
        }

#ifndef CONFIG_SPI_AMBIQ_DMA
        if (!data->cont) {
                spi_context_cs_control(ctx, false);
                spi_context_complete(ctx, dev, ret);
        }
#endif
        return ret;
}

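/* spi_driver_api transceive entry point: lock, configure, transfer, unlock. */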
static int spi_ambiq_transceive(const struct device *dev, const struct spi_config *config,
                                const struct spi_buf_set *tx_bufs,
                                const struct spi_buf_set *rx_bufs)
{
        struct spi_ambiq_data *data = dev->data;
        int ret = 0;

        if (!tx_bufs && !rx_bufs) {
                return 0;
        }

        /* Context setup */
        spi_context_lock(&data->ctx, false, NULL, NULL, config);

        spi_ambiq_pm_policy_state_lock_get(dev);

        ret = spi_config(dev, config);
        if (ret) {
                LOG_ERR("spi_config failed: %d", ret);
                goto xfer_end;
        }

        spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);

        ret = spi_ambiq_xfer(dev, config);

xfer_end:
        spi_ambiq_pm_policy_state_lock_put(dev);

        spi_context_release(&data->ctx, ret);

        return ret;
}

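/* Refuse to release the bus while the controller still has work in flight. */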
static int spi_ambiq_release(const struct device *dev, const struct spi_config *config)
{
        struct spi_ambiq_data *data = dev->data;
        am_hal_iom_status_t iom_status;

        am_hal_iom_status_get(data->iom_handler, &iom_status);

        if ((iom_status.bStatIdle != IOM0_STATUS_IDLEST_IDLE) ||
            (iom_status.bStatCmdAct == IOM0_STATUS_CMDACT_ACTIVE) ||
            (iom_status.ui32NumPendTransactions)) {
                return -EBUSY;
        }

        spi_context_unlock_unconditionally(&data->ctx);

        return 0;
}

static DEVICE_API(spi, spi_ambiq_driver_api) = {
        .transceive = spi_ambiq_transceive,
#ifdef CONFIG_SPI_RTIO
        .iodev_submit = spi_rtio_iodev_default_submit,
#endif
        .release = spi_ambiq_release,
};

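/* One-time driver init: IOM handle, power, pins, CS lines and, with DMA, interrupts. */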
static int spi_ambiq_init(const struct device *dev)
{
        struct spi_ambiq_data *data = dev->data;
        const struct spi_ambiq_config *cfg = dev->config;
        int ret = 0;

        if (AM_HAL_STATUS_SUCCESS !=
            am_hal_iom_initialize((cfg->base - IOM0_BASE) / cfg->size, &data->iom_handler)) {
                LOG_ERR("Failed to initialize SPI");
                return -ENXIO;
        }

        ret = cfg->pwr_func();
        ret |= pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
        ret |= spi_context_cs_configure_all(&data->ctx);
        if (ret < 0) {
                LOG_ERR("Failed to configure SPI pins");
                goto end;
        }

#ifdef CONFIG_SPI_AMBIQ_DMA
        am_hal_iom_interrupt_clear(data->iom_handler, AM_HAL_IOM_INT_CQUPD | AM_HAL_IOM_INT_ERR);
        am_hal_iom_interrupt_enable(data->iom_handler, AM_HAL_IOM_INT_CQUPD | AM_HAL_IOM_INT_ERR);
        cfg->irq_config_func();
#endif
end:
        if (ret < 0) {
                am_hal_iom_uninitialize(data->iom_handler);
        } else {
                spi_context_unlock_unconditionally(&data->ctx);
        }
        return ret;
}

#ifdef CONFIG_PM_DEVICE
static int spi_ambiq_pm_action(const struct device *dev, enum pm_device_action action)
{
        struct spi_ambiq_data *data = dev->data;
        uint32_t ret;
        am_hal_sysctrl_power_state_e status;

        switch (action) {
        case PM_DEVICE_ACTION_RESUME:
                status = AM_HAL_SYSCTRL_WAKE;
                break;
        case PM_DEVICE_ACTION_SUSPEND:
                status = AM_HAL_SYSCTRL_DEEPSLEEP;
                break;
        default:
                return -ENOTSUP;
        }

        ret = am_hal_iom_power_ctrl(data->iom_handler, status, true);
        if (ret != AM_HAL_STATUS_SUCCESS) {
                LOG_ERR("am_hal_iom_power_ctrl failed: %u", ret);
                return -EPERM;
        }

        return 0;
}
#endif /* CONFIG_PM_DEVICE */

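/* Per-instance boilerplate: power-on hook, IRQ setup, data/config and device definition. */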
#define AMBIQ_SPI_INIT(n)                                                                          \
        PINCTRL_DT_INST_DEFINE(n);                                                                 \
        static int pwr_on_ambiq_spi_##n(void)                                                      \
        {                                                                                          \
                uint32_t addr = DT_REG_ADDR(DT_INST_PHANDLE(n, ambiq_pwrcfg)) +                    \
                                DT_INST_PHA(n, ambiq_pwrcfg, offset);                              \
                sys_write32((sys_read32(addr) | DT_INST_PHA(n, ambiq_pwrcfg, mask)), addr);        \
                k_busy_wait(PWRCTRL_MAX_WAIT_US);                                                  \
                return 0;                                                                          \
        }                                                                                          \
        static void spi_irq_config_func_##n(void)                                                  \
        {                                                                                          \
                IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), spi_ambiq_isr,              \
                            DEVICE_DT_INST_GET(n), 0);                                             \
                irq_enable(DT_INST_IRQN(n));                                                       \
        }                                                                                          \
        static struct spi_ambiq_data spi_ambiq_data##n = {                                         \
                SPI_CONTEXT_INIT_LOCK(spi_ambiq_data##n, ctx),                                     \
                SPI_CONTEXT_INIT_SYNC(spi_ambiq_data##n, ctx),                                     \
                SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx).inst_idx = n};                \
        static const struct spi_ambiq_config spi_ambiq_config##n = {                               \
                .base = DT_INST_REG_ADDR(n),                                                       \
                .size = DT_INST_REG_SIZE(n),                                                       \
                .clock_freq = DT_INST_PROP(n, clock_frequency),                                    \
                .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),                                         \
                .irq_config_func = spi_irq_config_func_##n,                                        \
                .pwr_func = pwr_on_ambiq_spi_##n};                                                 \
        PM_DEVICE_DT_INST_DEFINE(n, spi_ambiq_pm_action);                                          \
        SPI_DEVICE_DT_INST_DEFINE(n, spi_ambiq_init, PM_DEVICE_DT_INST_GET(n), &spi_ambiq_data##n, \
                                  &spi_ambiq_config##n, POST_KERNEL, CONFIG_SPI_INIT_PRIORITY,     \
                                  &spi_ambiq_driver_api);

DT_INST_FOREACH_STATUS_OKAY(AMBIQ_SPI_INIT)