Lines Matching +full:periph +full:- +full:clock +full:- +full:config
4 * SPDX-License-Identifier: Apache-2.0
36 #define DEV_CFG(_dev) ((const struct spi_mcux_config *)(_dev)->config)
37 #define DEV_DATA(_dev) ((struct spi_mcux_data *)(_dev)->data)
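DEV_CFG() and DEV_DATA() are plain casting accessors over the generic device fields (later matches use dev->config and dev->data directly). A minimal usage sketch, with an illustrative function name not present in the listing:

    static void example_accessors(const struct device *dev)
    {
            const struct spi_mcux_config *config = DEV_CFG(dev);
            struct spi_mcux_data *data = DEV_DATA(dev);

            ARG_UNUSED(config);
            ARG_UNUSED(data);
    }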
97 struct spi_mcux_data *data = dev->data; in spi_mcux_isr()
100 LPSPI_MasterTransferHandleIRQ(LPSPI_IRQ_HANDLE_ARG, &data->handle); in spi_mcux_isr()
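spi_mcux_isr() only forwards to the MCUX HAL IRQ handler for the transfer handle. In drivers of this style the ISR is attached by a per-instance IRQ config function; a sketch assuming devicetree instance 0 (the function name is illustrative):

    static void spi_mcux_config_func_0(const struct device *dev)
    {
            IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority),
                        spi_mcux_isr, DEVICE_DT_INST_GET(0), 0);
            irq_enable(DT_INST_IRQN(0));
    }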
108 spi_context_update_tx(&data->ctx, 1, data->transfer_len); in spi_mcux_master_callback()
109 spi_context_update_rx(&data->ctx, 1, data->transfer_len); in spi_mcux_master_callback()
111 spi_mcux_transfer_next_packet(data->dev); in spi_mcux_master_callback()
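The search view elides the lines around these matches. Based on the matched lines and the MCUX lpspi_master_transfer_callback_t signature, a plausible reconstruction of the completion callback:

    static void spi_mcux_master_callback(LPSPI_Type *base, lpspi_master_handle_t *handle,
                                         status_t status, void *userData)
    {
            struct spi_mcux_data *data = userData;

            /* Advance both buffer chains past the chunk just transferred,
             * then start the next chunk (or finish the transfer).
             */
            spi_context_update_tx(&data->ctx, 1, data->transfer_len);
            spi_context_update_rx(&data->ctx, 1, data->transfer_len);

            spi_mcux_transfer_next_packet(data->dev);
    }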
116 struct spi_mcux_data *data = dev->data; in spi_mcux_transfer_next_packet()
118 struct spi_context *ctx = &data->ctx; in spi_mcux_transfer_next_packet()
129 data->transfer_len = max_chunk; in spi_mcux_transfer_next_packet()
131 transfer.configFlags = LPSPI_MASTER_XFER_CFG_FLAGS(ctx->config->slave); in spi_mcux_transfer_next_packet()
132 transfer.txData = (ctx->tx_len == 0 ? NULL : ctx->tx_buf); in spi_mcux_transfer_next_packet()
133 transfer.rxData = (ctx->rx_len == 0 ? NULL : ctx->rx_buf); in spi_mcux_transfer_next_packet()
136 status = LPSPI_MasterTransferNonBlocking(base, &data->handle, &transfer); in spi_mcux_transfer_next_packet()
138 LOG_ERR("Transfer could not start on %s: %d", dev->name, status); in spi_mcux_transfer_next_packet()
139 return status == kStatus_LPSPI_Busy ? -EBUSY : -EINVAL; in spi_mcux_transfer_next_packet()
147 const struct spi_mcux_config *config = dev->config; in spi_mcux_configure() local
148 struct spi_mcux_data *data = dev->data; in spi_mcux_configure()
150 uint32_t word_size = SPI_WORD_SIZE_GET(spi_cfg->operation); in spi_mcux_configure()
155 if (spi_cfg->operation & SPI_HALF_DUPLEX) { in spi_mcux_configure()
157 LOG_ERR("Half-duplex not supported"); in spi_mcux_configure()
158 return -ENOTSUP; in spi_mcux_configure()
170 return -EINVAL; in spi_mcux_configure()
173 if (spi_cfg->slave >= LPSPI_CHIP_SELECT_COUNT) { in spi_mcux_configure()
174 LOG_ERR("Peripheral %d select exceeds max %d", spi_cfg->slave, in spi_mcux_configure()
175 LPSPI_CHIP_SELECT_COUNT - 1); in spi_mcux_configure()
176 return -EINVAL; in spi_mcux_configure()
179 ret = clock_control_get_rate(config->clock_dev, config->clock_subsys, &clock_freq); in spi_mcux_configure()
184 if (data->ctx.config != NULL) { in spi_mcux_configure()
186 * disable if already configured, otherwise the clock is not enabled and the in spi_mcux_configure()
190 while ((base->CR & LPSPI_CR_MEN_MASK) != 0U) { in spi_mcux_configure()
198 data->ctx.config = spi_cfg; in spi_mcux_configure()
203 master_config.cpol = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL) in spi_mcux_configure()
206 master_config.cpha = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA) in spi_mcux_configure()
210 (spi_cfg->operation & SPI_TRANSFER_LSB) ? kLPSPI_LsbFirst : kLPSPI_MsbFirst; in spi_mcux_configure()
211 master_config.baudRate = spi_cfg->frequency; in spi_mcux_configure()
212 master_config.pcsToSckDelayInNanoSec = config->pcs_sck_delay; in spi_mcux_configure()
213 master_config.lastSckToPcsDelayInNanoSec = config->sck_pcs_delay; in spi_mcux_configure()
214 master_config.betweenTransferDelayInNanoSec = config->transfer_delay; in spi_mcux_configure()
215 master_config.pinCfg = config->data_pin_config; in spi_mcux_configure()
221 base->CR |= LPSPI_CR_DBGEN_MASK; in spi_mcux_configure()
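The matches above only show individual master_config fields being set. In the MCUX API the usual surrounding sequence is default-init, field overrides, then LPSPI_MasterInit() with the source clock rate queried earlier; a sketch of that order:

    lpspi_master_config_t master_config;

    LPSPI_MasterGetDefaultConfig(&master_config);

    /* override cpol/cpha/direction/baudRate/delays as in the lines above */

    LPSPI_MasterInit(base, &master_config, clock_freq);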
230 return (data->dma_tx.dma_dev && data->dma_rx.dma_dev); in lpspi_inst_has_dma()
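lpspi_inst_has_dma() treats an instance as DMA-capable only when both streams name a DMA device. The struct spi_dma_stream definition is not among the matches; a hypothetical shape inferred from the fields referenced throughout this listing:

    struct spi_dma_stream {
            const struct device *dma_dev;        /* DMA controller servicing this stream */
            uint32_t channel;                    /* channel number on that controller */
            struct dma_config dma_cfg;           /* per-direction transfer configuration */
            struct dma_block_config dma_blk_cfg; /* block descriptor reloaded per chunk */
    };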
238 struct spi_mcux_data *data = (struct spi_mcux_data *)spi_dev->data; in spi_mcux_dma_callback()
246 if (channel == data->dma_tx.channel) { in spi_mcux_dma_callback()
248 data->status_flags |= LPSPI_DMA_TX_DONE_FLAG; in spi_mcux_dma_callback()
250 } else if (channel == data->dma_rx.channel) { in spi_mcux_dma_callback()
252 data->status_flags |= LPSPI_DMA_RX_DONE_FLAG; in spi_mcux_dma_callback()
261 if (data->ctx.asynchronous && (data->status_flags & LPSPI_DMA_DONE_FLAG)) { in spi_mcux_dma_callback()
263 size_t dma_size = spi_context_max_continuous_chunk(&data->ctx); in spi_mcux_dma_callback()
269 spi_context_update_tx(&data->ctx, 1, dma_size); in spi_mcux_dma_callback()
270 spi_context_update_rx(&data->ctx, 1, dma_size); in spi_mcux_dma_callback()
277 data->status_flags |= LPSPI_DMA_ERROR_FLAG; in spi_mcux_dma_callback()
279 spi_context_complete(&data->ctx, spi_dev, 0); in spi_mcux_dma_callback()
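The callback ORs a per-direction done bit into status_flags and completes the context only once both bits are set, which is why LPSPI_DMA_DONE_FLAG is tested with mask-equality here and in wait_dma_rx_tx_done(). The actual flag values are not shown by this search; an illustrative layout consistent with that masking:

    #define LPSPI_DMA_ERROR_FLAG   BIT(0)
    #define LPSPI_DMA_RX_DONE_FLAG BIT(1)
    #define LPSPI_DMA_TX_DONE_FLAG BIT(2)
    #define LPSPI_DMA_DONE_FLAG    (LPSPI_DMA_RX_DONE_FLAG | LPSPI_DMA_TX_DONE_FLAG)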
286 struct spi_mcux_data *data = dev->data; in spi_mcux_dma_common_load()
287 struct dma_block_config *blk_cfg = &stream->dma_blk_cfg; in spi_mcux_dma_common_load()
292 blk_cfg->block_size = len; in spi_mcux_dma_common_load()
295 blk_cfg->source_address = (uint32_t)&data->dummy_buffer; in spi_mcux_dma_common_load()
296 blk_cfg->dest_address = (uint32_t)&data->dummy_buffer; in spi_mcux_dma_common_load()
298 stream->dma_cfg.channel_direction = PERIPHERAL_TO_PERIPHERAL; in spi_mcux_dma_common_load()
300 blk_cfg->source_address = (uint32_t)buf; in spi_mcux_dma_common_load()
301 blk_cfg->dest_address = (uint32_t)buf; in spi_mcux_dma_common_load()
305 stream->dma_cfg.source_burst_length = 1; in spi_mcux_dma_common_load()
306 stream->dma_cfg.user_data = (void *)dev; in spi_mcux_dma_common_load()
307 stream->dma_cfg.head_block = blk_cfg; in spi_mcux_dma_common_load()
315 struct spi_mcux_data *data = dev->data; in spi_mcux_dma_tx_load()
317 struct spi_dma_stream *stream = &data->dma_tx; in spi_mcux_dma_tx_load()
321 /* tx direction has memory as source and periph as dest. */ in spi_mcux_dma_tx_load()
322 stream->dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL; in spi_mcux_dma_tx_load()
326 blk_cfg->dest_address = LPSPI_GetTxRegisterAddress(base); in spi_mcux_dma_tx_load()
329 /* pass our client origin to the dma: data->dma_tx.channel */ in spi_mcux_dma_tx_load()
330 return dma_config(stream->dma_dev, stream->channel, &stream->dma_cfg); in spi_mcux_dma_tx_load()
336 struct spi_mcux_data *data = dev->data; in spi_mcux_dma_rx_load()
338 struct spi_dma_stream *stream = &data->dma_rx; in spi_mcux_dma_rx_load()
342 /* rx direction has periph as source and mem as dest. */ in spi_mcux_dma_rx_load()
343 stream->dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY; in spi_mcux_dma_rx_load()
347 blk_cfg->source_address = LPSPI_GetRxRegisterAddress(base); in spi_mcux_dma_rx_load()
349 /* pass our client origin to the dma: data->dma_rx.channel */ in spi_mcux_dma_rx_load()
350 return dma_config(stream->dma_dev, stream->channel, &stream->dma_cfg); in spi_mcux_dma_rx_load()
355 struct spi_mcux_data *data = dev->data; in wait_dma_rx_tx_done()
359 ret = spi_context_wait_for_completion(&data->ctx); in wait_dma_rx_tx_done()
363 } else if (data->status_flags & LPSPI_DMA_ERROR_FLAG) { in wait_dma_rx_tx_done()
364 return -EIO; in wait_dma_rx_tx_done()
366 } while ((data->status_flags & LPSPI_DMA_DONE_FLAG) != LPSPI_DMA_DONE_FLAG); in wait_dma_rx_tx_done()
374 struct spi_mcux_data *data = dev->data; in spi_mcux_dma_rxtx_load()
375 struct spi_context *ctx = &data->ctx; in spi_mcux_dma_rxtx_load()
379 data->status_flags = 0U; in spi_mcux_dma_rxtx_load()
384 ret = spi_mcux_dma_tx_load(dev, ctx->tx_buf, *dma_size); in spi_mcux_dma_rxtx_load()
389 ret = spi_mcux_dma_rx_load(dev, ctx->rx_buf, *dma_size); in spi_mcux_dma_rxtx_load()
395 ret = dma_start(data->dma_tx.dma_dev, data->dma_tx.channel); in spi_mcux_dma_rxtx_load()
400 ret = dma_start(data->dma_rx.dma_dev, data->dma_rx.channel); in spi_mcux_dma_rxtx_load()
407 struct spi_mcux_data *data = dev->data; in transceive_dma_async()
408 struct spi_context *ctx = &data->ctx; in transceive_dma_async()
412 ctx->asynchronous = true; in transceive_dma_async()
413 ctx->callback = cb; in transceive_dma_async()
414 ctx->callback_data = userdata; in transceive_dma_async()
433 struct spi_mcux_data *data = dev->data; in transceive_dma_sync()
434 struct spi_context *ctx = &data->ctx; in transceive_dma_sync()
441 while (ctx->rx_len > 0 || ctx->tx_len > 0) { in transceive_dma_sync()
479 base->TCR = 0; in transceive_dma_sync()
488 struct spi_mcux_data *data = dev->data; in transceive_dma()
493 spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg); in transceive_dma()
504 base->TCR |= LPSPI_TCR_CONT_MASK; in transceive_dma()
510 spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1); in transceive_dma()
519 spi_context_release(&data->ctx, ret); in transceive_dma()
534 struct spi_rtio *rtio_ctx = data->rtio_ctx; in spi_mcux_master_rtio_callback()
536 if (rtio_ctx->txn_head != NULL) { in spi_mcux_master_rtio_callback()
537 spi_mcux_iodev_complete(data->dev, status); in spi_mcux_master_rtio_callback()
546 struct spi_mcux_data *data = dev->data; in spi_mcux_iodev_start()
547 struct spi_rtio *rtio_ctx = data->rtio_ctx; in spi_mcux_iodev_start()
548 struct rtio_sqe *sqe = &rtio_ctx->txn_curr->sqe; in spi_mcux_iodev_start()
549 struct spi_dt_spec *spi_dt_spec = sqe->iodev->data; in spi_mcux_iodev_start()
550 struct spi_config *spi_cfg = &spi_dt_spec->config; in spi_mcux_iodev_start()
561 LPSPI_MasterTransferCreateHandle(base, &data->handle, spi_mcux_master_rtio_callback, data); in spi_mcux_iodev_start()
563 transfer.configFlags = LPSPI_MASTER_XFER_CFG_FLAGS(spi_cfg->slave); in spi_mcux_iodev_start()
565 switch (sqe->op) { in spi_mcux_iodev_start()
568 transfer.rxData = sqe->rx.buf; in spi_mcux_iodev_start()
569 transfer.dataSize = sqe->rx.buf_len; in spi_mcux_iodev_start()
573 transfer.txData = sqe->tx.buf; in spi_mcux_iodev_start()
574 transfer.dataSize = sqe->tx.buf_len; in spi_mcux_iodev_start()
578 transfer.txData = sqe->tiny_tx.buf; in spi_mcux_iodev_start()
579 transfer.dataSize = sqe->tiny_tx.buf_len; in spi_mcux_iodev_start()
582 transfer.txData = sqe->txrx.tx_buf; in spi_mcux_iodev_start()
583 transfer.rxData = sqe->txrx.rx_buf; in spi_mcux_iodev_start()
584 transfer.dataSize = sqe->txrx.buf_len; in spi_mcux_iodev_start()
587 LOG_ERR("Invalid op code %d for submission %p\n", sqe->op, (void *)sqe); in spi_mcux_iodev_start()
588 spi_mcux_iodev_complete(dev, -EINVAL); in spi_mcux_iodev_start()
592 data->transfer_len = transfer.dataSize; in spi_mcux_iodev_start()
594 spi_context_cs_control(&data->ctx, true); in spi_mcux_iodev_start()
596 status = LPSPI_MasterTransferNonBlocking(base, &data->handle, &transfer); in spi_mcux_iodev_start()
598 LOG_ERR("Transfer could not start on %s: %d", dev->name, status); in spi_mcux_iodev_start()
599 spi_mcux_iodev_complete(dev, -EIO); in spi_mcux_iodev_start()
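The case labels are elided by the matcher; given the sqe union members used, the switch plausibly dispatches on the standard RTIO opcodes. A sketch of the dispatch (only the labels are assumptions; the assignments mirror the matched lines):

    switch (sqe->op) {
    case RTIO_OP_RX:
            transfer.rxData = sqe->rx.buf;
            transfer.dataSize = sqe->rx.buf_len;
            break;
    case RTIO_OP_TX:
            transfer.txData = sqe->tx.buf;
            transfer.dataSize = sqe->tx.buf_len;
            break;
    case RTIO_OP_TINY_TX:
            transfer.txData = sqe->tiny_tx.buf;
            transfer.dataSize = sqe->tiny_tx.buf_len;
            break;
    case RTIO_OP_TXRX:
            transfer.txData = sqe->txrx.tx_buf;
            transfer.rxData = sqe->txrx.rx_buf;
            transfer.dataSize = sqe->txrx.buf_len;
            break;
    default:
            LOG_ERR("Invalid op code %d for submission %p", sqe->op, (void *)sqe);
            spi_mcux_iodev_complete(dev, -EINVAL);
            return;
    }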
605 struct spi_mcux_data *data = dev->data; in spi_mcux_iodev_complete()
606 struct spi_rtio *rtio_ctx = data->rtio_ctx; in spi_mcux_iodev_complete()
608 if (!status && (rtio_ctx->txn_curr->sqe.flags & RTIO_SQE_TRANSACTION)) { in spi_mcux_iodev_complete()
609 rtio_ctx->txn_curr = rtio_txn_next(rtio_ctx->txn_curr); in spi_mcux_iodev_complete()
614 /* De-assert the CS line to leave spacing before the next transaction */ in spi_mcux_iodev_complete()
615 spi_context_cs_control(&data->ctx, false); in spi_mcux_iodev_complete()
624 struct spi_mcux_data *data = dev->data; in spi_mcux_iodev_submit()
625 struct spi_rtio *rtio_ctx = data->rtio_ctx; in spi_mcux_iodev_submit()
636 struct spi_mcux_data *data = dev->data; in transceive_rtio()
637 struct spi_rtio *rtio_ctx = data->rtio_ctx; in transceive_rtio()
640 spi_context_lock(&data->ctx, false, NULL, NULL, spi_cfg); in transceive_rtio()
644 spi_context_release(&data->ctx, ret); in transceive_rtio()
657 struct spi_mcux_data *data = dev->data; in transceive()
660 spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg); in transceive()
667 LPSPI_MasterTransferCreateHandle(base, &data->handle, spi_mcux_master_callback, data); in transceive()
669 spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1); in transceive()
671 spi_context_cs_control(&data->ctx, true); in transceive()
678 ret = spi_context_wait_for_completion(&data->ctx); in transceive()
680 spi_context_release(&data->ctx, ret); in transceive()
689 struct spi_mcux_data *data = dev->data; in spi_mcux_transceive()
721 struct spi_mcux_data *data = dev->data; in spi_mcux_release()
723 spi_context_unlock_unconditionally(&data->ctx); in spi_mcux_release()
743 LOG_ERR("%s device is not ready", dma_dev->name); in lpspi_dma_dev_ready()
744 return -ENODEV; in lpspi_dma_dev_ready()
752 return lpspi_dma_dev_ready(data->dma_tx.dma_dev) | in lpspi_dma_devs_ready()
753 lpspi_dma_dev_ready(data->dma_rx.dma_dev); in lpspi_dma_devs_ready()
761 const struct spi_mcux_config *config = dev->config; in spi_mcux_init() local
762 struct spi_mcux_data *data = dev->data; in spi_mcux_init()
767 data->dev = dev; in spi_mcux_init()
769 if (!device_is_ready(config->clock_dev)) { in spi_mcux_init()
770 LOG_ERR("clock control device not ready"); in spi_mcux_init()
771 return -ENODEV; in spi_mcux_init()
781 err = spi_context_cs_configure_all(&data->ctx); in spi_mcux_init()
786 err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); in spi_mcux_init()
791 config->irq_config_func(dev); in spi_mcux_init()
794 spi_rtio_init(data->rtio_ctx, dev); in spi_mcux_init()
796 spi_context_unlock_unconditionally(&data->ctx); in spi_mcux_init()
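The matches stop inside spi_mcux_init(); the remainder of the file would normally define the driver API table and instantiate each devicetree instance. A typical wiring sketch (the _0-suffixed names are illustrative, not from the listing):

    DEVICE_DT_INST_DEFINE(0, spi_mcux_init, NULL,
                          &spi_mcux_data_0, &spi_mcux_config_0,
                          POST_KERNEL, CONFIG_SPI_INIT_PRIORITY,
                          &spi_mcux_driver_api);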