Lines Matching "tx-sync-mode"
4 * SPDX-License-Identifier: Apache-2.0
35 struct k_sem sync; member
62 ._ctx_name.sync = Z_SEM_INITIALIZER(_data._ctx_name.sync, 0, 1)
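For orientation, a minimal sketch of how an in-tree driver typically embeds and initializes this context. The my_spi_data name is hypothetical; SPI_CONTEXT_INIT_LOCK is the companion macro to the sync initializer above and is not among the matched lines.

    #include <zephyr/drivers/spi.h>
    #include "spi_context.h"

    /* Hypothetical driver data embedding the shared SPI context. */
    struct my_spi_data {
        struct spi_context ctx;
    };

    /* The lock and sync semaphores are set up via the helper macros. */
    static struct my_spi_data my_spi_data_0 = {
        SPI_CONTEXT_INIT_LOCK(my_spi_data_0, ctx),
        SPI_CONTEXT_INIT_SYNC(my_spi_data_0, ctx),
    };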
81 return !!(ctx->config == config); in spi_context_configured()
86 return (ctx->config->operation & SPI_OP_MODE_SLAVE); in spi_context_is_slave()
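A common use of spi_context_configured() is to skip hardware reprogramming when the same spi_config pointer is applied again; a sketch from a hypothetical driver (my_spi_configure() is made up for illustration):

    /* Reconfigure the controller only when the config actually changed. */
    if (!spi_context_configured(&data->ctx, config)) {
        err = my_spi_configure(dev, config);   /* hypothetical HW setup */
        if (err < 0) {
            return err;
        }
        data->ctx.config = config;
    }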
95 if ((spi_cfg->operation & SPI_LOCK_ON) && in spi_context_lock()
96 (k_sem_count_get(&ctx->lock) == 0) && in spi_context_lock()
97 (ctx->owner == spi_cfg)) { in spi_context_lock()
101 k_sem_take(&ctx->lock, K_FOREVER); in spi_context_lock()
102 ctx->owner = spi_cfg; in spi_context_lock()
105 ctx->asynchronous = asynchronous; in spi_context_lock()
106 ctx->callback = callback; in spi_context_lock()
107 ctx->callback_data = callback_data; in spi_context_lock()
114 if (status >= 0 && (ctx->config->operation & SPI_LOCK_ON)) { in spi_context_release()
120 if (!ctx->asynchronous || (status < 0)) { in spi_context_release()
121 ctx->owner = NULL; in spi_context_release()
122 k_sem_give(&ctx->lock); in spi_context_release()
125 if (!(ctx->config->operation & SPI_LOCK_ON)) { in spi_context_release()
126 ctx->owner = NULL; in spi_context_release()
127 k_sem_give(&ctx->lock); in spi_context_release()
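Taken together, the lock/release pair usually brackets a driver's transceive path; a minimal sketch assuming a hypothetical my_spi driver and the (ctx, asynchronous, callback, callback_data, spi_cfg) argument order implied by the lines above:

    static int my_spi_transceive(const struct device *dev,
                                 const struct spi_config *config,
                                 const struct spi_buf_set *tx_bufs,
                                 const struct spi_buf_set *rx_bufs)
    {
        struct my_spi_data *data = dev->data;
        int ret;

        /* Serialize access; a lock owner with SPI_LOCK_ON may re-enter. */
        spi_context_lock(&data->ctx, false, NULL, NULL, config);

        ret = my_spi_do_transfer(dev, config, tx_bufs, rx_bufs); /* hypothetical */

        /* Gives the lock back unless SPI_LOCK_ON keeps it held by the owner. */
        spi_context_release(&data->ctx, ret);

        return ret;
    }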
141 wait = !ctx->asynchronous; in spi_context_wait_for_completion()
149 /* Do not use any timeout in the slave mode, as in this case in spi_context_wait_for_completion()
161 ctx->config->frequency; in spi_context_wait_for_completion()
167 if (k_sem_take(&ctx->sync, timeout)) { in spi_context_wait_for_completion()
169 return -ETIMEDOUT; in spi_context_wait_for_completion()
171 status = ctx->sync_status; in spi_context_wait_for_completion()
176 return ctx->recv_frames; in spi_context_wait_for_completion()
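In the synchronous path the driver starts the hardware and then blocks on the sync semaphore through this helper; a fragment-level sketch inside the hypothetical transceive path (my_spi_hw_start() is made up):

    my_spi_hw_start(dev);                              /* kick off the transfer */
    ret = spi_context_wait_for_completion(&data->ctx); /* 0, -ETIMEDOUT, or, in
                                                         * slave mode, the number
                                                         * of received frames */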
188 if (!ctx->asynchronous) { in spi_context_complete()
189 ctx->sync_status = status; in spi_context_complete()
190 k_sem_give(&ctx->sync); in spi_context_complete()
192 if (ctx->callback) { in spi_context_complete()
198 status = ctx->recv_frames; in spi_context_complete()
201 ctx->callback(dev, status, ctx->callback_data); in spi_context_complete()
204 if (!(ctx->config->operation & SPI_LOCK_ON)) { in spi_context_complete()
205 ctx->owner = NULL; in spi_context_complete()
206 k_sem_give(&ctx->lock); in spi_context_complete()
210 ctx->sync_status = status; in spi_context_complete()
211 k_sem_give(&ctx->sync); in spi_context_complete()
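The counterpart is usually an ISR that reports the result; a sketch with a hypothetical my_spi_isr():

    static void my_spi_isr(const struct device *dev)
    {
        struct my_spi_data *data = dev->data;

        /* ... acknowledge the interrupt and drain FIFOs ... */

        /* Wakes the waiting thread (sync) or runs the user callback (async). */
        spi_context_complete(&data->ctx, dev, 0);
    }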
220 for (cs_gpio = ctx->cs_gpios; cs_gpio < &ctx->cs_gpios[ctx->num_cs_gpios]; cs_gpio++) { in spi_context_cs_configure_all()
221 if (!device_is_ready(cs_gpio->port)) { in spi_context_cs_configure_all()
223 cs_gpio->port->name, cs_gpio->pin); in spi_context_cs_configure_all()
224 return -ENODEV; in spi_context_cs_configure_all()
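This is normally called once from the driver's init function, together with the unconditional unlock shown further below; a sketch (my_spi_init() is hypothetical):

    static int my_spi_init(const struct device *dev)
    {
        struct my_spi_data *data = dev->data;
        int err;

        /* Configure every devicetree-provided CS GPIO as an inactive output. */
        err = spi_context_cs_configure_all(&data->ctx);
        if (err < 0) {
            return err;
        }

        /* Release the lock semaphore so the first transceive can take it. */
        spi_context_unlock_unconditionally(&data->ctx);

        return 0;
    }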
239 if (ctx->config && spi_cs_is_gpio(ctx->config)) { in _spi_context_cs_control()
241 gpio_pin_set_dt(&ctx->config->cs.gpio, 1); in _spi_context_cs_control()
242 k_busy_wait(ctx->config->cs.delay); in _spi_context_cs_control()
245 ctx->config->operation & SPI_HOLD_ON_CS) { in _spi_context_cs_control()
249 k_busy_wait(ctx->config->cs.delay); in _spi_context_cs_control()
250 gpio_pin_set_dt(&ctx->config->cs.gpio, 0); in _spi_context_cs_control()
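Drivers do not call this internal helper directly; they use the public spi_context_cs_control() wrapper from the same header (not among the matched lines). A sketch of the usual pattern inside a transfer:

    /* Assert CS (honoring the configured delay), run the transfer, then
     * release it - unless SPI_HOLD_ON_CS keeps it asserted between calls.
     */
    spi_context_cs_control(&data->ctx, true);
    /* ... program and run the transfer ... */
    spi_context_cs_control(&data->ctx, false);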
265 if (!k_sem_count_get(&ctx->lock)) { in spi_context_unlock_unconditionally()
266 ctx->owner = NULL; in spi_context_unlock_unconditionally()
267 k_sem_give(&ctx->lock); in spi_context_unlock_unconditionally()
276 /* This loop skips zero-length buffers in the set, if any. */ in spi_context_get_next_buf()
278 if (((*current)->len / dfs) != 0) { in spi_context_get_next_buf()
279 *buf_len = (*current)->len / dfs; in spi_context_get_next_buf()
280 return (*current)->buf; in spi_context_get_next_buf()
283 --(*count); in spi_context_get_next_buf()
296 LOG_DBG("tx_bufs %p - rx_bufs %p - %u", tx_bufs, rx_bufs, dfs); in spi_context_buffers_setup()
298 ctx->current_tx = tx_bufs ? tx_bufs->buffers : NULL; in spi_context_buffers_setup()
299 ctx->tx_count = ctx->current_tx ? tx_bufs->count : 0; in spi_context_buffers_setup()
300 ctx->tx_buf = (const uint8_t *) in spi_context_buffers_setup()
301 spi_context_get_next_buf(&ctx->current_tx, &ctx->tx_count, in spi_context_buffers_setup()
302 &ctx->tx_len, dfs); in spi_context_buffers_setup()
304 ctx->current_rx = rx_bufs ? rx_bufs->buffers : NULL; in spi_context_buffers_setup()
305 ctx->rx_count = ctx->current_rx ? rx_bufs->count : 0; in spi_context_buffers_setup()
306 ctx->rx_buf = (uint8_t *) in spi_context_buffers_setup()
307 spi_context_get_next_buf(&ctx->current_rx, &ctx->rx_count, in spi_context_buffers_setup()
308 &ctx->rx_len, dfs); in spi_context_buffers_setup()
310 ctx->sync_status = 0; in spi_context_buffers_setup()
313 ctx->recv_frames = 0; in spi_context_buffers_setup()
317 " tx buf/len %p/%zu, rx buf/len %p/%zu", in spi_context_buffers_setup()
318 ctx->current_tx, ctx->tx_count, in spi_context_buffers_setup()
319 ctx->current_rx, ctx->rx_count, in spi_context_buffers_setup()
320 (void *)ctx->tx_buf, ctx->tx_len, in spi_context_buffers_setup()
321 (void *)ctx->rx_buf, ctx->rx_len); in spi_context_buffers_setup()
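A transceive implementation typically calls this right after locking; a sketch with dfs = 1 for 8-bit frames (either buffer set may be NULL):

    /* Capture the scattered TX/RX buffer sets and reset the bookkeeping. */
    spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);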
331 if (!ctx->tx_len) { in spi_context_update_tx()
335 if (len > ctx->tx_len) { in spi_context_update_tx()
340 ctx->tx_len -= len; in spi_context_update_tx()
341 if (!ctx->tx_len) { in spi_context_update_tx()
343 ++ctx->current_tx; in spi_context_update_tx()
344 --ctx->tx_count; in spi_context_update_tx()
345 ctx->tx_buf = (const uint8_t *) in spi_context_update_tx()
346 spi_context_get_next_buf(&ctx->current_tx, in spi_context_update_tx()
347 &ctx->tx_count, in spi_context_update_tx()
348 &ctx->tx_len, dfs); in spi_context_update_tx()
349 } else if (ctx->tx_buf) { in spi_context_update_tx()
350 ctx->tx_buf += dfs * len; in spi_context_update_tx()
353 LOG_DBG("tx buf/len %p/%zu", (void *)ctx->tx_buf, ctx->tx_len); in spi_context_update_tx()
359 return !!(ctx->tx_len); in spi_context_tx_on()
365 return !!(ctx->tx_buf && ctx->tx_len); in spi_context_tx_buf_on()
377 ctx->recv_frames += len; in spi_context_update_rx()
382 if (!ctx->rx_len) { in spi_context_update_rx()
386 if (len > ctx->rx_len) { in spi_context_update_rx()
391 ctx->rx_len -= len; in spi_context_update_rx()
392 if (!ctx->rx_len) { in spi_context_update_rx()
394 ++ctx->current_rx; in spi_context_update_rx()
395 --ctx->rx_count; in spi_context_update_rx()
396 ctx->rx_buf = (uint8_t *) in spi_context_update_rx()
397 spi_context_get_next_buf(&ctx->current_rx, in spi_context_update_rx()
398 &ctx->rx_count, in spi_context_update_rx()
399 &ctx->rx_len, dfs); in spi_context_update_rx()
400 } else if (ctx->rx_buf) { in spi_context_update_rx()
401 ctx->rx_buf += dfs * len; in spi_context_update_rx()
404 LOG_DBG("rx buf/len %p/%zu", (void *)ctx->rx_buf, ctx->rx_len); in spi_context_update_rx()
410 return !!(ctx->rx_len); in spi_context_rx_on()
416 return !!(ctx->rx_buf && ctx->rx_len); in spi_context_rx_buf_on()
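The update/on/buf_on helpers above are usually combined into a frame-by-frame shift loop; a sketch for 8-bit frames (my_spi_hw_shift() is hypothetical):

    while (spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx)) {
        /* NULL TX buffers with a non-zero length mean "send dummy frames". */
        uint8_t tx_frame = spi_context_tx_buf_on(&data->ctx)
                           ? *data->ctx.tx_buf : 0U;
        uint8_t rx_frame = my_spi_hw_shift(dev, tx_frame);

        /* NULL RX buffers with a non-zero length mean "discard frames". */
        if (spi_context_rx_buf_on(&data->ctx)) {
            *data->ctx.rx_buf = rx_frame;
        }

        spi_context_update_tx(&data->ctx, 1, 1);
        spi_context_update_rx(&data->ctx, 1, 1);
    }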
422 * can be done with DMA that handles only non-scattered buffers.
426 if (!ctx->tx_len) { in spi_context_max_continuous_chunk()
427 return ctx->rx_len; in spi_context_max_continuous_chunk()
428 } else if (!ctx->rx_len) { in spi_context_max_continuous_chunk()
429 return ctx->tx_len; in spi_context_max_continuous_chunk()
432 return MIN(ctx->tx_len, ctx->rx_len); in spi_context_max_continuous_chunk()
437 return ctx->tx_len > ctx->rx_len ? ctx->tx_len : ctx->rx_len; in spi_context_longest_current_buf()
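These size helpers mainly serve DMA-based drivers; a sketch of chunked programming in the hypothetical driver (my_spi_dma_start() is made up, dfs = 1):

    /* Program one chunk that is contiguous in both directions, then advance
     * both sides by the same number of frames once the DMA completes.
     */
    size_t chunk = spi_context_max_continuous_chunk(&data->ctx);

    my_spi_dma_start(dev, data->ctx.tx_buf, data->ctx.rx_buf, chunk);
    /* ... on DMA completion ... */
    spi_context_update_tx(&data->ctx, 1, chunk);
    spi_context_update_rx(&data->ctx, 1, chunk);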
445 for (n = 0; n < ctx->tx_count; ++n) { in spi_context_total_tx_len()
446 total_len += ctx->current_tx[n].len; in spi_context_total_tx_len()
457 for (n = 0; n < ctx->rx_count; ++n) { in spi_context_total_rx_len()
458 total_len += ctx->current_rx[n].len; in spi_context_total_rx_len()
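The totals are handy for sizing a whole transaction up front, for example to derive a transfer timeout; a one-line sketch using the MAX() macro from <zephyr/sys/util.h>:

    /* Total bytes over all buffers in the longer of the two directions. */
    size_t total_len = MAX(spi_context_total_tx_len(&data->ctx),
                           spi_context_total_rx_len(&data->ctx));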