/*
 * Copyright (c) 2023 Intel Corporation
 * Copyright (c) 2024 Croxel Inc
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/rtio/work.h>
#include <zephyr/drivers/spi/rtio.h>
#include <zephyr/sys/mpsc_lockfree.h>

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(spi_rtio, CONFIG_SPI_LOG_LEVEL);

const struct rtio_iodev_api spi_iodev_api = {
	.submit = spi_iodev_submit,
};

static void spi_rtio_iodev_default_submit_sync(struct rtio_iodev_sqe *iodev_sqe)
{
	struct spi_dt_spec *dt_spec = iodev_sqe->sqe.iodev->data;
	const struct device *dev = dt_spec->bus;
	int err = 0;

	LOG_DBG("Sync RTIO work item for: %p", (void *)dev);

	/* Handle multi-submission transactions in the same context. This
	 * guarantees that linked items are consumed in the expected order,
	 * regardless of pending items in the workqueue.
	 */
	struct rtio_iodev_sqe *txn_head = iodev_sqe;
	struct rtio_iodev_sqe *txn_curr = iodev_sqe;

	do {
		struct rtio_sqe *sqe = &txn_curr->sqe;
		struct spi_buf tx_buf = {0};
		struct spi_buf_set tx_buf_set = {
			.buffers = &tx_buf,
		};

		struct spi_buf rx_buf = {0};
		struct spi_buf_set rx_buf_set = {
			.buffers = &rx_buf,
		};

		LOG_DBG("Preparing transfer: %p", txn_curr);

		switch (sqe->op) {
		case RTIO_OP_RX:
			rx_buf.buf = sqe->rx.buf;
			rx_buf.len = sqe->rx.buf_len;
			rx_buf_set.count = 1;
			break;
		case RTIO_OP_TX:
			tx_buf.buf = (uint8_t *)sqe->tx.buf;
			tx_buf.len = sqe->tx.buf_len;
			tx_buf_set.count = 1;
			break;
		case RTIO_OP_TINY_TX:
			tx_buf.buf = (uint8_t *)sqe->tiny_tx.buf;
			tx_buf.len = sqe->tiny_tx.buf_len;
			tx_buf_set.count = 1;
			break;
		case RTIO_OP_TXRX:
			rx_buf.buf = sqe->txrx.rx_buf;
			rx_buf.len = sqe->txrx.buf_len;
			tx_buf.buf = (uint8_t *)sqe->txrx.tx_buf;
			tx_buf.len = sqe->txrx.buf_len;
			rx_buf_set.count = 1;
			tx_buf_set.count = 1;
			break;
		default:
			LOG_ERR("Invalid op code %d for submission %p\n", sqe->op, (void *)sqe);
			err = -EIO;
			break;
		}

		if (!err) {
			struct spi_buf_set *tx_buf_ptr = tx_buf_set.count > 0 ? &tx_buf_set : NULL;
			struct spi_buf_set *rx_buf_ptr = rx_buf_set.count > 0 ? &rx_buf_set : NULL;

			err = spi_transceive_dt(dt_spec, tx_buf_ptr, rx_buf_ptr);

			/* NULL if this submission is not a transaction */
			txn_curr = rtio_txn_next(txn_curr);
		}
	} while (err >= 0 && txn_curr != NULL);

	if (err < 0) {
		LOG_ERR("Transfer failed: %d", err);
		rtio_iodev_sqe_err(txn_head, err);
	} else {
		LOG_DBG("Transfer OK: %d", err);
		rtio_iodev_sqe_ok(txn_head, err);
	}
}

void spi_rtio_iodev_default_submit(const struct device *dev,
				   struct rtio_iodev_sqe *iodev_sqe)
{
	LOG_DBG("Executing fallback for dev: %p, sqe: %p", (void *)dev, (void *)iodev_sqe);

	struct rtio_work_req *req = rtio_work_req_alloc();

	if (req == NULL) {
		LOG_ERR("RTIO work item allocation failed. Consider increasing "
			"CONFIG_RTIO_WORKQ_POOL_ITEMS.");
		rtio_iodev_sqe_err(iodev_sqe, -ENOMEM);
		return;
	}

	rtio_work_req_submit(req, iodev_sqe, spi_rtio_iodev_default_submit_sync);
}
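
/*
 * Example (illustrative sketch, not part of this file): a driver without a
 * native RTIO path can point the .iodev_submit hook of its API at the
 * fallback above so submissions are serviced on the RTIO work queue. The
 * "mydrv" names are hypothetical.
 *
 *	static void spi_mydrv_iodev_submit(const struct device *dev,
 *					   struct rtio_iodev_sqe *iodev_sqe)
 *	{
 *		spi_rtio_iodev_default_submit(dev, iodev_sqe);
 *	}
 *
 *	static const struct spi_driver_api spi_mydrv_api = {
 *		.transceive = spi_mydrv_transceive,
 *	#ifdef CONFIG_SPI_RTIO
 *		.iodev_submit = spi_mydrv_iodev_submit,
 *	#endif
 *		.release = spi_mydrv_release,
 *	};
 */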

/**
 * @brief Copy the tx_bufs and rx_bufs into a set of RTIO requests
 *
 * @param[in] r rtio context
 * @param[in] iodev iodev to transceive with
 * @param[in] tx_bufs transmit buffer set
 * @param[in] rx_bufs receive buffer set
 * @param[out] last_sqe last sqe submitted, NULL if not enough memory
 *
 * @retval Number of submission queue entries
 * @retval -ENOMEM out of memory
 */
int spi_rtio_copy(struct rtio *r,
		  struct rtio_iodev *iodev,
		  const struct spi_buf_set *tx_bufs,
		  const struct spi_buf_set *rx_bufs,
		  struct rtio_sqe **last_sqe)
{
	int ret = 0;
	size_t tx_count = tx_bufs ? tx_bufs->count : 0;
	size_t rx_count = rx_bufs ? rx_bufs->count : 0;

	uint32_t tx = 0, tx_len = 0;
	uint32_t rx = 0, rx_len = 0;
	uint8_t *tx_buf, *rx_buf;

	struct rtio_sqe *sqe = NULL;

	if (tx < tx_count) {
		tx_buf = tx_bufs->buffers[tx].buf;
		tx_len = tx_bufs->buffers[tx].len;
	} else {
		tx_buf = NULL;
		tx_len = rx_bufs->buffers[rx].len;
	}

	if (rx < rx_count) {
		rx_buf = rx_bufs->buffers[rx].buf;
		rx_len = rx_bufs->buffers[rx].len;
	} else {
		rx_buf = NULL;
		rx_len = tx_bufs->buffers[tx].len;
	}

	while ((tx < tx_count || rx < rx_count) && (tx_len > 0 || rx_len > 0)) {
		sqe = rtio_sqe_acquire(r);

		if (sqe == NULL) {
			ret = -ENOMEM;
			rtio_sqe_drop_all(r);
			goto out;
		}

		ret++;

		/* If tx/rx len are same, we can do a simple transceive */
		if (tx_len == rx_len) {
			if (tx_buf == NULL) {
				rtio_sqe_prep_read(sqe, iodev, RTIO_PRIO_NORM,
						   rx_buf, rx_len, NULL);
			} else if (rx_buf == NULL) {
				rtio_sqe_prep_write(sqe, iodev, RTIO_PRIO_NORM,
						    tx_buf, tx_len, NULL);
			} else {
				rtio_sqe_prep_transceive(sqe, iodev, RTIO_PRIO_NORM,
							 tx_buf, rx_buf, rx_len, NULL);
			}
			tx++;
			rx++;
			if (rx < rx_count) {
				rx_buf = rx_bufs->buffers[rx].buf;
				rx_len = rx_bufs->buffers[rx].len;
			} else {
				rx_buf = NULL;
				rx_len = 0;
			}
			if (tx < tx_count) {
				tx_buf = tx_bufs->buffers[tx].buf;
				tx_len = tx_bufs->buffers[tx].len;
			} else {
				tx_buf = NULL;
				tx_len = 0;
			}
		} else if (tx_len == 0) {
			rtio_sqe_prep_read(sqe, iodev, RTIO_PRIO_NORM,
					   (uint8_t *)rx_buf,
					   (uint32_t)rx_len,
					   NULL);
			rx++;
			if (rx < rx_count) {
				rx_buf = rx_bufs->buffers[rx].buf;
				rx_len = rx_bufs->buffers[rx].len;
			} else {
				rx_buf = NULL;
				rx_len = 0;
			}
		} else if (rx_len == 0) {
			rtio_sqe_prep_write(sqe, iodev, RTIO_PRIO_NORM,
					    (uint8_t *)tx_buf,
					    (uint32_t)tx_len,
					    NULL);
			tx++;
			if (tx < tx_count) {
				tx_buf = tx_bufs->buffers[tx].buf;
				tx_len = tx_bufs->buffers[tx].len;
			} else {
				tx_buf = NULL;
				tx_len = 0;
			}
		} else if (tx_len > rx_len) {
			rtio_sqe_prep_transceive(sqe, iodev, RTIO_PRIO_NORM,
						 (uint8_t *)tx_buf,
						 (uint8_t *)rx_buf,
						 (uint32_t)rx_len,
						 NULL);
			tx_len -= rx_len;
			tx_buf += rx_len;
			rx++;
			if (rx < rx_count) {
				rx_buf = rx_bufs->buffers[rx].buf;
				rx_len = rx_bufs->buffers[rx].len;
			} else {
				rx_buf = NULL;
				rx_len = tx_len;
			}
		} else if (rx_len > tx_len) {
			rtio_sqe_prep_transceive(sqe, iodev, RTIO_PRIO_NORM,
						 (uint8_t *)tx_buf,
						 (uint8_t *)rx_buf,
						 (uint32_t)tx_len,
						 NULL);
			rx_len -= tx_len;
			rx_buf += tx_len;
			tx++;
			if (tx < tx_count) {
				tx_buf = tx_bufs->buffers[tx].buf;
				tx_len = tx_bufs->buffers[tx].len;
			} else {
				tx_buf = NULL;
				tx_len = rx_len;
			}
		} else {
			__ASSERT(false, "Invalid %s state", __func__);
		}

		sqe->flags = RTIO_SQE_TRANSACTION;
	}

	if (sqe != NULL) {
		sqe->flags = 0;
		*last_sqe = sqe;
	}

out:
	return ret;
}
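
/*
 * Usage sketch for spi_rtio_copy() (the "cmd"/"resp" buffers are
 * hypothetical): build the spi_buf_set descriptors, copy them into SQEs and
 * submit. Unequal tx/rx lengths are split into multiple SQEs chained with
 * RTIO_SQE_TRANSACTION; spi_rtio_transceive() below is the in-tree blocking
 * variant of this pattern.
 *
 *	struct rtio_sqe *last_sqe;
 *	struct spi_buf tx_buf = { .buf = cmd, .len = sizeof(cmd) };
 *	struct spi_buf rx_buf = { .buf = resp, .len = sizeof(resp) };
 *	struct spi_buf_set tx_set = { .buffers = &tx_buf, .count = 1 };
 *	struct spi_buf_set rx_set = { .buffers = &rx_buf, .count = 1 };
 *	int n = spi_rtio_copy(r, iodev, &tx_set, &rx_set, &last_sqe);
 *
 *	if (n > 0) {
 *		rtio_submit(r, n);
 *	}
 */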

/**
 * @brief Lock the SPI RTIO spinlock
 *
 * This is used internally for controlling the SPI RTIO context, and is
 * exposed to the user because it is required to safely implement the
 * driver-specific iodev_start API.
 *
 * @param ctx SPI RTIO context
 *
 * @retval Spinlock key
 */
static inline k_spinlock_key_t spi_spin_lock(struct spi_rtio *ctx)
{
	return k_spin_lock(&ctx->lock);
}

/**
 * @brief Unlock the previously obtained SPI RTIO spinlock
 *
 * @param ctx SPI RTIO context
 * @param key Spinlock key
 */
static inline void spi_spin_unlock(struct spi_rtio *ctx, k_spinlock_key_t key)
{
	k_spin_unlock(&ctx->lock, key);
}

void spi_rtio_init(struct spi_rtio *ctx,
		   const struct device *dev)
{
	mpsc_init(&ctx->io_q);
	ctx->txn_head = NULL;
	ctx->txn_curr = NULL;
	ctx->dt_spec.bus = dev;
	ctx->iodev.data = &ctx->dt_spec;
	ctx->iodev.api = &spi_iodev_api;
}
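
/*
 * Example (sketch): a driver typically instantiates the context with the
 * SPI_RTIO_DEFINE() helper from <zephyr/drivers/spi/rtio.h> and calls
 * spi_rtio_init() from its init function. The "mydrv" names and Kconfig
 * symbols below are hypothetical.
 *
 *	SPI_RTIO_DEFINE(spi_mydrv_rtio, CONFIG_SPI_MYDRV_SQ_SIZE,
 *			CONFIG_SPI_MYDRV_CQ_SIZE);
 *
 *	static int spi_mydrv_init(const struct device *dev)
 *	{
 *		struct spi_mydrv_data *data = dev->data;
 *
 *		data->rtio_ctx = &spi_mydrv_rtio;
 *		spi_rtio_init(data->rtio_ctx, dev);
 *
 *		return 0;
 *	}
 */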

/**
 * @private
 * @brief Set up the next transaction (which may be a single op) if needed
 *
 * @retval true New transaction to start with the hardware is set up
 * @retval false No new transaction to start
 */
static bool spi_rtio_next(struct spi_rtio *ctx, bool completion)
{
	k_spinlock_key_t key = spi_spin_lock(ctx);

	if (!completion && ctx->txn_curr != NULL) {
		spi_spin_unlock(ctx, key);
		return false;
	}

	struct mpsc_node *next = mpsc_pop(&ctx->io_q);

	if (next != NULL) {
		struct rtio_iodev_sqe *next_sqe = CONTAINER_OF(next, struct rtio_iodev_sqe, q);

		ctx->txn_head = next_sqe;
		ctx->txn_curr = next_sqe;
	} else {
		ctx->txn_head = NULL;
		ctx->txn_curr = NULL;
	}

	spi_spin_unlock(ctx, key);

	return (ctx->txn_curr != NULL);
}

bool spi_rtio_complete(struct spi_rtio *ctx, int status)
{
	struct rtio_iodev_sqe *txn_head = ctx->txn_head;
	bool result;

	result = spi_rtio_next(ctx, true);

	if (status < 0) {
		rtio_iodev_sqe_err(txn_head, status);
	} else {
		rtio_iodev_sqe_ok(txn_head, status);
	}

	return result;
}

bool spi_rtio_submit(struct spi_rtio *ctx,
		     struct rtio_iodev_sqe *iodev_sqe)
{
	/* Queue the submission and start it if the bus is currently idle */
	mpsc_push(&ctx->io_q, &iodev_sqe->q);
	return spi_rtio_next(ctx, false);
}
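
/*
 * Example (sketch) of how spi_rtio_submit() and spi_rtio_complete() pair up
 * in a driver with a native RTIO path: submit starts the hardware when the
 * bus is idle, and the completion handler chains the next queued
 * transaction. The "mydrv" names, mydrv_start_transfer() and
 * mydrv_transfer_status() are hypothetical.
 *
 *	static void spi_mydrv_iodev_submit(const struct device *dev,
 *					   struct rtio_iodev_sqe *iodev_sqe)
 *	{
 *		struct spi_mydrv_data *data = dev->data;
 *
 *		if (spi_rtio_submit(data->rtio_ctx, iodev_sqe)) {
 *			mydrv_start_transfer(dev);
 *		}
 *	}
 *
 *	static void spi_mydrv_isr(const struct device *dev)
 *	{
 *		struct spi_mydrv_data *data = dev->data;
 *		int status = mydrv_transfer_status(dev);
 *
 *		if (spi_rtio_complete(data->rtio_ctx, status)) {
 *			mydrv_start_transfer(dev);
 *		}
 *	}
 */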

int spi_rtio_transceive(struct spi_rtio *ctx,
			const struct spi_config *config,
			const struct spi_buf_set *tx_bufs,
			const struct spi_buf_set *rx_bufs)
{
	struct spi_dt_spec *dt_spec = &ctx->dt_spec;
	struct rtio_sqe *sqe;
	struct rtio_cqe *cqe;
	int err = 0;
	int ret;

	dt_spec->config = *config;

	ret = spi_rtio_copy(ctx->r, &ctx->iodev, tx_bufs, rx_bufs, &sqe);
	if (ret < 0) {
		return ret;
	}

	/* Submit request and wait */
	rtio_submit(ctx->r, ret);

	while (ret > 0) {
		cqe = rtio_cqe_consume(ctx->r);
		if (cqe->result < 0) {
			err = cqe->result;
		}

		rtio_cqe_release(ctx->r, cqe);
		ret--;
	}

	return err;
}
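
/*
 * Example (sketch): a driver routing its blocking transceive API through
 * the RTIO context simply forwards to spi_rtio_transceive(). The "mydrv"
 * names are hypothetical.
 *
 *	static int spi_mydrv_transceive(const struct device *dev,
 *					const struct spi_config *config,
 *					const struct spi_buf_set *tx_bufs,
 *					const struct spi_buf_set *rx_bufs)
 *	{
 *		struct spi_mydrv_data *data = dev->data;
 *
 *		return spi_rtio_transceive(data->rtio_ctx, config,
 *					   tx_bufs, rx_bufs);
 *	}
 */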