/*
 * Copyright (c) 2023 Intel Corporation
 * Copyright (c) 2024 Croxel Inc
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/rtio/work.h>
#include <zephyr/drivers/spi/rtio.h>
#include <zephyr/sys/mpsc_lockfree.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(spi_rtio, CONFIG_SPI_LOG_LEVEL);

const struct rtio_iodev_api spi_iodev_api = {
	.submit = spi_iodev_submit,
};
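
/*
 * Example usage (a minimal sketch, not taken from this file): an application
 * or driver binds a devicetree SPI peripheral to this iodev API and submits
 * RTIO operations against it. The node label (spi_dev), queue sizes and the
 * 0x9F opcode are purely illustrative.
 *
 *	SPI_DT_IODEV_DEFINE(spi_dev_iodev, DT_NODELABEL(spi_dev),
 *			    SPI_OP_MODE_MASTER | SPI_WORD_SET(8), 0);
 *	RTIO_DEFINE(r, 8, 8);
 *
 *	void read_id(void)
 *	{
 *		uint8_t cmd = 0x9F;
 *		struct rtio_sqe *sqe = rtio_sqe_acquire(&r);
 *
 *		rtio_sqe_prep_tiny_write(sqe, &spi_dev_iodev, RTIO_PRIO_NORM,
 *					 &cmd, sizeof(cmd), NULL);
 *		rtio_submit(&r, 1);
 *	}
 */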

static void spi_rtio_iodev_default_submit_sync(struct rtio_iodev_sqe *iodev_sqe)
{
	struct spi_dt_spec *dt_spec = iodev_sqe->sqe.iodev->data;
	const struct device *dev = dt_spec->bus;
	uint8_t num_msgs = 0;
	int err = 0;

	LOG_DBG("Sync RTIO work item for: %p", (void *)dev);

	/** Take care of multi-submission transactions in the same context.
	 * This guarantees that linked items will be consumed in the expected
	 * order, regardless of pending items in the workqueue.
	 */
	struct rtio_iodev_sqe *txn_head = iodev_sqe;
	struct rtio_iodev_sqe *txn_curr = iodev_sqe;

	/* We allocate the spi_buf's on the stack; to do so, the count of
	 * messages needs to be determined first to ensure we don't go over
	 * the statically sized array.
	 */
	do {
		switch (txn_curr->sqe.op) {
		case RTIO_OP_RX:
		case RTIO_OP_TX:
		case RTIO_OP_TINY_TX:
		case RTIO_OP_TXRX:
			num_msgs++;
			break;
		default:
			LOG_ERR("Invalid op code %d for submission %p", txn_curr->sqe.op,
				(void *)&txn_curr->sqe);
			err = -EIO;
			break;
		}
		txn_curr = rtio_txn_next(txn_curr);
	} while (err == 0 && txn_curr != NULL);

	if (err != 0) {
		rtio_iodev_sqe_err(txn_head, err);
		return;
	}

	/* Allocate msgs on the stack. MISRA doesn't like VLAs, so we need a statically
	 * sized array here. It's pretty unlikely we have more than 4 SPI messages
	 * in a transaction, as we typically only have two: one to write a
	 * register address, and another to read/write the register data.
	 */
	if (num_msgs > CONFIG_SPI_RTIO_FALLBACK_MSGS) {
		LOG_ERR("At most CONFIG_SPI_RTIO_FALLBACK_MSGS"
			" submissions in a transaction are"
			" allowed in the default handler");
		rtio_iodev_sqe_err(txn_head, -ENOMEM);
		return;
	}

	struct spi_buf tx_bufs[CONFIG_SPI_RTIO_FALLBACK_MSGS];
	struct spi_buf rx_bufs[CONFIG_SPI_RTIO_FALLBACK_MSGS];
	struct spi_buf_set tx_buf_set = {
		.buffers = tx_bufs,
		.count = num_msgs,
	};
	struct spi_buf_set rx_buf_set = {
		.buffers = rx_bufs,
		.count = num_msgs,
	};

	txn_curr = txn_head;

	for (size_t i = 0; i < num_msgs; i++) {
		struct rtio_sqe *sqe = &txn_curr->sqe;

		switch (sqe->op) {
		case RTIO_OP_RX:
			rx_bufs[i].buf = sqe->rx.buf;
			rx_bufs[i].len = sqe->rx.buf_len;
			tx_bufs[i].buf = NULL;
			tx_bufs[i].len = sqe->rx.buf_len;
			break;
		case RTIO_OP_TX:
			rx_bufs[i].buf = NULL;
			rx_bufs[i].len = sqe->tx.buf_len;
			tx_bufs[i].buf = (uint8_t *)sqe->tx.buf;
			tx_bufs[i].len = sqe->tx.buf_len;
			break;
		case RTIO_OP_TINY_TX:
			rx_bufs[i].buf = NULL;
			rx_bufs[i].len = sqe->tiny_tx.buf_len;
			tx_bufs[i].buf = (uint8_t *)sqe->tiny_tx.buf;
			tx_bufs[i].len = sqe->tiny_tx.buf_len;
			break;
		case RTIO_OP_TXRX:
			rx_bufs[i].buf = sqe->txrx.rx_buf;
			rx_bufs[i].len = sqe->txrx.buf_len;
			tx_bufs[i].buf = (uint8_t *)sqe->txrx.tx_buf;
			tx_bufs[i].len = sqe->txrx.buf_len;
			break;
		default:
			err = -EIO;
			break;
		}

		txn_curr = rtio_txn_next(txn_curr);
	}

	if (err == 0) {
		__ASSERT_NO_MSG(num_msgs > 0);
		err = spi_transceive_dt(dt_spec, &tx_buf_set, &rx_buf_set);
	}

	if (err != 0) {
		rtio_iodev_sqe_err(txn_head, err);
	} else {
		rtio_iodev_sqe_ok(txn_head, 0);
	}
}

void spi_rtio_iodev_default_submit(const struct device *dev,
				   struct rtio_iodev_sqe *iodev_sqe)
{
	LOG_DBG("Executing fallback for dev: %p, sqe: %p", (void *)dev, (void *)iodev_sqe);

	struct rtio_work_req *req = rtio_work_req_alloc();

	if (req == NULL) {
		LOG_ERR("RTIO work item allocation failed. Consider increasing "
			"CONFIG_RTIO_WORKQ_POOL_ITEMS.");
		rtio_iodev_sqe_err(iodev_sqe, -ENOMEM);
		return;
	}

	rtio_work_req_submit(req, iodev_sqe, spi_rtio_iodev_default_submit_sync);
}
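
/*
 * Typical usage (a minimal sketch; the spi_foo_* names are illustrative):
 * a driver without native RTIO support points its iodev_submit entry at the
 * work-queue based fallback above.
 *
 *	static void spi_foo_iodev_submit(const struct device *dev,
 *					 struct rtio_iodev_sqe *iodev_sqe)
 *	{
 *		spi_rtio_iodev_default_submit(dev, iodev_sqe);
 *	}
 *
 *	static const struct spi_driver_api spi_foo_api = {
 *		.transceive = spi_foo_transceive,
 *	#ifdef CONFIG_SPI_RTIO
 *		.iodev_submit = spi_foo_iodev_submit,
 *	#endif
 *		.release = spi_foo_release,
 *	};
 */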

/**
 * @brief Copy the tx_bufs and rx_bufs into a set of RTIO requests
 *
 * @param[in] r RTIO context
 * @param[in] iodev iodev to transceive with
 * @param[in] tx_bufs transmit buffer set
 * @param[in] rx_bufs receive buffer set
 * @param[out] last_sqe last sqe submitted, NULL if not enough memory
 *
 * @return Number of submission queue entries acquired on success
 * @retval -ENOMEM out of memory
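 *
 * Example (a minimal sketch, mirroring the usage in spi_rtio_transceive()
 * below; completion handling is omitted):
 *
 * @code{.c}
 * struct rtio_sqe *last_sqe;
 * int ret = spi_rtio_copy(r, iodev, tx_bufs, rx_bufs, &last_sqe);
 *
 * if (ret < 0) {
 *	return ret;
 * }
 *
 * rtio_submit(r, ret);
 * @endcode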
 */
int spi_rtio_copy(struct rtio *r,
		  struct rtio_iodev *iodev,
		  const struct spi_buf_set *tx_bufs,
		  const struct spi_buf_set *rx_bufs,
		  struct rtio_sqe **last_sqe)
{
	int ret = 0;
	size_t tx_count = tx_bufs ? tx_bufs->count : 0;
	size_t rx_count = rx_bufs ? rx_bufs->count : 0;

	uint32_t tx = 0, tx_len = 0;
	uint32_t rx = 0, rx_len = 0;
	uint8_t *tx_buf, *rx_buf;

	struct rtio_sqe *sqe = NULL;

	if (tx < tx_count) {
		tx_buf = tx_bufs->buffers[tx].buf;
		tx_len = tx_bufs->buffers[tx].len;
	} else {
		tx_buf = NULL;
		tx_len = rx_bufs->buffers[rx].len;
	}

	if (rx < rx_count) {
		rx_buf = rx_bufs->buffers[rx].buf;
		rx_len = rx_bufs->buffers[rx].len;
	} else {
		rx_buf = NULL;
		rx_len = tx_bufs->buffers[tx].len;
	}

	while ((tx < tx_count || rx < rx_count) && (tx_len > 0 || rx_len > 0)) {
		sqe = rtio_sqe_acquire(r);

		if (sqe == NULL) {
			ret = -ENOMEM;
			rtio_sqe_drop_all(r);
			goto out;
		}

		ret++;

		/* If the tx and rx lengths are the same, we can do a simple transceive */
		if (tx_len == rx_len) {
			if (tx_buf == NULL) {
				rtio_sqe_prep_read(sqe, iodev, RTIO_PRIO_NORM,
						   rx_buf, rx_len, NULL);
			} else if (rx_buf == NULL) {
				rtio_sqe_prep_write(sqe, iodev, RTIO_PRIO_NORM,
						    tx_buf, tx_len, NULL);
			} else {
				rtio_sqe_prep_transceive(sqe, iodev, RTIO_PRIO_NORM,
							 tx_buf, rx_buf, rx_len, NULL);
			}
			tx++;
			rx++;
			if (rx < rx_count) {
				rx_buf = rx_bufs->buffers[rx].buf;
				rx_len = rx_bufs->buffers[rx].len;
			} else {
				rx_buf = NULL;
				rx_len = 0;
			}
			if (tx < tx_count) {
				tx_buf = tx_bufs->buffers[tx].buf;
				tx_len = tx_bufs->buffers[tx].len;
			} else {
				tx_buf = NULL;
				tx_len = 0;
			}
		} else if (tx_len == 0) {
			rtio_sqe_prep_read(sqe, iodev, RTIO_PRIO_NORM,
					   (uint8_t *)rx_buf,
					   (uint32_t)rx_len,
					   NULL);
			rx++;
			if (rx < rx_count) {
				rx_buf = rx_bufs->buffers[rx].buf;
				rx_len = rx_bufs->buffers[rx].len;
			} else {
				rx_buf = NULL;
				rx_len = 0;
			}
		} else if (rx_len == 0) {
			rtio_sqe_prep_write(sqe, iodev, RTIO_PRIO_NORM,
					    (uint8_t *)tx_buf,
					    (uint32_t)tx_len,
					    NULL);
			tx++;
			if (tx < tx_count) {
				tx_buf = tx_bufs->buffers[tx].buf;
				tx_len = tx_bufs->buffers[tx].len;
			} else {
				tx_buf = NULL;
				tx_len = 0;
			}
		} else if (tx_len > rx_len) {
			rtio_sqe_prep_transceive(sqe, iodev, RTIO_PRIO_NORM,
						 (uint8_t *)tx_buf,
						 (uint8_t *)rx_buf,
						 (uint32_t)rx_len,
						 NULL);
			tx_len -= rx_len;
			tx_buf += rx_len;
			rx++;
			if (rx < rx_count) {
				rx_buf = rx_bufs->buffers[rx].buf;
				rx_len = rx_bufs->buffers[rx].len;
			} else {
				rx_buf = NULL;
				rx_len = tx_len;
			}
		} else if (rx_len > tx_len) {
			rtio_sqe_prep_transceive(sqe, iodev, RTIO_PRIO_NORM,
						 (uint8_t *)tx_buf,
						 (uint8_t *)rx_buf,
						 (uint32_t)tx_len,
						 NULL);
			rx_len -= tx_len;
			rx_buf += tx_len;
			tx++;
			if (tx < tx_count) {
				tx_buf = tx_bufs->buffers[tx].buf;
				tx_len = tx_bufs->buffers[tx].len;
			} else {
				tx_buf = NULL;
				tx_len = rx_len;
			}
		} else {
			__ASSERT(false, "Invalid %s state", __func__);
		}

		sqe->flags = RTIO_SQE_TRANSACTION;
	}

	if (sqe != NULL) {
		sqe->flags = 0;
		*last_sqe = sqe;
	}

out:
	return ret;
}

/**
 * @brief Lock the SPI RTIO spinlock
 *
 * This is used internally for controlling the SPI RTIO context, and is
 * exposed to the user as it is required to safely implement the
 * iodev_start API specific to each driver.
 *
 * @param ctx SPI RTIO context
 *
 * @return Spinlock key
 */
static inline k_spinlock_key_t spi_spin_lock(struct spi_rtio *ctx)
{
	return k_spin_lock(&ctx->lock);
}

/**
 * @brief Unlock the previously obtained SPI RTIO spinlock
 *
 * @param ctx SPI RTIO context
 * @param key Spinlock key
 */
static inline void spi_spin_unlock(struct spi_rtio *ctx, k_spinlock_key_t key)
{
	k_spin_unlock(&ctx->lock, key);
}

void spi_rtio_init(struct spi_rtio *ctx,
		   const struct device *dev)
{
	mpsc_init(&ctx->io_q);
	ctx->txn_head = NULL;
	ctx->txn_curr = NULL;
	ctx->dt_spec.bus = dev;
	ctx->iodev.data = &ctx->dt_spec;
	ctx->iodev.api = &spi_iodev_api;
}
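
/*
 * Typical driver setup (a minimal sketch; the spi_foo_* names and queue
 * sizes are illustrative): the context is statically allocated with
 * SPI_RTIO_DEFINE() and initialized once from the driver's init function.
 *
 *	SPI_RTIO_DEFINE(spi_foo_rtio_ctx, 8, 8);
 *
 *	static int spi_foo_init(const struct device *dev)
 *	{
 *		struct spi_foo_data *data = dev->data;
 *
 *		data->rtio_ctx = &spi_foo_rtio_ctx;
 *		spi_rtio_init(data->rtio_ctx, dev);
 *
 *		return 0;
 *	}
 */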

/**
 * @private
 * @brief Set up the next transaction (which could be a single op) if needed
 *
 * @retval true A new transaction to start on the hardware has been set up
 * @retval false No new transaction to start
 */
static bool spi_rtio_next(struct spi_rtio *ctx, bool completion)
{
	k_spinlock_key_t key = spi_spin_lock(ctx);

	if (!completion && ctx->txn_curr != NULL) {
		spi_spin_unlock(ctx, key);
		return false;
	}

	struct mpsc_node *next = mpsc_pop(&ctx->io_q);

	if (next != NULL) {
		struct rtio_iodev_sqe *next_sqe = CONTAINER_OF(next, struct rtio_iodev_sqe, q);

		ctx->txn_head = next_sqe;
		ctx->txn_curr = next_sqe;
	} else {
		ctx->txn_head = NULL;
		ctx->txn_curr = NULL;
	}

	spi_spin_unlock(ctx, key);

	return (ctx->txn_curr != NULL);
}

bool spi_rtio_complete(struct spi_rtio *ctx, int status)
{
	struct rtio_iodev_sqe *txn_head = ctx->txn_head;
	bool result;

	result = spi_rtio_next(ctx, true);

	if (status < 0) {
		rtio_iodev_sqe_err(txn_head, status);
	} else {
		rtio_iodev_sqe_ok(txn_head, status);
	}

	return result;
}
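
/*
 * Typical completion path (a minimal sketch; the spi_foo_* helpers are
 * illustrative): once the hardware signals the end of a transfer, the driver
 * reports the status and, if another transaction was already queued, starts
 * it right away.
 *
 *	static void spi_foo_isr(const struct device *dev)
 *	{
 *		struct spi_foo_data *data = dev->data;
 *		int status = spi_foo_xfer_status(dev); // 0 or negative errno
 *
 *		if (spi_rtio_complete(data->rtio_ctx, status)) {
 *			spi_foo_start(dev, data->rtio_ctx->txn_curr);
 *		}
 *	}
 */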

bool spi_rtio_submit(struct spi_rtio *ctx,
		     struct rtio_iodev_sqe *iodev_sqe)
{
	/** Queue the submission and set up the next transaction if the context is idle */
	mpsc_push(&ctx->io_q, &iodev_sqe->q);
	return spi_rtio_next(ctx, false);
}
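
/*
 * Typical usage from a driver with native RTIO support (a minimal sketch;
 * the spi_foo_* helpers are illustrative): queue the submission and only
 * touch the hardware when this context was idle; otherwise the completion
 * path of the transfer in flight picks it up later.
 *
 *	static void spi_foo_iodev_submit(const struct device *dev,
 *					 struct rtio_iodev_sqe *iodev_sqe)
 *	{
 *		struct spi_foo_data *data = dev->data;
 *
 *		if (spi_rtio_submit(data->rtio_ctx, iodev_sqe)) {
 *			spi_foo_start(dev, data->rtio_ctx->txn_curr);
 *		}
 *	}
 */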

int spi_rtio_transceive(struct spi_rtio *ctx,
			const struct spi_config *config,
			const struct spi_buf_set *tx_bufs,
			const struct spi_buf_set *rx_bufs)
{
	struct spi_dt_spec *dt_spec = &ctx->dt_spec;
	struct rtio_sqe *sqe;
	struct rtio_cqe *cqe;
	int err = 0;
	int ret;

	dt_spec->config = *config;

	ret = spi_rtio_copy(ctx->r, &ctx->iodev, tx_bufs, rx_bufs, &sqe);
	if (ret < 0) {
		return ret;
	}

	/** Submit request and wait */
	rtio_submit(ctx->r, ret);

	while (ret > 0) {
		cqe = rtio_cqe_consume(ctx->r);
		if (cqe->result < 0) {
			err = cqe->result;
		}

		rtio_cqe_release(ctx->r, cqe);
		ret--;
	}

	return err;
}
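
/*
 * Typical usage (a minimal sketch; the spi_foo_* names are illustrative): a
 * driver built on top of an RTIO context can implement the classic blocking
 * transceive API by delegating to this helper.
 *
 *	static int spi_foo_transceive(const struct device *dev,
 *				      const struct spi_config *config,
 *				      const struct spi_buf_set *tx_bufs,
 *				      const struct spi_buf_set *rx_bufs)
 *	{
 *		struct spi_foo_data *data = dev->data;
 *
 *		return spi_rtio_transceive(data->rtio_ctx, config, tx_bufs, rx_bufs);
 *	}
 */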