1 /*
2 * Copyright (c) 2022 Intel Corporation.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/ztest.h>
8 #include <zephyr/rtio/rtio.h>
9 #include <zephyr/sys/mpsc_lockfree.h>
10 #include <zephyr/kernel.h>
11
12 #ifndef RTIO_IODEV_TEST_H_
13 #define RTIO_IODEV_TEST_H_
14
/* Per-instance state for the RTIO test I/O device */
struct rtio_iodev_test_data {
	/* k_timer for an asynchronous task; expiry simulates hardware completing work */
	struct k_timer timer;

	/* Queue of requests (multi-producer, single-consumer) */
	struct mpsc io_q;

	/* Currently executing transaction:
	 * txn_head is the first sqe (used to complete/error the whole transaction),
	 * txn_curr is the sqe currently being serviced as the chain is walked.
	 */
	struct rtio_iodev_sqe *txn_head;
	struct rtio_iodev_sqe *txn_curr;

	/* Count of submit calls */
	atomic_t submit_count;

	/* Lock around kicking off next timer; enforces the single-consumer pop */
	struct k_spinlock lock;
};
32
rtio_iodev_test_next(struct rtio_iodev_test_data * data,bool completion)33 static void rtio_iodev_test_next(struct rtio_iodev_test_data *data, bool completion)
34 {
35 /* The next section must be serialized to ensure single consumer semantics */
36 k_spinlock_key_t key = k_spin_lock(&data->lock);
37
38 /* Already working on something, bail early */
39 if (!completion && data->txn_head != NULL) {
40 goto out;
41 }
42
43 struct mpsc_node *next = mpsc_pop(&data->io_q);
44
45 /* Nothing left to do, cleanup */
46 if (next == NULL) {
47 data->txn_head = NULL;
48 data->txn_curr = NULL;
49 goto out;
50 }
51
52 struct rtio_iodev_sqe *next_sqe = CONTAINER_OF(next, struct rtio_iodev_sqe, q);
53
54 data->txn_head = next_sqe;
55 data->txn_curr = next_sqe;
56 k_timer_start(&data->timer, K_MSEC(10), K_NO_WAIT);
57
58 out:
59 k_spin_unlock(&data->lock, key);
60 }
61
rtio_iodev_test_complete(struct rtio_iodev_test_data * data,int status)62 static void rtio_iodev_test_complete(struct rtio_iodev_test_data *data, int status)
63 {
64 if (status < 0) {
65 rtio_iodev_sqe_err(data->txn_head, status);
66 rtio_iodev_test_next(data, true);
67 }
68
69 data->txn_curr = rtio_txn_next(data->txn_curr);
70 if (data->txn_curr) {
71 k_timer_start(&data->timer, K_MSEC(10), K_NO_WAIT);
72 return;
73 }
74
75 rtio_iodev_sqe_ok(data->txn_head, status);
76 rtio_iodev_test_next(data, true);
77 }
78
rtio_iodev_timer_fn(struct k_timer * tm)79 static void rtio_iodev_timer_fn(struct k_timer *tm)
80 {
81 struct rtio_iodev_test_data *data = CONTAINER_OF(tm, struct rtio_iodev_test_data, timer);
82 struct rtio_iodev_sqe *iodev_sqe = data->txn_curr;
83 uint8_t *buf;
84 uint32_t buf_len;
85 int rc;
86
87 switch (iodev_sqe->sqe.op) {
88 case RTIO_OP_NOP:
89 rtio_iodev_test_complete(data, 0);
90 break;
91 case RTIO_OP_RX:
92 rc = rtio_sqe_rx_buf(iodev_sqe, 16, 16, &buf, &buf_len);
93 if (rc != 0) {
94 rtio_iodev_test_complete(data, rc);
95 return;
96 }
97 /* For reads the test device copies from the given userdata */
98 memcpy(buf, ((uint8_t *)iodev_sqe->sqe.userdata), 16);
99 rtio_iodev_test_complete(data, 0);
100 break;
101 default:
102 rtio_iodev_test_complete(data, -ENOTSUP);
103 }
104 }
105
rtio_iodev_test_submit(struct rtio_iodev_sqe * iodev_sqe)106 static void rtio_iodev_test_submit(struct rtio_iodev_sqe *iodev_sqe)
107 {
108 struct rtio_iodev *iodev = (struct rtio_iodev *)iodev_sqe->sqe.iodev;
109 struct rtio_iodev_test_data *data = iodev->data;
110
111 atomic_inc(&data->submit_count);
112
113 /* The only safe operation is enqueuing */
114 mpsc_push(&data->io_q, &iodev_sqe->q);
115
116 rtio_iodev_test_next(data, false);
117 }
118
/* I/O device API vtable for the test device; only submit is implemented */
const struct rtio_iodev_api rtio_iodev_test_api = {
	.submit = rtio_iodev_test_submit,
};
122
rtio_iodev_test_init(struct rtio_iodev * test)123 void rtio_iodev_test_init(struct rtio_iodev *test)
124 {
125 struct rtio_iodev_test_data *data = test->data;
126
127 mpsc_init(&data->io_q);
128 data->txn_head = NULL;
129 data->txn_curr = NULL;
130 k_timer_init(&data->timer, rtio_iodev_timer_fn, NULL);
131 }
132
/* Define a test iodev instance named @p name along with its backing data */
#define RTIO_IODEV_TEST_DEFINE(name)                                                               \
	static struct rtio_iodev_test_data _iodev_data_##name;                                     \
	RTIO_IODEV_DEFINE(name, &rtio_iodev_test_api, &_iodev_data_##name)
136
137
138
139 #endif /* RTIO_IODEV_TEST_H_ */
140