/* Copyright (c) 2024 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

#include "blocking_emul.hpp"

#include <zephyr/drivers/i2c.h>
#include <zephyr/fff.h>
#include <zephyr/rtio/rtio.h>
#include <zephyr/ztest.h>

DEFINE_FFF_GLOBALS;

static const struct device *i2c_dev = DEVICE_DT_GET(DT_NODELABEL(i2c0));
I2C_DT_IODEV_DEFINE(blocking_emul_iodev, DT_NODELABEL(blocking_emul));

RTIO_DEFINE(test_rtio_ctx, 4, 4);

static void rtio_i2c_before(void *fixture)
{
	ARG_UNUSED(fixture);
	RESET_FAKE(blocking_emul_i2c_transfer);
	FFF_RESET_HISTORY();

	rtio_sqe_drop_all(&test_rtio_ctx);

	struct rtio_cqe *cqe;

	while ((cqe = rtio_cqe_consume(&test_rtio_ctx)) != NULL) {
		rtio_cqe_release(&test_rtio_ctx, cqe);
	}
}

ZTEST_SUITE(rtio_i2c, NULL, NULL, rtio_i2c_before, NULL, NULL);

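/*
 * The emulated controller's RTIO submit hook should resolve to the generic
 * i2c_iodev_submit_fallback helper.
 */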
ZTEST(rtio_i2c, test_emulated_api_uses_fallback_submit)
{
	zassert_not_null(i2c_dev->api);
	zassert_equal_ptr(i2c_iodev_submit_fallback,
			  ((const struct i2c_driver_api *)i2c_dev->api)->iodev_submit);
}

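/*
 * A single write message submitted through the fallback path should reach the emulator
 * as one transfer with the same length, payload, and flags.
 */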
ZTEST(rtio_i2c, test_fallback_submit_tx)
{
	uint8_t data[] = {0x01, 0x02, 0x03};
	struct i2c_msg msg = {
		.buf = data,
		.len = ARRAY_SIZE(data),
		.flags = I2C_MSG_WRITE | I2C_MSG_STOP,
	};

	blocking_emul_i2c_transfer_fake.custom_fake =
		[&msg](const struct emul *, struct i2c_msg *msgs, int msg_count, int) {
			zassert_equal(1, msg_count);
			zassert_equal(msg.len, msgs[0].len);
			zassert_mem_equal(msg.buf, msgs[0].buf, msg.len);
			zassert_equal(msg.flags, msgs[0].flags);
			return 0;
		};

	struct rtio_sqe *sqe = i2c_rtio_copy(&test_rtio_ctx, &blocking_emul_iodev, &msg, 1);

	zassert_not_null(sqe);
	zassert_ok(rtio_submit(&test_rtio_ctx, 1));
	zassert_equal(1, blocking_emul_i2c_transfer_fake.call_count);

	struct rtio_cqe *cqe = rtio_cqe_consume_block(&test_rtio_ctx);

	zassert_ok(cqe->result);
	rtio_cqe_release(&test_rtio_ctx, cqe);
}

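/*
 * An SQE with an unknown opcode must not touch the bus and should complete with -EIO.
 */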
ZTEST(rtio_i2c, test_fallback_submit_invalid_op)
{
	struct rtio_sqe *sqe = rtio_sqe_acquire(&test_rtio_ctx);

	zassert_not_null(sqe);
	sqe->op = UINT8_MAX;
	sqe->prio = RTIO_PRIO_NORM;
	sqe->iodev = &blocking_emul_iodev;
	sqe->userdata = NULL;

	zassert_ok(rtio_submit(&test_rtio_ctx, 1));
	zassert_equal(0, blocking_emul_i2c_transfer_fake.call_count);

	struct rtio_cqe *cqe = rtio_cqe_consume_block(&test_rtio_ctx);

	zassert_equal(-EIO, cqe->result);
	rtio_cqe_release(&test_rtio_ctx, cqe);
}

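/*
 * A tiny write, whose payload is copied into the SQE itself, should still be translated
 * into a single I2C write transfer by the fallback path.
 */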
ZTEST(rtio_i2c, test_fallback_submit_tiny_tx)
{
	uint8_t data[] = {0x01, 0x02, 0x03};
	struct rtio_sqe *sqe = rtio_sqe_acquire(&test_rtio_ctx);

	blocking_emul_i2c_transfer_fake.custom_fake =
		[&data](const struct emul *, struct i2c_msg *msgs, int msg_count, int) {
			zassert_equal(1, msg_count);
			zassert_equal(ARRAY_SIZE(data), msgs[0].len);
			zassert_mem_equal(data, msgs[0].buf, msgs[0].len);
			zassert_equal(I2C_MSG_WRITE | I2C_MSG_STOP, msgs[0].flags);
			return 0;
		};

	zassert_not_null(sqe);

	rtio_sqe_prep_tiny_write(sqe, &blocking_emul_iodev, RTIO_PRIO_NORM, data, ARRAY_SIZE(data),
				 NULL);
	zassert_ok(rtio_submit(&test_rtio_ctx, 1));
	zassert_equal(1, blocking_emul_i2c_transfer_fake.call_count);

	struct rtio_cqe *cqe = rtio_cqe_consume_block(&test_rtio_ctx);

	zassert_ok(cqe->result);

	rtio_cqe_release(&test_rtio_ctx, cqe);
}

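/*
 * A single read message should be forwarded to the emulator, and the bytes the emulator
 * writes must land in the caller's buffer.
 */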
ZTEST(rtio_i2c, test_fallback_submit_rx)
{
	uint8_t expected_buffer[] = {0x00, 0x01, 0x02};
	uint8_t buffer[ARRAY_SIZE(expected_buffer)] = {0};
	struct i2c_msg msg = {
		.buf = buffer,
		.len = ARRAY_SIZE(buffer),
		.flags = I2C_MSG_READ | I2C_MSG_STOP,
	};

	blocking_emul_i2c_transfer_fake.custom_fake =
		[&msg](const struct emul *, struct i2c_msg *msgs, int msg_count, int) {
			zassert_equal(1, msg_count);
			zassert_equal(msg.len, msgs[0].len);
			zassert_equal(msg.flags, msgs[0].flags);
			for (uint8_t i = 0; i < msg.len; ++i) {
				msgs[0].buf[i] = i;
			}
			return 0;
		};

	struct rtio_sqe *sqe = i2c_rtio_copy(&test_rtio_ctx, &blocking_emul_iodev, &msg, 1);

	zassert_not_null(sqe);
	zassert_ok(rtio_submit(&test_rtio_ctx, 1));
	zassert_equal(1, blocking_emul_i2c_transfer_fake.call_count);

	struct rtio_cqe *cqe = rtio_cqe_consume_block(&test_rtio_ctx);

	zassert_ok(cqe->result);
	zassert_mem_equal(buffer, expected_buffer, ARRAY_SIZE(expected_buffer));

	rtio_cqe_release(&test_rtio_ctx, cqe);
}

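/*
 * When the transfer behind a two-SQE transaction fails, the error must surface as -EIO
 * and every SQE in the chain must still produce a completion.
 */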
ZTEST(rtio_i2c, test_fallback_transaction_error)
{
	uint8_t buffer[3];
	struct rtio_sqe *phase1 = rtio_sqe_acquire(&test_rtio_ctx);
	struct rtio_sqe *phase2 = rtio_sqe_acquire(&test_rtio_ctx);

	blocking_emul_i2c_transfer_fake.return_val = -EIO;

	zassert_not_null(phase1);
	zassert_not_null(phase2);

	rtio_sqe_prep_read(phase1, &blocking_emul_iodev, RTIO_PRIO_NORM, buffer, ARRAY_SIZE(buffer),
			   NULL);
	rtio_sqe_prep_read(phase2, &blocking_emul_iodev, RTIO_PRIO_NORM, buffer, ARRAY_SIZE(buffer),
			   NULL);

	phase1->flags |= RTIO_SQE_TRANSACTION;

	zassert_ok(rtio_submit(&test_rtio_ctx, 2));
	zassert_equal(1, blocking_emul_i2c_transfer_fake.call_count);

	struct rtio_cqe *cqe = rtio_cqe_consume_block(&test_rtio_ctx);

	zassert_equal(-EIO, cqe->result);

	rtio_cqe_release(&test_rtio_ctx, cqe);

	/* We have another CQE for the transaction that must be cleared out. */
	cqe = rtio_cqe_consume_block(&test_rtio_ctx);
	rtio_cqe_release(&test_rtio_ctx, cqe);
}

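/*
 * A successful two-SQE transaction should be handed to the emulator as a single transfer
 * and yield one successful completion per SQE.
 */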
ZTEST(rtio_i2c, test_fallback_transaction)
{
	uint8_t buffer[3];
	struct rtio_sqe *phase1 = rtio_sqe_acquire(&test_rtio_ctx);
	struct rtio_sqe *phase2 = rtio_sqe_acquire(&test_rtio_ctx);

	zassert_not_null(phase1);
	zassert_not_null(phase2);

	rtio_sqe_prep_read(phase1, &blocking_emul_iodev, RTIO_PRIO_NORM, buffer, ARRAY_SIZE(buffer),
			   NULL);
	rtio_sqe_prep_read(phase2, &blocking_emul_iodev, RTIO_PRIO_NORM, buffer, ARRAY_SIZE(buffer),
			   NULL);

	phase1->flags |= RTIO_SQE_TRANSACTION;

	zassert_ok(rtio_submit(&test_rtio_ctx, 2));
	zassert_equal(1, blocking_emul_i2c_transfer_fake.call_count);

	struct rtio_cqe *cqe;

	/* Check the first part of the transaction. */
	cqe = rtio_cqe_consume_block(&test_rtio_ctx);
	zassert_ok(cqe->result);
	rtio_cqe_release(&test_rtio_ctx, cqe);

	/* We have another CQE for the transaction that must be cleared out. */
	cqe = rtio_cqe_consume_block(&test_rtio_ctx);
	zassert_ok(cqe->result);
	rtio_cqe_release(&test_rtio_ctx, cqe);
}

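/*
 * With CONFIG_RTIO_WORKQ_POOL_ITEMS fixed at 2, submitting three blocking transfers
 * should service the first two and fail the third with -ENOMEM.
 */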
ZTEST(rtio_i2c, test_work_queue_overflow)
{
	BUILD_ASSERT(CONFIG_RTIO_WORKQ_POOL_ITEMS == 2);

	uint8_t data[][2] = {
		{0x01, 0x02},
		{0x03, 0x04},
		{0x05, 0x06},
	};
	struct i2c_msg msg[] = {
		{
			.buf = data[0],
			.len = 2,
			.flags = I2C_MSG_WRITE | I2C_MSG_STOP,
		},
		{
			.buf = data[1],
			.len = 2,
			.flags = I2C_MSG_READ | I2C_MSG_STOP,
		},
		{
			.buf = data[2],
			.len = 2,
			.flags = I2C_MSG_READ | I2C_MSG_ADDR_10_BITS | I2C_MSG_STOP,
		},
	};

	BUILD_ASSERT(ARRAY_SIZE(data) == ARRAY_SIZE(msg));

	blocking_emul_i2c_transfer_fake.custom_fake =
		[&msg](const struct emul *, struct i2c_msg *msgs, int msg_count, int) {
			zassert_equal(1, msg_count);

			int msg_idx = i2c_is_read_op(&msgs[0]) ? 1 : 0;

			zassert_equal(msg[msg_idx].len, msgs[0].len);
			zassert_mem_equal(msg[msg_idx].buf, msgs[0].buf, msg[msg_idx].len,
					  "Expected [0x%02x, 0x%02x] but got [0x%02x, 0x%02x]",
					  msg[msg_idx].buf[0], msg[msg_idx].buf[1], msgs[0].buf[0],
					  msgs[0].buf[1]);
			zassert_equal(msg[msg_idx].flags, msgs[0].flags);
			return 0;
		};

	struct rtio_sqe *sqe_write =
		i2c_rtio_copy(&test_rtio_ctx, &blocking_emul_iodev, &msg[0], 1);
	struct rtio_sqe *sqe_read = i2c_rtio_copy(&test_rtio_ctx, &blocking_emul_iodev, &msg[1], 1);
	struct rtio_sqe *sqe_dropped =
		i2c_rtio_copy(&test_rtio_ctx, &blocking_emul_iodev, &msg[2], 1);

	zassert_not_null(sqe_write);
	zassert_not_null(sqe_read);
	zassert_not_null(sqe_dropped);

	/* Add userdata so we can match these up with the CQEs */
	sqe_write->userdata = &msg[0];
	sqe_read->userdata = &msg[1];
	sqe_dropped->userdata = &msg[2];

	zassert_ok(rtio_submit(&test_rtio_ctx, 3));
	zassert_equal(2, blocking_emul_i2c_transfer_fake.call_count);

	struct rtio_cqe *cqe[] = {
		rtio_cqe_consume_block(&test_rtio_ctx),
		rtio_cqe_consume_block(&test_rtio_ctx),
		rtio_cqe_consume_block(&test_rtio_ctx),
	};

	/*
	 * We need to make sure that we got back results for all 3 messages and that there are no
	 * duplicates.
	 */
	uint8_t msg_seen_mask = 0;

	for (unsigned int i = 0; i < ARRAY_SIZE(cqe); ++i) {
		int msg_idx = (struct i2c_msg *)cqe[i]->userdata - msg;

		zassert_true(msg_idx >= 0 && msg_idx < 3);
		msg_seen_mask |= BIT(msg_idx);
		if (msg_idx == 0 || msg_idx == 1) {
			/* Expect the first 2 to succeed */
			zassert_ok(cqe[i]->result);
		} else {
			zassert_equal(-ENOMEM, cqe[i]->result);
		}
	}

	/* Make sure bits 0, 1, and 2 were set. */
	zassert_equal(0x7, msg_seen_mask);

	rtio_cqe_release(&test_rtio_ctx, cqe[0]);
	rtio_cqe_release(&test_rtio_ctx, cqe[1]);
	rtio_cqe_release(&test_rtio_ctx, cqe[2]);
}