/*
 * Copyright (c) 2023 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/drivers/i2c.h>
#include <zephyr/drivers/i2c/rtio.h>
#include <zephyr/rtio/rtio.h>
#include <zephyr/sys/mpsc_lockfree.h>
#include <zephyr/sys/__assert.h>

#define LOG_LEVEL CONFIG_I2C_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(i2c_rtio);

const struct rtio_iodev_api i2c_iodev_api = {
	.submit = i2c_iodev_submit,
};

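/**
 * @brief Copy an array of i2c_msgs into a chain of submission queue entries
 *
 * Each message becomes one read or write SQE against @p iodev. All but the
 * last entry carry RTIO_SQE_TRANSACTION so the chain completes as a single
 * transaction, and the I2C stop/restart/10-bit address flags are translated
 * into the matching RTIO iodev flags.
 *
 * Illustrative usage, a minimal sketch mirroring i2c_rtio_transfer() below
 * (target-address setup, error handling and completion draining omitted):
 *
 * @code{.c}
 * struct rtio_sqe *sqe = i2c_rtio_copy(ctx->r, &ctx->iodev, msgs, num_msgs);
 *
 * if (sqe != NULL) {
 *	rtio_submit(ctx->r, 1);
 * }
 * @endcode
 *
 * @return The last SQE of the chain, or NULL if the submission queue was
 *         exhausted (in which case all acquired SQEs are dropped).
 */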
struct rtio_sqe *i2c_rtio_copy(struct rtio *r, struct rtio_iodev *iodev, const struct i2c_msg *msgs,
			       uint8_t num_msgs)
{
	__ASSERT(num_msgs > 0, "Expecting at least one message to copy");

	struct rtio_sqe *sqe = NULL;

	for (uint8_t i = 0; i < num_msgs; i++) {
		sqe = rtio_sqe_acquire(r);

		if (sqe == NULL) {
			rtio_sqe_drop_all(r);
			return NULL;
		}

		if (msgs[i].flags & I2C_MSG_READ) {
			rtio_sqe_prep_read(sqe, iodev, RTIO_PRIO_NORM, msgs[i].buf, msgs[i].len,
					   NULL);
		} else {
			rtio_sqe_prep_write(sqe, iodev, RTIO_PRIO_NORM, msgs[i].buf, msgs[i].len,
					    NULL);
		}
		sqe->flags |= RTIO_SQE_TRANSACTION;
		sqe->iodev_flags =
			((msgs[i].flags & I2C_MSG_STOP) ? RTIO_IODEV_I2C_STOP : 0) |
			((msgs[i].flags & I2C_MSG_RESTART) ? RTIO_IODEV_I2C_RESTART : 0) |
			((msgs[i].flags & I2C_MSG_ADDR_10_BITS) ? RTIO_IODEV_I2C_10_BITS : 0);
	}

	sqe->flags &= ~RTIO_SQE_TRANSACTION;

	return sqe;
}

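/**
 * @brief Prepare a single SQE writing a register address followed by one data byte
 *
 * The two bytes are copied into the SQE itself (tiny write), so the local
 * buffer does not need to outlive this call. The entry is flagged to issue a
 * stop condition when it completes.
 *
 * @return The prepared SQE, or NULL if the submission queue was exhausted.
 */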
struct rtio_sqe *i2c_rtio_copy_reg_write_byte(struct rtio *r, struct rtio_iodev *iodev,
					      uint8_t reg_addr, uint8_t data)
{
	uint8_t msg[2];

	struct rtio_sqe *sqe = rtio_sqe_acquire(r);

	if (sqe == NULL) {
		rtio_sqe_drop_all(r);
		return NULL;
	}
	msg[0] = reg_addr;
	msg[1] = data;
	rtio_sqe_prep_tiny_write(sqe, iodev, RTIO_PRIO_NORM, msg, sizeof(msg), NULL);
	sqe->iodev_flags = RTIO_IODEV_I2C_STOP;
	return sqe;
}

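/**
 * @brief Prepare a register burst read: a tiny write of the start address
 *        chained, as a transaction, to a read of @p num_bytes into @p buf
 *
 * The read is issued with a repeated start and ends with a stop condition.
 *
 * @return The read SQE (the last of the pair), or NULL if the submission
 *         queue was exhausted.
 */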
struct rtio_sqe *i2c_rtio_copy_reg_burst_read(struct rtio *r, struct rtio_iodev *iodev,
					      uint8_t start_addr, void *buf, size_t num_bytes)
{
	struct rtio_sqe *sqe = rtio_sqe_acquire(r);

	if (sqe == NULL) {
		rtio_sqe_drop_all(r);
		return NULL;
	}
	rtio_sqe_prep_tiny_write(sqe, iodev, RTIO_PRIO_NORM, &start_addr, 1, NULL);
	sqe->flags |= RTIO_SQE_TRANSACTION;

	sqe = rtio_sqe_acquire(r);
	if (sqe == NULL) {
		rtio_sqe_drop_all(r);
		return NULL;
	}
	rtio_sqe_prep_read(sqe, iodev, RTIO_PRIO_NORM, buf, num_bytes, NULL);
	sqe->iodev_flags |= RTIO_IODEV_I2C_STOP | RTIO_IODEV_I2C_RESTART;

	return sqe;
}

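/**
 * @brief Initialize an i2c_rtio context for the given bus device
 *
 * Sets up the context lock, the lock-free submission queue, and the iodev
 * that routes RTIO submissions to this bus through i2c_iodev_api. Typically
 * called once from the bus driver's init function with the driver's own
 * context instance.
 */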
void i2c_rtio_init(struct i2c_rtio *ctx, const struct device *dev)
{
	k_sem_init(&ctx->lock, 1, 1);
	mpsc_init(&ctx->io_q);
	ctx->txn_curr = NULL;
	ctx->txn_head = NULL;
	ctx->dt_spec.bus = dev;
	ctx->iodev.data = &ctx->dt_spec;
	ctx->iodev.api = &i2c_iodev_api;
}

/**
 * @private
 * @brief Set up the next transaction (which may be a single op) if needed
 *
 * @retval true A new transaction was set up and should be started on the hardware
 * @retval false No new transaction to start
 */
static bool i2c_rtio_next(struct i2c_rtio *ctx, bool completion)
{
	k_spinlock_key_t key = k_spin_lock(&ctx->slock);

	/* Already working on something, bail early */
	if (!completion && ctx->txn_head != NULL) {
		k_spin_unlock(&ctx->slock, key);
		return false;
	}

	struct mpsc_node *next = mpsc_pop(&ctx->io_q);

	/* Nothing left to do */
	if (next == NULL) {
		ctx->txn_head = NULL;
		ctx->txn_curr = NULL;
		k_spin_unlock(&ctx->slock, key);
		return false;
	}

	ctx->txn_head = CONTAINER_OF(next, struct rtio_iodev_sqe, q);
	ctx->txn_curr = ctx->txn_head;

	k_spin_unlock(&ctx->slock, key);

	return true;
}

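/**
 * @brief Signal completion of the current bus operation
 *
 * Intended to be called by the bus driver, typically from its ISR, with the
 * status of the operation it just finished. On error the whole transaction is
 * failed; on success the context advances to the next SQE of the transaction,
 * or completes it and pops the next queued transaction.
 *
 * A minimal sketch of the expected driver flow; my_i2c_start() is a
 * hypothetical helper that programs the hardware for ctx->txn_curr:
 *
 * @code{.c}
 * if (i2c_rtio_complete(&data->ctx, status)) {
 *	my_i2c_start(dev, data->ctx.txn_curr);
 * }
 * @endcode
 *
 * @retval true More work is ready: the driver should start ctx->txn_curr
 * @retval false Nothing further to start
 */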
bool i2c_rtio_complete(struct i2c_rtio *ctx, int status)
{
	/* On error bail */
	if (status < 0) {
		rtio_iodev_sqe_err(ctx->txn_head, status);
		return i2c_rtio_next(ctx, true);
	}

	/* Try for next submission in the transaction */
	ctx->txn_curr = rtio_txn_next(ctx->txn_curr);
	if (ctx->txn_curr) {
		return true;
	}

	rtio_iodev_sqe_ok(ctx->txn_head, status);
	return i2c_rtio_next(ctx, true);
}

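/**
 * @brief Queue an iodev submission and start it if the bus is idle
 *
 * @retval true The bus was idle and a transaction was set up; the driver
 *              should start ctx->txn_curr on the hardware
 * @retval false A transaction is already in flight; the submission stays queued
 */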
bool i2c_rtio_submit(struct i2c_rtio *ctx, struct rtio_iodev_sqe *iodev_sqe)
{
	mpsc_push(&ctx->io_q, &iodev_sqe->q);
	return i2c_rtio_next(ctx, false);
}

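/**
 * @brief Blocking i2c_transfer() implementation on top of RTIO
 *
 * Serializes callers with the context lock, sets the target address, copies
 * the messages into SQEs, submits them, and drains the completion queue,
 * returning the final completion result.
 */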
int i2c_rtio_transfer(struct i2c_rtio *ctx, struct i2c_msg *msgs, uint8_t num_msgs, uint16_t addr)
{
	struct rtio_iodev *iodev = &ctx->iodev;
	struct rtio *const r = ctx->r;
	struct rtio_sqe *sqe = NULL;
	struct rtio_cqe *cqe = NULL;
	int res = 0;

	k_sem_take(&ctx->lock, K_FOREVER);

	ctx->dt_spec.addr = addr;

	sqe = i2c_rtio_copy(r, iodev, msgs, num_msgs);
	if (sqe == NULL) {
		LOG_ERR("Not enough submission queue entries");
		res = -ENOMEM;
		goto out;
	}

	rtio_submit(r, 1);

	cqe = rtio_cqe_consume(r);
	while (cqe != NULL) {
		res = cqe->result;
		rtio_cqe_release(r, cqe);
		cqe = rtio_cqe_consume(r);
	}

out:
	k_sem_give(&ctx->lock);
	return res;
}

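/**
 * @brief Blocking i2c_configure() implementation on top of RTIO
 *
 * Submits a RTIO_OP_I2C_CONFIGURE SQE carrying @p i2c_config and waits for
 * its completion result.
 */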
int i2c_rtio_configure(struct i2c_rtio *ctx, uint32_t i2c_config)
{
	struct rtio_iodev *iodev = &ctx->iodev;
	struct rtio *const r = ctx->r;
	struct rtio_sqe *sqe = NULL;
	struct rtio_cqe *cqe = NULL;
	int res = 0;

	k_sem_take(&ctx->lock, K_FOREVER);

	sqe = rtio_sqe_acquire(r);
	if (sqe == NULL) {
		LOG_ERR("Not enough submission queue entries");
		res = -ENOMEM;
		goto out;
	}

	sqe->op = RTIO_OP_I2C_CONFIGURE;
	sqe->iodev = iodev;
	sqe->i2c_config = i2c_config;

	rtio_submit(r, 1);

	cqe = rtio_cqe_consume(r);
	res = cqe->result;
	rtio_cqe_release(r, cqe);

out:
	k_sem_give(&ctx->lock);
	return res;
}

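/**
 * @brief Blocking i2c_recover_bus() implementation on top of RTIO
 *
 * Submits a RTIO_OP_I2C_RECOVER SQE and waits for its completion result.
 */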
int i2c_rtio_recover(struct i2c_rtio *ctx)
{
	struct rtio_iodev *iodev = &ctx->iodev;
	struct rtio *const r = ctx->r;
	struct rtio_sqe *sqe = NULL;
	struct rtio_cqe *cqe = NULL;
	int res = 0;

	k_sem_take(&ctx->lock, K_FOREVER);

	sqe = rtio_sqe_acquire(r);
	if (sqe == NULL) {
		LOG_ERR("Not enough submission queue entries");
		res = -ENOMEM;
		goto out;
	}

	sqe->op = RTIO_OP_I2C_RECOVER;
	sqe->iodev = iodev;

	rtio_submit(r, 1);

	cqe = rtio_cqe_consume(r);
	res = cqe->result;
	rtio_cqe_release(r, cqe);

out:
	k_sem_give(&ctx->lock);
	return res;
}