1 /*
2 * Copyright (c) 2023 Intel Corporation
3 * Copyright (c) 2024 Meta Platforms
4 *
5 * SPDX-License-Identifier: Apache-2.0
6 */
7
8 #include <zephyr/drivers/i3c.h>
9 #include <zephyr/drivers/i3c/rtio.h>
10 #include <zephyr/rtio/rtio.h>
11 #include <zephyr/sys/mpsc_lockfree.h>
12 #include <zephyr/sys/__assert.h>
13
14 #define LOG_LEVEL CONFIG_I3C_LOG_LEVEL
15 #include <zephyr/logging/log.h>
16 LOG_MODULE_REGISTER(i3c_rtio);
17
/* RTIO iodev API vtable shared by all I3C RTIO iodevs; submission is routed
 * through the driver-provided i3c_iodev_submit().
 */
const struct rtio_iodev_api i3c_iodev_api = {
	.submit = i3c_iodev_submit,
};
21
/**
 * @brief Copy an array of i3c_msgs into a chain of RTIO submissions
 *
 * Acquires one SQE per message, preps it as a read or write, and links all
 * SQEs together as a single transaction. The transaction flag is cleared on
 * the final SQE so it produces the completion for the whole chain.
 *
 * @param r RTIO context to acquire submissions from
 * @param iodev RTIO iodev the submissions target
 * @param msgs Array of I3C messages to copy
 * @param num_msgs Number of messages in @p msgs; must be at least 1
 *
 * @return Pointer to the last SQE of the chain, or NULL if there were not
 *         enough submission queue entries (any partially acquired SQEs are
 *         dropped before returning).
 */
struct rtio_sqe *i3c_rtio_copy(struct rtio *r, struct rtio_iodev *iodev, const struct i3c_msg *msgs,
			       uint8_t num_msgs)
{
	__ASSERT(num_msgs > 0, "Expecting at least one message to copy");

	struct rtio_sqe *sqe = NULL;

	for (uint8_t i = 0; i < num_msgs; i++) {
		sqe = rtio_sqe_acquire(r);

		if (sqe == NULL) {
			/* Out of SQEs: release everything acquired so far */
			rtio_sqe_drop_all(r);
			return NULL;
		}

		if (msgs[i].flags & I3C_MSG_READ) {
			rtio_sqe_prep_read(sqe, iodev, RTIO_PRIO_NORM, msgs[i].buf, msgs[i].len,
					   NULL);
		} else {
			rtio_sqe_prep_write(sqe, iodev, RTIO_PRIO_NORM, msgs[i].buf, msgs[i].len,
					    NULL);
		}
		/* Chain every SQE into one transaction; cleared on the last one below */
		sqe->flags |= RTIO_SQE_TRANSACTION;
		/* Translate I3C message flags into the iodev flag encoding */
		sqe->iodev_flags =
			((msgs[i].flags & I3C_MSG_STOP) ? RTIO_IODEV_I3C_STOP : 0) |
			((msgs[i].flags & I3C_MSG_RESTART) ? RTIO_IODEV_I3C_RESTART : 0) |
			((msgs[i].flags & I3C_MSG_HDR) ? RTIO_IODEV_I3C_HDR : 0) |
			((msgs[i].flags & I3C_MSG_NBCH) ? RTIO_IODEV_I3C_NBCH : 0) |
			RTIO_IODEV_I3C_HDR_MODE_SET(msgs[i].hdr_mode) |
			RTIO_IODEV_I3C_HDR_CMD_CODE_SET(msgs[i].hdr_cmd_code);
	}

	/* The last SQE terminates the transaction and carries the completion */
	sqe->flags &= ~RTIO_SQE_TRANSACTION;

	return sqe;
}
58
i3c_rtio_init(struct i3c_rtio * ctx)59 void i3c_rtio_init(struct i3c_rtio *ctx)
60 {
61 k_sem_init(&ctx->lock, 1, 1);
62 mpsc_init(&ctx->io_q);
63 ctx->txn_curr = NULL;
64 ctx->txn_head = NULL;
65 ctx->iodev.api = &i3c_iodev_api;
66 }
67
68 /**
69 * @private
70 * @brief Setup the next transaction (could be a single op) if needed
71 *
72 * @retval true New transaction to start with the hardware is setup
73 * @retval false No new transaction to start
74 */
i3c_rtio_next(struct i3c_rtio * ctx,bool completion)75 static bool i3c_rtio_next(struct i3c_rtio *ctx, bool completion)
76 {
77 k_spinlock_key_t key = k_spin_lock(&ctx->slock);
78
79 /* Already working on something, bail early */
80 if (!completion && ctx->txn_head != NULL) {
81 k_spin_unlock(&ctx->slock, key);
82 return false;
83 }
84
85 struct mpsc_node *next = mpsc_pop(&ctx->io_q);
86
87 /* Nothing left to do */
88 if (next == NULL) {
89 ctx->txn_head = NULL;
90 ctx->txn_curr = NULL;
91 k_spin_unlock(&ctx->slock, key);
92 return false;
93 }
94
95 ctx->txn_head = CONTAINER_OF(next, struct rtio_iodev_sqe, q);
96 ctx->txn_curr = ctx->txn_head;
97
98 k_spin_unlock(&ctx->slock, key);
99
100 return true;
101 }
102
i3c_rtio_complete(struct i3c_rtio * ctx,int status)103 bool i3c_rtio_complete(struct i3c_rtio *ctx, int status)
104 {
105 /* On error bail */
106 if (status < 0) {
107 rtio_iodev_sqe_err(ctx->txn_head, status);
108 return i3c_rtio_next(ctx, true);
109 }
110
111 /* Try for next submission in the transaction */
112 ctx->txn_curr = rtio_txn_next(ctx->txn_curr);
113 if (ctx->txn_curr) {
114 return true;
115 }
116
117 rtio_iodev_sqe_ok(ctx->txn_head, status);
118 return i3c_rtio_next(ctx, true);
119 }
i3c_rtio_submit(struct i3c_rtio * ctx,struct rtio_iodev_sqe * iodev_sqe)120 bool i3c_rtio_submit(struct i3c_rtio *ctx, struct rtio_iodev_sqe *iodev_sqe)
121 {
122 mpsc_push(&ctx->io_q, &iodev_sqe->q);
123 return i3c_rtio_next(ctx, false);
124 }
125
/**
 * @brief Perform a blocking I3C transfer through the RTIO context
 *
 * Copies the messages into SQEs, submits them as one transaction, and
 * drains the completion queue, returning the last completion's result.
 *
 * @param ctx I3C RTIO driver context
 * @param msgs Array of I3C messages to transfer
 * @param num_msgs Number of messages in @p msgs
 * @param desc I3C device descriptor of the target
 *
 * @retval 0 on success
 * @retval -ENOMEM if there were not enough submission queue entries
 * @retval Negative errno reported by the transfer completion otherwise
 */
int i3c_rtio_transfer(struct i3c_rtio *ctx, struct i3c_msg *msgs, uint8_t num_msgs,
		      struct i3c_device_desc *desc)
{
	struct rtio_iodev *iodev = &ctx->iodev;
	struct rtio *const r = ctx->r;
	struct rtio_sqe *sqe = NULL;
	struct rtio_cqe *cqe = NULL;
	int res = 0;

	k_sem_take(&ctx->lock, K_FOREVER);

	ctx->i3c_desc = desc;

	sqe = i3c_rtio_copy(r, iodev, msgs, num_msgs);
	if (sqe == NULL) {
		LOG_ERR("Not enough submission queue entries");
		res = -ENOMEM;
		goto out;
	}

	rtio_submit(r, 1);

	/* Drain all completions; the last result wins */
	cqe = rtio_cqe_consume(r);
	while (cqe != NULL) {
		res = cqe->result;
		rtio_cqe_release(r, cqe);
		cqe = rtio_cqe_consume(r);
	}

out:
	k_sem_give(&ctx->lock);
	return res;
}
159
i3c_rtio_configure(struct i3c_rtio * ctx,enum i3c_config_type type,void * config)160 int i3c_rtio_configure(struct i3c_rtio *ctx, enum i3c_config_type type, void *config)
161 {
162 struct rtio_iodev *iodev = &ctx->iodev;
163 struct rtio *const r = ctx->r;
164 struct rtio_sqe *sqe = NULL;
165 struct rtio_cqe *cqe = NULL;
166 int res = 0;
167
168 k_sem_take(&ctx->lock, K_FOREVER);
169
170 sqe = rtio_sqe_acquire(r);
171 if (sqe == NULL) {
172 LOG_ERR("Not enough submission queue entries");
173 res = -ENOMEM;
174 goto out;
175 }
176
177 sqe->op = RTIO_OP_I3C_CONFIGURE;
178 sqe->iodev = iodev;
179 sqe->i3c_config.type = type;
180 sqe->i3c_config.config = config;
181
182 rtio_submit(r, 1);
183
184 cqe = rtio_cqe_consume(r);
185 res = cqe->result;
186 rtio_cqe_release(r, cqe);
187
188 out:
189 k_sem_give(&ctx->lock);
190 return res;
191 }
192
i3c_rtio_ccc(struct i3c_rtio * ctx,struct i3c_ccc_payload * payload)193 int i3c_rtio_ccc(struct i3c_rtio *ctx, struct i3c_ccc_payload *payload)
194 {
195 struct rtio_iodev *iodev = &ctx->iodev;
196 struct rtio *const r = ctx->r;
197 struct rtio_sqe *sqe = NULL;
198 struct rtio_cqe *cqe = NULL;
199 int res = 0;
200
201 k_sem_take(&ctx->lock, K_FOREVER);
202
203 sqe = rtio_sqe_acquire(r);
204 if (sqe == NULL) {
205 LOG_ERR("Not enough submission queue entries");
206 res = -ENOMEM;
207 goto out;
208 }
209
210 sqe->op = RTIO_OP_I3C_CCC;
211 sqe->iodev = iodev;
212 sqe->ccc_payload = payload;
213
214 rtio_submit(r, 1);
215
216 cqe = rtio_cqe_consume(r);
217 res = cqe->result;
218 rtio_cqe_release(r, cqe);
219
220 out:
221 k_sem_give(&ctx->lock);
222 return res;
223 }
224
i3c_rtio_recover(struct i3c_rtio * ctx)225 int i3c_rtio_recover(struct i3c_rtio *ctx)
226 {
227 struct rtio_iodev *iodev = &ctx->iodev;
228 struct rtio *const r = ctx->r;
229 struct rtio_sqe *sqe = NULL;
230 struct rtio_cqe *cqe = NULL;
231 int res = 0;
232
233 k_sem_take(&ctx->lock, K_FOREVER);
234
235 sqe = rtio_sqe_acquire(r);
236 if (sqe == NULL) {
237 LOG_ERR("Not enough submission queue entries");
238 res = -ENOMEM;
239 goto out;
240 }
241
242 sqe->op = RTIO_OP_I3C_RECOVER;
243 sqe->iodev = iodev;
244
245 rtio_submit(r, 1);
246
247 cqe = rtio_cqe_consume(r);
248 res = cqe->result;
249 rtio_cqe_release(r, cqe);
250
251 out:
252 k_sem_give(&ctx->lock);
253 return res;
254 }
255