/*
 * Copyright (c) 2024 Google LLC
 * Copyright (c) 2024 Meta Platforms
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/drivers/i3c.h>
#include <zephyr/drivers/i3c/rtio.h>
#include <zephyr/rtio/rtio.h>
#include <zephyr/rtio/work.h>

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(i3c_rtio, CONFIG_I3C_LOG_LEVEL);

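/*
 * Helpers translating a single RTIO SQE into an equivalent struct i3c_msg,
 * mapping the RTIO_IODEV_I3C_* iodev flags onto the matching I3C_MSG_* flags.
 */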
static inline void i3c_msg_from_rx(const struct rtio_iodev_sqe *iodev_sqe, struct i3c_msg *msg)
{
	__ASSERT_NO_MSG(iodev_sqe->sqe.op == RTIO_OP_RX);

	msg->buf = iodev_sqe->sqe.rx.buf;
	msg->len = iodev_sqe->sqe.rx.buf_len;
	msg->flags =
		((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_STOP) ? I3C_MSG_STOP : 0) |
		((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_RESTART) ? I3C_MSG_RESTART : 0) |
		((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_HDR) ? I3C_MSG_HDR : 0) |
		((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_NBCH) ? I3C_MSG_NBCH : 0) |
		I3C_MSG_READ;
}

static inline void i3c_msg_from_tx(const struct rtio_iodev_sqe *iodev_sqe, struct i3c_msg *msg)
{
	__ASSERT_NO_MSG(iodev_sqe->sqe.op == RTIO_OP_TX);

	msg->buf = (uint8_t *)iodev_sqe->sqe.tx.buf;
	msg->len = iodev_sqe->sqe.tx.buf_len;
	msg->flags =
		((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_STOP) ? I3C_MSG_STOP : 0) |
		((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_RESTART) ? I3C_MSG_RESTART : 0) |
		((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_HDR) ? I3C_MSG_HDR : 0) |
		((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_NBCH) ? I3C_MSG_NBCH : 0) |
		I3C_MSG_WRITE;
}

static inline void i3c_msg_from_tiny_tx(const struct rtio_iodev_sqe *iodev_sqe, struct i3c_msg *msg)
{
	__ASSERT_NO_MSG(iodev_sqe->sqe.op == RTIO_OP_TINY_TX);

	msg->buf = (uint8_t *)iodev_sqe->sqe.tiny_tx.buf;
	msg->len = iodev_sqe->sqe.tiny_tx.buf_len;
	msg->flags =
		((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_STOP) ? I3C_MSG_STOP : 0) |
		((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_RESTART) ? I3C_MSG_RESTART : 0) |
		((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_HDR) ? I3C_MSG_HDR : 0) |
		((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_NBCH) ? I3C_MSG_NBCH : 0) |
		I3C_MSG_WRITE;
}

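/*
 * Synchronous work handler for the fallback path. It walks the chained RTIO
 * transaction starting at txn_first, converts each SQE into an i3c_msg and
 * performs the whole batch with a single i3c_transfer() call, completing the
 * transaction with the result.
 */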
void i3c_iodev_submit_work_handler(struct rtio_iodev_sqe *txn_first)
{
	const struct i3c_iodev_data *data =
		(const struct i3c_iodev_data *)txn_first->sqe.iodev->data;
	struct i3c_device_desc *desc;

	LOG_DBG("Sync RTIO work item for: %p", (void *)txn_first);
	uint32_t num_msgs = 0;
	int rc = 0;
	struct rtio_iodev_sqe *txn_last = txn_first;

	/* TODO: there really needs to be a compile time way to get the i3c_device_desc */
	desc = i3c_device_find(data->bus, &data->dev_id);
	if (!desc) {
		LOG_ERR("Cannot find I3C device descriptor");
		rc = -ENODEV;
		rtio_iodev_sqe_err(txn_first, rc);
		return;
	}

	/* The i3c_msg array is allocated on the stack, so the number of
	 * messages in the transaction has to be counted first.
	 */
	do {
		switch (txn_last->sqe.op) {
		case RTIO_OP_RX:
		case RTIO_OP_TX:
		case RTIO_OP_TINY_TX:
			num_msgs++;
			break;
		default:
			LOG_ERR("Invalid op code %d for submission %p", txn_last->sqe.op,
				(void *)&txn_last->sqe);
			rc = -EIO;
			break;
		}
		txn_last = rtio_txn_next(txn_last);
	} while (rc == 0 && txn_last != NULL);

	if (rc != 0) {
		rtio_iodev_sqe_err(txn_first, rc);
		return;
	}

	/* Allocate msgs on the stack. MISRA forbids VLAs, so a statically
	 * sized array is needed here. A transaction is unlikely to carry more
	 * than four I3C messages; typically there are only two, one to write
	 * a register address and another to read or write the register data.
	 */
	if (num_msgs > CONFIG_I3C_RTIO_FALLBACK_MSGS) {
		LOG_ERR("At most %d submissions in a transaction are allowed in the "
			"default handler", CONFIG_I3C_RTIO_FALLBACK_MSGS);
		rtio_iodev_sqe_err(txn_first, -ENOMEM);
		return;
	}
	struct i3c_msg msgs[CONFIG_I3C_RTIO_FALLBACK_MSGS];

	rc = 0;
	txn_last = txn_first;

	/* Copy the transaction into the stack allocated msgs */
	for (int i = 0; i < num_msgs; i++) {
		switch (txn_last->sqe.op) {
		case RTIO_OP_RX:
			i3c_msg_from_rx(txn_last, &msgs[i]);
			break;
		case RTIO_OP_TX:
			i3c_msg_from_tx(txn_last, &msgs[i]);
			break;
		case RTIO_OP_TINY_TX:
			i3c_msg_from_tiny_tx(txn_last, &msgs[i]);
			break;
		default:
			rc = -EIO;
			break;
		}

		txn_last = rtio_txn_next(txn_last);
	}

	if (rc == 0) {
		__ASSERT_NO_MSG(num_msgs > 0);

		rc = i3c_transfer(desc, msgs, num_msgs);
	}

	if (rc != 0) {
		rtio_iodev_sqe_err(txn_first, rc);
	} else {
		rtio_iodev_sqe_ok(txn_first, 0);
	}
}

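/*
 * Fallback RTIO submit implementation for I3C controller drivers without a
 * native RTIO path: the submission is handed off to the RTIO work queue and
 * serviced by the synchronous handler above.
 */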
void i3c_iodev_submit_fallback(const struct device *dev, struct rtio_iodev_sqe *iodev_sqe)
{
	LOG_DBG("Executing fallback for dev: %p, sqe: %p", (void *)dev, (void *)iodev_sqe);

	struct rtio_work_req *req = rtio_work_req_alloc();

	if (req == NULL) {
		rtio_iodev_sqe_err(iodev_sqe, -ENOMEM);
		return;
	}

	rtio_work_req_submit(req, iodev_sqe, i3c_iodev_submit_work_handler);
}
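
/*
 * Illustrative sketch (not part of this driver): a controller driver without a
 * native RTIO implementation could point its RTIO submit hook at the fallback
 * above. The member name iodev_submit mirrors the I2C RTIO fallback and is an
 * assumption here; check the i3c_driver_api definition for the exact field.
 *
 *	static const struct i3c_driver_api my_ctrl_api = {
 *		...
 *		.iodev_submit = i3c_iodev_submit_fallback,
 *	};
 */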