/*
 * Copyright (c) 2023 Google LLC
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>

#include "icm42688.h"
#include "icm42688_decoder.h"
#include "icm42688_reg.h"
#include "icm42688_rtio.h"

LOG_MODULE_DECLARE(ICM42688_RTIO);

void icm42688_submit_stream(const struct device *sensor, struct rtio_iodev_sqe *iodev_sqe)
{
	const struct sensor_read_config *cfg = iodev_sqe->sqe.iodev->data;
	struct icm42688_dev_data *data = sensor->data;
	struct icm42688_cfg new_config = data->cfg;

	new_config.interrupt1_drdy = false;
	new_config.interrupt1_fifo_ths = false;
	new_config.interrupt1_fifo_full = false;
	for (int i = 0; i < cfg->count; ++i) {
		switch (cfg->triggers[i].trigger) {
		case SENSOR_TRIG_DATA_READY:
			new_config.interrupt1_drdy = true;
			break;
		case SENSOR_TRIG_FIFO_WATERMARK:
			new_config.interrupt1_fifo_ths = true;
			break;
		case SENSOR_TRIG_FIFO_FULL:
			new_config.interrupt1_fifo_full = true;
			break;
		default:
			LOG_DBG("Trigger (%d) not supported", cfg->triggers[i].trigger);
			rtio_iodev_sqe_err(iodev_sqe, -ENOTSUP);
			return;
		}
	}

	if (new_config.interrupt1_drdy != data->cfg.interrupt1_drdy ||
	    new_config.interrupt1_fifo_ths != data->cfg.interrupt1_fifo_ths ||
	    new_config.interrupt1_fifo_full != data->cfg.interrupt1_fifo_full) {
		int rc = icm42688_safely_configure(sensor, &new_config);

		if (rc != 0) {
			LOG_ERR("Failed to configure sensor");
			rtio_iodev_sqe_err(iodev_sqe, rc);
			return;
		}
	}

	data->streaming_sqe = iodev_sqe;
}
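
/*
 * Illustrative usage (a sketch, not part of this driver): an application
 * arms this streaming path with a sensor stream iodev and an RTIO context.
 * The names imu_stream, imu_rtio, and handle below are hypothetical, and
 * the exact streaming API shape may differ between Zephyr releases.
 *
 *   SENSOR_DT_STREAM_IODEV(imu_stream, DT_ALIAS(imu),
 *       {SENSOR_TRIG_FIFO_WATERMARK, SENSOR_STREAM_DATA_INCLUDE});
 *   RTIO_DEFINE(imu_rtio, 4, 4);
 *
 *   struct rtio_sqe *handle;
 *
 *   sensor_stream(&imu_stream, &imu_rtio, NULL, &handle);
 *
 * Each completed FIFO read then surfaces as a completion on imu_rtio.
 */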

static void icm42688_complete_cb(struct rtio *r, const struct rtio_sqe *sqe, void *arg)
{
	const struct device *dev = arg;
	struct icm42688_dev_data *drv_data = dev->data;
	const struct icm42688_dev_cfg *drv_cfg = dev->config;
	struct rtio_iodev_sqe *iodev_sqe = sqe->userdata;

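	/*
	 * Complete the pending stream request, reporting the number of FIFO
	 * bytes read, then re-arm the INT1 edge interrupt for the next batch.
	 */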
	rtio_iodev_sqe_ok(iodev_sqe, drv_data->fifo_count);

	gpio_pin_interrupt_configure_dt(&drv_cfg->gpio_int1, GPIO_INT_EDGE_TO_ACTIVE);
}

static void icm42688_fifo_count_cb(struct rtio *r, const struct rtio_sqe *sqe, void *arg)
{
	const struct device *dev = arg;
	struct icm42688_dev_data *drv_data = dev->data;
	const struct icm42688_dev_cfg *drv_cfg = dev->config;
	struct rtio_iodev *spi_iodev = drv_data->spi_iodev;
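	/*
	 * The two bytes read from FIFO_COUNTH/FIFO_COUNTL land in fifo_count
	 * MSB first (the device defaults to a big-endian FIFO count), so
	 * reassemble them into CPU byte order here.
	 */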
	uint8_t *fifo_count_buf = (uint8_t *)&drv_data->fifo_count;
	uint16_t fifo_count = ((fifo_count_buf[0] << 8) | fifo_count_buf[1]);

	drv_data->fifo_count = fifo_count;

	/* Pull an operation from our device iodev queue, validated to only be reads */
	struct rtio_iodev_sqe *iodev_sqe = drv_data->streaming_sqe;

	drv_data->streaming_sqe = NULL;

	/* Not inherently an underrun/overrun as we may have a buffer to fill next time */
	if (iodev_sqe == NULL) {
		LOG_DBG("No pending SQE");
		gpio_pin_interrupt_configure_dt(&drv_cfg->gpio_int1, GPIO_INT_EDGE_TO_ACTIVE);
		return;
	}

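	/*
	 * FIFO packets are 20 bytes each in high-resolution mode and 16 bytes
	 * otherwise; a useful read is at least one whole packet plus our
	 * leading icm42688_fifo_data header.
	 */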
	const size_t packet_size = drv_data->cfg.fifo_hires ? 20 : 16;
	const size_t min_read_size = sizeof(struct icm42688_fifo_data) + packet_size;
	const size_t ideal_read_size = sizeof(struct icm42688_fifo_data) + fifo_count;
	uint8_t *buf;
	uint32_t buf_len;

	if (rtio_sqe_rx_buf(iodev_sqe, min_read_size, ideal_read_size, &buf, &buf_len) != 0) {
		LOG_ERR("Failed to get buffer");
		rtio_iodev_sqe_err(iodev_sqe, -ENOMEM);
		return;
	}
	LOG_DBG("Requesting buffer [%u, %u] got %u", (unsigned int)min_read_size,
		(unsigned int)ideal_read_size, buf_len);

	/* Read FIFO and call back to rtio with rtio_sqe completion */
	/* TODO is packet format even needed? the fifo has a header per packet
	 * already
	 */
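	/*
	 * Sketch of the buffer layout handed back to the consumer:
	 *
	 *   [struct icm42688_fifo_data hdr][packet 0][packet 1]...[packet N-1]
	 *
	 * where each packet is a raw 16- or 20-byte FIFO frame that the
	 * decoder (icm42688_decoder) later converts using the full-scale and
	 * ODR fields captured in hdr.
	 */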
	struct icm42688_fifo_data hdr = {
		.header = {
			.is_fifo = true,
			.gyro_fs = drv_data->cfg.gyro_fs,
			.accel_fs = drv_data->cfg.accel_fs,
			.timestamp = drv_data->timestamp,
		},
		.int_status = drv_data->int_status,
		.gyro_odr = drv_data->cfg.gyro_odr,
		.accel_odr = drv_data->cfg.accel_odr,
	};
	uint32_t buf_avail = buf_len;

	memcpy(buf, &hdr, sizeof(hdr));
	buf_avail -= sizeof(hdr);

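	/* Clamp the read to the number of whole packets that fit in the buffer */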
	uint32_t read_len = MIN(fifo_count, buf_avail);
	uint32_t pkts = read_len / packet_size;

	read_len = pkts * packet_size;
	((struct icm42688_fifo_data *)buf)->fifo_count = read_len;

	__ASSERT_NO_MSG(read_len % packet_size == 0);

	uint8_t *read_buf = buf + sizeof(hdr);

	/* Flush out completions */
	struct rtio_cqe *cqe;

	do {
		cqe = rtio_cqe_consume(r);
		if (cqe != NULL) {
			rtio_cqe_release(r, cqe);
		}
	} while (cqe != NULL);

	/* Set up a new rtio chain to read the FIFO data and then report the
	 * result on completion
	 */
	struct rtio_sqe *write_fifo_addr = rtio_sqe_acquire(r);
	struct rtio_sqe *read_fifo_data = rtio_sqe_acquire(r);
	struct rtio_sqe *complete_op = rtio_sqe_acquire(r);
	const uint8_t reg_addr = REG_SPI_READ_BIT | FIELD_GET(REG_ADDRESS_MASK, REG_FIFO_DATA);

	rtio_sqe_prep_tiny_write(write_fifo_addr, spi_iodev, RTIO_PRIO_NORM, &reg_addr, 1, NULL);
	write_fifo_addr->flags = RTIO_SQE_TRANSACTION;
	rtio_sqe_prep_read(read_fifo_data, spi_iodev, RTIO_PRIO_NORM, read_buf, read_len,
			   iodev_sqe);
	read_fifo_data->flags = RTIO_SQE_CHAINED;
	rtio_sqe_prep_callback(complete_op, icm42688_complete_cb, (void *)dev, iodev_sqe);

	rtio_submit(r, 0);
}

static struct sensor_stream_trigger *
icm42688_get_read_config_trigger(const struct sensor_read_config *cfg,
				 enum sensor_trigger_type trig)
{
	for (int i = 0; i < cfg->count; ++i) {
		if (cfg->triggers[i].trigger == trig) {
			return &cfg->triggers[i];
		}
	}
	LOG_DBG("Unsupported trigger (%d)", trig);
	return NULL;
}

static void icm42688_int_status_cb(struct rtio *r, const struct rtio_sqe *sqr, void *arg)
{
	const struct device *dev = arg;
	struct icm42688_dev_data *drv_data = dev->data;
	const struct icm42688_dev_cfg *drv_cfg = dev->config;
	struct rtio_iodev *spi_iodev = drv_data->spi_iodev;
	struct rtio_iodev_sqe *streaming_sqe = drv_data->streaming_sqe;
	struct sensor_read_config *read_config;

	if (streaming_sqe == NULL) {
		return;
	}

	read_config = (struct sensor_read_config *)streaming_sqe->sqe.iodev->data;
	__ASSERT_NO_MSG(read_config != NULL);

	if (!read_config->is_streaming) {
		/* Oops, not really configured for streaming data */
		return;
	}

	struct sensor_stream_trigger *fifo_ths_cfg =
		icm42688_get_read_config_trigger(read_config, SENSOR_TRIG_FIFO_WATERMARK);
	bool has_fifo_ths_trig = fifo_ths_cfg != NULL &&
				 FIELD_GET(BIT_INT_STATUS_FIFO_THS, drv_data->int_status) != 0;

	struct sensor_stream_trigger *fifo_full_cfg =
		icm42688_get_read_config_trigger(read_config, SENSOR_TRIG_FIFO_FULL);
	bool has_fifo_full_trig = fifo_full_cfg != NULL &&
				  FIELD_GET(BIT_INT_STATUS_FIFO_FULL, drv_data->int_status) != 0;

	if (!has_fifo_ths_trig && !has_fifo_full_trig) {
		gpio_pin_interrupt_configure_dt(&drv_cfg->gpio_int1, GPIO_INT_EDGE_TO_ACTIVE);
		return;
	}

	/* Flush completions */
	struct rtio_cqe *cqe;

	do {
		cqe = rtio_cqe_consume(r);
		if (cqe != NULL) {
			rtio_cqe_release(r, cqe);
		}
	} while (cqe != NULL);

	enum sensor_stream_data_opt data_opt;

	if (has_fifo_ths_trig && !has_fifo_full_trig) {
		/* Only care about fifo threshold */
		data_opt = fifo_ths_cfg->opt;
	} else if (!has_fifo_ths_trig && has_fifo_full_trig) {
		/* Only care about fifo full */
		data_opt = fifo_full_cfg->opt;
	} else {
		/* Both fifo threshold and full */
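		/*
		 * MIN() relies on the enum ordering INCLUDE < NOP < DROP, so
		 * the option that preserves the most data wins.
		 */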
		data_opt = MIN(fifo_ths_cfg->opt, fifo_full_cfg->opt);
	}

	if (data_opt == SENSOR_STREAM_DATA_NOP || data_opt == SENSOR_STREAM_DATA_DROP) {
		uint8_t *buf;
		uint32_t buf_len;

		/* Clear streaming_sqe since we're done with the call */
		drv_data->streaming_sqe = NULL;
		if (rtio_sqe_rx_buf(streaming_sqe, sizeof(struct icm42688_fifo_data),
				    sizeof(struct icm42688_fifo_data), &buf, &buf_len) != 0) {
			rtio_iodev_sqe_err(streaming_sqe, -ENOMEM);
			return;
		}

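		/* Produce a header-only frame: timestamp and interrupt status, no FIFO bytes */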
		struct icm42688_fifo_data *data = (struct icm42688_fifo_data *)buf;

		memset(buf, 0, buf_len);
		data->header.timestamp = drv_data->timestamp;
		data->int_status = drv_data->int_status;
		data->fifo_count = 0;
		rtio_iodev_sqe_ok(streaming_sqe, 0);
		gpio_pin_interrupt_configure_dt(&drv_cfg->gpio_int1, GPIO_INT_EDGE_TO_ACTIVE);
		if (data_opt == SENSOR_STREAM_DATA_DROP) {
			/* Flush the FIFO */
			struct rtio_sqe *write_signal_path_reset = rtio_sqe_acquire(r);
			uint8_t write_buffer[] = {
				FIELD_GET(REG_ADDRESS_MASK, REG_SIGNAL_PATH_RESET),
				BIT_FIFO_FLUSH,
			};

			rtio_sqe_prep_tiny_write(write_signal_path_reset, spi_iodev, RTIO_PRIO_NORM,
						 write_buffer, ARRAY_SIZE(write_buffer), NULL);
			/* TODO Add a new flag for fire-and-forget so we don't have to block here */
			rtio_submit(r, 1);
			ARG_UNUSED(rtio_cqe_consume(r));
		}
		return;
	}

	/* We need the data, read the fifo length */
	struct rtio_sqe *write_fifo_count_reg = rtio_sqe_acquire(r);
	struct rtio_sqe *read_fifo_count = rtio_sqe_acquire(r);
	struct rtio_sqe *check_fifo_count = rtio_sqe_acquire(r);
	uint8_t reg = REG_SPI_READ_BIT | FIELD_GET(REG_ADDRESS_MASK, REG_FIFO_COUNTH);
	uint8_t *read_buf = (uint8_t *)&drv_data->fifo_count;

	rtio_sqe_prep_tiny_write(write_fifo_count_reg, spi_iodev, RTIO_PRIO_NORM, &reg, 1, NULL);
	write_fifo_count_reg->flags = RTIO_SQE_TRANSACTION;
	rtio_sqe_prep_read(read_fifo_count, spi_iodev, RTIO_PRIO_NORM, read_buf, 2, NULL);
	read_fifo_count->flags = RTIO_SQE_CHAINED;
	rtio_sqe_prep_callback(check_fifo_count, icm42688_fifo_count_cb, arg, NULL);

	rtio_submit(r, 0);
}

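/*
 * Handle an INT1 FIFO event. This is presumably invoked from the driver's
 * GPIO interrupt handling path (an assumption here; the trigger wiring lives
 * outside this file) and kicks off the asynchronous chain described below.
 */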
void icm42688_fifo_event(const struct device *dev)
{
	struct icm42688_dev_data *drv_data = dev->data;
	struct rtio_iodev *spi_iodev = drv_data->spi_iodev;
	struct rtio *r = drv_data->r;

	if (drv_data->streaming_sqe == NULL) {
		return;
	}

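	/* Capture the event timestamp as early as possible for the data header */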
	drv_data->timestamp = k_ticks_to_ns_floor64(k_uptime_ticks());

	/*
	 * Setup rtio chain of ops with inline calls to make decisions
	 * 1. read int status
	 * 2. call to check int status and get pending RX operation
	 * 3. read fifo len
	 * 4. call to determine read len
	 * 5. read fifo
	 * 6. call to report completion
	 */
	struct rtio_sqe *write_int_reg = rtio_sqe_acquire(r);
	struct rtio_sqe *read_int_reg = rtio_sqe_acquire(r);
	struct rtio_sqe *check_int_status = rtio_sqe_acquire(r);
	uint8_t reg = REG_SPI_READ_BIT | FIELD_GET(REG_ADDRESS_MASK, REG_INT_STATUS);

	rtio_sqe_prep_tiny_write(write_int_reg, spi_iodev, RTIO_PRIO_NORM, &reg, 1, NULL);
	write_int_reg->flags = RTIO_SQE_TRANSACTION;
	rtio_sqe_prep_read(read_int_reg, spi_iodev, RTIO_PRIO_NORM, &drv_data->int_status, 1, NULL);
	read_int_reg->flags = RTIO_SQE_CHAINED;
	rtio_sqe_prep_callback(check_int_status, icm42688_int_status_cb, (void *)dev, NULL);
	rtio_submit(r, 0);
}