/*
 * Copyright (c) 2023 Google LLC
 * Copyright (c) 2025 Croxel Inc.
 * Copyright (c) 2025 CogniPilot Foundation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/drivers/sensor.h>
#include <zephyr/drivers/sensor_clock.h>
#include <zephyr/rtio/rtio.h>
#include <zephyr/sys/atomic.h>

#include "icm45686.h"
#include "icm45686_bus.h"
#include "icm45686_stream.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(ICM45686_STREAM, CONFIG_SENSOR_LOG_LEVEL);

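/** Find the stream trigger entry matching @p trig in the read configuration,
 * or NULL if this trigger type is not part of the stream request.
 */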
static struct sensor_stream_trigger *get_read_config_trigger(const struct sensor_read_config *cfg,
							     enum sensor_trigger_type trig)
{
	for (int i = 0; i < cfg->count; ++i) {
		if (cfg->triggers[i].trigger == trig) {
			return &cfg->triggers[i];
		}
	}
	LOG_DBG("Unsupported trigger (%d)", trig);
	return NULL;
}

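/** A FIFO flush is warranted when a FIFO trigger fired and its stream
 * option requests dropping the associated data (SENSOR_STREAM_DATA_DROP).
 */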
static inline bool should_flush_fifo(const struct sensor_read_config *read_cfg,
				     uint8_t int_status)
{
	struct sensor_stream_trigger *trig_fifo_ths = get_read_config_trigger(
		read_cfg,
		SENSOR_TRIG_FIFO_WATERMARK);
	struct sensor_stream_trigger *trig_fifo_full = get_read_config_trigger(
		read_cfg,
		SENSOR_TRIG_FIFO_FULL);

	bool fifo_ths = int_status & REG_INT1_STATUS0_FIFO_THS(true);
	bool fifo_full = int_status & REG_INT1_STATUS0_FIFO_FULL(true);

	if ((trig_fifo_ths && trig_fifo_ths->opt == SENSOR_STREAM_DATA_DROP && fifo_ths) ||
	    (trig_fifo_full && trig_fifo_full->opt == SENSOR_STREAM_DATA_DROP && fifo_full)) {
		return true;
	}

	return false;
}

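/** FIFO contents should be read out when a FIFO trigger fired and its
 * stream option requests including the data (SENSOR_STREAM_DATA_INCLUDE).
 */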
static inline bool should_read_fifo(const struct sensor_read_config *read_cfg,
				    uint8_t int_status)
{
	struct sensor_stream_trigger *trig_fifo_ths = get_read_config_trigger(
		read_cfg,
		SENSOR_TRIG_FIFO_WATERMARK);
	struct sensor_stream_trigger *trig_fifo_full = get_read_config_trigger(
		read_cfg,
		SENSOR_TRIG_FIFO_FULL);

	bool fifo_ths = int_status & REG_INT1_STATUS0_FIFO_THS(true);
	bool fifo_full = int_status & REG_INT1_STATUS0_FIFO_FULL(true);

	if ((trig_fifo_ths && trig_fifo_ths->opt == SENSOR_STREAM_DATA_INCLUDE && fifo_ths) ||
	    (trig_fifo_full && trig_fifo_full->opt == SENSOR_STREAM_DATA_INCLUDE && fifo_full)) {
		return true;
	}

	return false;
}

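/** A one-shot data read is warranted when a Data-Ready trigger fired and
 * its stream option requests including the data.
 */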
static inline bool should_read_data(const struct sensor_read_config *read_cfg,
				    uint8_t int_status)
{
	struct sensor_stream_trigger *trig_drdy = get_read_config_trigger(
		read_cfg,
		SENSOR_TRIG_DATA_READY);

	bool drdy = int_status & REG_INT1_STATUS0_DRDY(true);

	if (trig_drdy && trig_drdy->opt == SENSOR_STREAM_DATA_INCLUDE && drdy) {
		return true;
	}

	return false;
}

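/** Last step of the streaming chain: clear the per-event scratch data and
 * complete the pending stream submission successfully.
 */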
static void icm45686_complete_result(struct rtio *ctx,
				     const struct rtio_sqe *sqe,
				     void *arg)
{
	const struct device *dev = (const struct device *)arg;
	struct icm45686_data *data = dev->data;

	struct rtio_iodev_sqe *iodev_sqe = data->stream.iodev_sqe;

	memset(&data->stream.data, 0, sizeof(data->stream.data));

	rtio_iodev_sqe_ok(iodev_sqe, 0);
}

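/** RTIO callback executed once the INT status and FIFO count reads complete.
 * Based on the triggers and their options, it allocates and fills the encoded
 * buffer, queues the follow-up bus operations (FIFO read, FIFO flush or
 * one-shot data read) and schedules the completion callback.
 */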
static void icm45686_handle_event_actions(struct rtio *ctx,
					  const struct rtio_sqe *sqe,
					  void *arg)
{
	const struct device *dev = (const struct device *)arg;
	struct icm45686_data *data = dev->data;
	const struct sensor_read_config *read_cfg = data->stream.iodev_sqe->sqe.iodev->data;
	uint8_t int_status = data->stream.data.int_status;
	int err;

	data->stream.data.events.drdy = int_status & REG_INT1_STATUS0_DRDY(true);
	data->stream.data.events.fifo_ths = int_status & REG_INT1_STATUS0_FIFO_THS(true);
	data->stream.data.events.fifo_full = int_status & REG_INT1_STATUS0_FIFO_FULL(true);

	__ASSERT(data->stream.data.fifo_count > 0 &&
		 data->stream.data.fifo_count <= FIFO_COUNT_MAX_HIGH_RES,
		 "Invalid fifo count: %d", data->stream.data.fifo_count);

	struct icm45686_encoded_data *buf;
	uint32_t buf_len;
	uint32_t buf_len_required = sizeof(struct icm45686_encoded_header);

	/** We just need the header to communicate the events that occurred
	 * during this SQE. Only include more data if the associated trigger
	 * needs it.
	 */
	if (should_read_fifo(read_cfg, int_status)) {
		buf_len_required += (data->stream.data.fifo_count *
				     sizeof(struct icm45686_encoded_fifo_payload));
	} else if (should_read_data(read_cfg, int_status)) {
		buf_len_required += sizeof(struct icm45686_encoded_payload);
	}

	err = rtio_sqe_rx_buf(data->stream.iodev_sqe,
			      buf_len_required,
			      buf_len_required,
			      (uint8_t **)&buf,
			      &buf_len);
	__ASSERT(err == 0, "Failed to acquire buffer (len: %d) for encoded data: %d. "
			   "Please revisit RTIO queue sizing and look for "
			   "bottlenecks during sensor data processing",
			   buf_len_required, err);

	/** Still report the error even if asserts are disabled */
	if (err) {
		struct rtio_iodev_sqe *iodev_sqe = data->stream.iodev_sqe;

		LOG_ERR("Failed to acquire buffer for encoded data: %d", err);

		data->stream.iodev_sqe = NULL;
		rtio_iodev_sqe_err(iodev_sqe, err);
		return;
	}

	LOG_DBG("Alloc buf - required: %d, alloc: %d", buf_len_required, buf_len);

	buf->header.timestamp = data->stream.data.timestamp;
	buf->header.fifo_count = 0;
	buf->header.channels = 0;
	buf->header.events = REG_INT1_STATUS0_DRDY(data->stream.data.events.drdy) |
			     REG_INT1_STATUS0_FIFO_THS(data->stream.data.events.fifo_ths) |
			     REG_INT1_STATUS0_FIFO_FULL(data->stream.data.events.fifo_full);

	if (should_read_fifo(read_cfg, int_status)) {

		struct rtio_sqe *data_wr_sqe = rtio_sqe_acquire(ctx);
		struct rtio_sqe *data_rd_sqe = rtio_sqe_acquire(ctx);
		uint8_t read_reg;

		if (!data_wr_sqe || !data_rd_sqe) {
			struct rtio_iodev_sqe *iodev_sqe = data->stream.iodev_sqe;

			LOG_ERR("Failed to acquire RTIO SQEs");

			data->stream.iodev_sqe = NULL;
			rtio_iodev_sqe_err(iodev_sqe, -ENOMEM);
			return;
		}

		/** In FIFO data, the scale is fixed, irrespective of
		 * the configured settings.
		 */
		buf->header.accel_fs = ICM45686_DT_ACCEL_FS_32;
		buf->header.gyro_fs = ICM45686_DT_GYRO_FS_4000;
		buf->header.channels = 0x7F; /* Signal all channels are available */
		buf->header.fifo_count = data->stream.data.fifo_count;

		read_reg = REG_FIFO_DATA | REG_SPI_READ_BIT;
		rtio_sqe_prep_tiny_write(data_wr_sqe,
					 data->rtio.iodev,
					 RTIO_PRIO_HIGH,
					 &read_reg,
					 1,
					 NULL);
		data_wr_sqe->flags |= RTIO_SQE_TRANSACTION;

		rtio_sqe_prep_read(data_rd_sqe,
				   data->rtio.iodev,
				   RTIO_PRIO_HIGH,
				   (uint8_t *)&buf->fifo_payload,
				   (buf->header.fifo_count *
				   sizeof(struct icm45686_encoded_fifo_payload)),
				   NULL);
		data_rd_sqe->flags |= RTIO_SQE_CHAINED;

	} else if (should_flush_fifo(read_cfg, int_status)) {

		struct rtio_sqe *write_sqe = rtio_sqe_acquire(ctx);

		if (!write_sqe) {
			struct rtio_iodev_sqe *iodev_sqe = data->stream.iodev_sqe;

			LOG_ERR("Failed to acquire RTIO SQE");

			data->stream.iodev_sqe = NULL;
			rtio_iodev_sqe_err(iodev_sqe, -ENOMEM);
			return;
		}

		uint8_t write_reg[] = {
			REG_FIFO_CONFIG2,
			REG_FIFO_CONFIG2_FIFO_FLUSH(true) |
			REG_FIFO_CONFIG2_FIFO_WM_GT_THS(true)
		};

		rtio_sqe_prep_tiny_write(write_sqe,
					 data->rtio.iodev,
					 RTIO_PRIO_HIGH,
					 write_reg,
					 sizeof(write_reg),
					 NULL);
		write_sqe->flags |= RTIO_SQE_CHAINED;

	} else if (should_read_data(read_cfg, int_status)) {

		buf->header.accel_fs = data->edata.header.accel_fs;
		buf->header.gyro_fs = data->edata.header.gyro_fs;
		buf->header.channels = 0x7F; /* Signal all channels are available */

		struct rtio_sqe *write_sqe = rtio_sqe_acquire(ctx);
		struct rtio_sqe *read_sqe = rtio_sqe_acquire(ctx);

		if (!write_sqe || !read_sqe) {
			struct rtio_iodev_sqe *iodev_sqe = data->stream.iodev_sqe;

			LOG_ERR("Failed to acquire RTIO SQEs");

			data->stream.iodev_sqe = NULL;
			rtio_iodev_sqe_err(iodev_sqe, -ENOMEM);
			return;
		}

		uint8_t read_reg = REG_ACCEL_DATA_X1_UI | REG_SPI_READ_BIT;

		rtio_sqe_prep_tiny_write(write_sqe,
					 data->rtio.iodev,
					 RTIO_PRIO_HIGH,
					 &read_reg,
					 1,
					 NULL);
		write_sqe->flags |= RTIO_SQE_TRANSACTION;

		rtio_sqe_prep_read(read_sqe,
				   data->rtio.iodev,
				   RTIO_PRIO_HIGH,
				   buf->payload.buf,
				   sizeof(buf->payload.buf),
				   NULL);
		read_sqe->flags |= RTIO_SQE_CHAINED;
	}

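	/** Drain and release any pending completions (e.g. from previously
	 * submitted bus transfers) before queuing the completion callback.
	 */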
	struct rtio_cqe *cqe;

	do {
		cqe = rtio_cqe_consume(ctx);
		if (cqe != NULL) {
			err = cqe->result;
			rtio_cqe_release(ctx, cqe);
		}
	} while (cqe != NULL);

	struct rtio_sqe *cb_sqe = rtio_sqe_acquire(ctx);

	if (!cb_sqe) {
		LOG_ERR("Failed to acquire RTIO SQE for completion callback");
		rtio_iodev_sqe_err(data->stream.iodev_sqe, -ENOMEM);
		return;
	}

	rtio_sqe_prep_callback_no_cqe(cb_sqe,
				      icm45686_complete_result,
				      (void *)dev,
				      data->stream.iodev_sqe);

	rtio_submit(ctx, 0);
}

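/** Services an interrupt event: takes a timestamp and kicks off an
 * asynchronous read of the INT status register and FIFO count, chaining
 * icm45686_handle_event_actions() as the completion callback. If no stream
 * submission is pending, interrupts are disabled instead.
 */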
static void icm45686_event_handler(const struct device *dev)
{
	struct icm45686_data *data = dev->data;
	uint8_t val = 0;
	uint64_t cycles;
	int err;

	if (!data->stream.iodev_sqe ||
	    FIELD_GET(RTIO_SQE_CANCELED, data->stream.iodev_sqe->sqe.flags)) {
		LOG_WRN("Callback triggered with no streaming submission - Disabling interrupts");

		struct rtio_sqe *write_sqe = rtio_sqe_acquire(data->rtio.ctx);
		uint8_t wr_data[] = {REG_INT1_CONFIG0, 0x00};

		rtio_sqe_prep_tiny_write(write_sqe,
					 data->rtio.iodev,
					 RTIO_PRIO_HIGH,
					 wr_data,
					 sizeof(wr_data),
					 NULL);
		rtio_submit(data->rtio.ctx, 0);

		data->stream.settings.enabled.drdy = false;
		data->stream.settings.enabled.fifo_ths = false;
		data->stream.settings.enabled.fifo_full = false;
		return;
	} else if (!atomic_cas(&data->stream.in_progress, 0, 1)) {
		/** There's an on-going event being handled; skip this one */
		return;
	}

	err = sensor_clock_get_cycles(&cycles);
	if (err) {
		struct rtio_iodev_sqe *iodev_sqe = data->stream.iodev_sqe;

		LOG_ERR("Failed to get timestamp: %d", err);

		data->stream.iodev_sqe = NULL;
		rtio_iodev_sqe_err(iodev_sqe, err);
		return;
	}

	data->stream.data.timestamp = sensor_clock_cycles_to_ns(cycles);

	/** Prepare an asynchronous read of the INT status register */
	struct rtio_sqe *write_sqe = rtio_sqe_acquire(data->rtio.ctx);
	struct rtio_sqe *read_sqe = rtio_sqe_acquire(data->rtio.ctx);
	struct rtio_sqe *write_fifo_ct_sqe = rtio_sqe_acquire(data->rtio.ctx);
	struct rtio_sqe *read_fifo_ct_sqe = rtio_sqe_acquire(data->rtio.ctx);
	struct rtio_sqe *complete_sqe = rtio_sqe_acquire(data->rtio.ctx);

	if (!write_sqe || !read_sqe || !write_fifo_ct_sqe || !read_fifo_ct_sqe || !complete_sqe) {
		struct rtio_iodev_sqe *iodev_sqe = data->stream.iodev_sqe;

		LOG_ERR("Failed to acquire RTIO SQEs");

		data->stream.iodev_sqe = NULL;
		rtio_iodev_sqe_err(iodev_sqe, -ENOMEM);
		return;
	}

	/** Directly read Status Register to determine what triggered the event */
	val = REG_INT1_STATUS0 | REG_SPI_READ_BIT;
	rtio_sqe_prep_tiny_write(write_sqe,
				 data->rtio.iodev,
				 RTIO_PRIO_HIGH,
				 &val,
				 1,
				 NULL);
	write_sqe->flags |= RTIO_SQE_TRANSACTION;

	rtio_sqe_prep_read(read_sqe,
			   data->rtio.iodev,
			   RTIO_PRIO_HIGH,
			   &data->stream.data.int_status,
			   1,
			   NULL);
	read_sqe->flags |= RTIO_SQE_CHAINED;

	/** Preemptively read the FIFO count so that the next callback can
	 * decide how much FIFO data to read (if needed).
	 */
	val = REG_FIFO_COUNT_0 | REG_SPI_READ_BIT;
	rtio_sqe_prep_tiny_write(write_fifo_ct_sqe,
				 data->rtio.iodev,
				 RTIO_PRIO_HIGH,
				 &val,
				 1,
				 NULL);
	write_fifo_ct_sqe->flags |= RTIO_SQE_TRANSACTION;

	rtio_sqe_prep_read(read_fifo_ct_sqe,
			   data->rtio.iodev,
			   RTIO_PRIO_HIGH,
			   (uint8_t *)&data->stream.data.fifo_count,
			   2,
			   NULL);
	read_fifo_ct_sqe->flags |= RTIO_SQE_CHAINED;

	rtio_sqe_prep_callback_no_cqe(complete_sqe,
				      icm45686_handle_event_actions,
				      (void *)dev,
				      data->stream.iodev_sqe);

	rtio_submit(data->rtio.ctx, 0);
}

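/** GPIO callback attached to the interrupt pin: recover the device handle
 * from the stream context and dispatch the event handler.
 */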
static void icm45686_gpio_callback(const struct device *gpio_dev,
				   struct gpio_callback *cb,
				   uint32_t pins)
{
	struct icm45686_stream *stream = CONTAINER_OF(cb,
						      struct icm45686_stream,
						      cb);
	const struct device *dev = stream->dev;

	icm45686_event_handler(dev);
}

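/** Determine whether the requested trigger configuration (enabled triggers
 * and their data options) differs between two stream contexts.
 */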
static inline bool settings_changed(const struct icm45686_stream *a,
				    const struct icm45686_stream *b)
{
	return (a->settings.enabled.drdy != b->settings.enabled.drdy) ||
	       (a->settings.opt.drdy != b->settings.opt.drdy) ||
	       (a->settings.enabled.fifo_ths != b->settings.enabled.fifo_ths) ||
	       (a->settings.opt.fifo_ths != b->settings.opt.fifo_ths) ||
	       (a->settings.enabled.fifo_full != b->settings.enabled.fifo_full) ||
	       (a->settings.opt.fifo_full != b->settings.opt.fifo_full);
}

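/** Entry point for streaming submissions: record the requested triggers and,
 * only if they changed since the last submission, reconfigure the interrupt
 * and FIFO registers accordingly.
 */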
void icm45686_stream_submit(const struct device *dev,
			    struct rtio_iodev_sqe *iodev_sqe)
{
	const struct sensor_read_config *read_cfg = iodev_sqe->sqe.iodev->data;
	struct icm45686_data *data = dev->data;
	const struct icm45686_config *cfg = dev->config;
	uint8_t val = 0;
	int err;

	/** This separate struct is required because the streaming API uses a
	 * multi-shot RTIO submission (it re-submits itself after completion),
	 * so there is no context to tell whether this is the first submission
	 * that kicked things off. Instead, we infer it by checking whether
	 * the read-config has changed, and only reconfigure in that case.
	 */
	struct icm45686_stream stream = {0};

	for (size_t i = 0; i < read_cfg->count; i++) {
		switch (read_cfg->triggers[i].trigger) {
		case SENSOR_TRIG_DATA_READY:
			stream.settings.enabled.drdy = true;
			stream.settings.opt.drdy = read_cfg->triggers[i].opt;
			break;
		case SENSOR_TRIG_FIFO_WATERMARK:
			stream.settings.enabled.fifo_ths = true;
			stream.settings.opt.fifo_ths = read_cfg->triggers[i].opt;
			break;
		case SENSOR_TRIG_FIFO_FULL:
			stream.settings.enabled.fifo_full = true;
			stream.settings.opt.fifo_full = read_cfg->triggers[i].opt;
			break;
		default:
			LOG_ERR("Unsupported trigger (%d)", read_cfg->triggers[i].trigger);
			rtio_iodev_sqe_err(iodev_sqe, -ENOTSUP);
			return;
		}
	}

	__ASSERT(stream.settings.enabled.drdy ^
		 (stream.settings.enabled.fifo_ths || stream.settings.enabled.fifo_full),
		 "DRDY should not be enabled alongside FIFO triggers");

	__ASSERT(!stream.settings.enabled.fifo_ths ||
		 (stream.settings.enabled.fifo_ths && cfg->settings.fifo_watermark),
		 "FIFO watermark trigger requires a watermark level. Please "
		 "configure it on the device-tree");

	/* Store context for next submission (handled within callbacks) */
	data->stream.iodev_sqe = iodev_sqe;

	(void)atomic_clear(&data->stream.in_progress);

	if (settings_changed(&data->stream, &stream)) {

		data->stream.settings = stream.settings;

		/* Disable all interrupts before re-configuring */
		err = icm45686_bus_write(dev, REG_INT1_CONFIG0, &val, 1);
		if (err) {
			LOG_ERR("Failed to disable interrupts on INT1_CONFIG0: %d", err);
			data->stream.iodev_sqe = NULL;
			rtio_iodev_sqe_err(iodev_sqe, err);
			return;
		}

		/* Read flags to clear them */
		err = icm45686_bus_read(dev, REG_INT1_STATUS0, &val, 1);
		if (err) {
			LOG_ERR("Failed to read INT1_STATUS0: %d", err);
			data->stream.iodev_sqe = NULL;
			rtio_iodev_sqe_err(iodev_sqe, err);
			return;
		}

		val = REG_FIFO_CONFIG3_FIFO_EN(false) |
		      REG_FIFO_CONFIG3_FIFO_ACCEL_EN(false) |
		      REG_FIFO_CONFIG3_FIFO_GYRO_EN(false) |
		      REG_FIFO_CONFIG3_FIFO_HIRES_EN(false);
		err = icm45686_bus_write(dev, REG_FIFO_CONFIG3, &val, 1);
		if (err) {
			LOG_ERR("Failed to disable all FIFO settings: %d", err);
			data->stream.iodev_sqe = NULL;
			rtio_iodev_sqe_err(iodev_sqe, err);
			return;
		}

		val = REG_INT1_CONFIG0_STATUS_EN_DRDY(data->stream.settings.enabled.drdy) |
		      REG_INT1_CONFIG0_STATUS_EN_FIFO_THS(data->stream.settings.enabled.fifo_ths) |
		      REG_INT1_CONFIG0_STATUS_EN_FIFO_FULL(data->stream.settings.enabled.fifo_full);
		err = icm45686_bus_write(dev, REG_INT1_CONFIG0, &val, 1);
		if (err) {
			LOG_ERR("Failed to configure INT1_CONFIG0: %d", err);
			data->stream.iodev_sqe = NULL;
			rtio_iodev_sqe_err(iodev_sqe, err);
			return;
		}

		val = REG_FIFO_CONFIG0_FIFO_MODE(REG_FIFO_CONFIG0_FIFO_MODE_BYPASS) |
		      REG_FIFO_CONFIG0_FIFO_DEPTH(REG_FIFO_CONFIG0_FIFO_DEPTH_2K);
		err = icm45686_bus_write(dev, REG_FIFO_CONFIG0, &val, 1);
		if (err) {
			LOG_ERR("Failed to disable FIFO: %d", err);
			data->stream.iodev_sqe = NULL;
			rtio_iodev_sqe_err(iodev_sqe, err);
			return;
		}

		if (data->stream.settings.enabled.fifo_ths ||
		    data->stream.settings.enabled.fifo_full) {
			uint16_t fifo_ths = data->stream.settings.enabled.fifo_ths ?
					    cfg->settings.fifo_watermark : 0;

			val = REG_FIFO_CONFIG2_FIFO_WM_GT_THS(true) |
			      REG_FIFO_CONFIG2_FIFO_FLUSH(true);
			err = icm45686_bus_write(dev, REG_FIFO_CONFIG2, &val, 1);
			if (err) {
				LOG_ERR("Failed to configure greater-than FIFO threshold: %d", err);
				data->stream.iodev_sqe = NULL;
				rtio_iodev_sqe_err(iodev_sqe, err);
				return;
			}

			err = icm45686_bus_write(dev, REG_FIFO_CONFIG1_0, (uint8_t *)&fifo_ths, 2);
			if (err) {
				LOG_ERR("Failed to configure FIFO watermark: %d", err);
				data->stream.iodev_sqe = NULL;
				rtio_iodev_sqe_err(iodev_sqe, err);
				return;
			}

			val = REG_FIFO_CONFIG0_FIFO_MODE(REG_FIFO_CONFIG0_FIFO_MODE_STREAM) |
			      REG_FIFO_CONFIG0_FIFO_DEPTH(REG_FIFO_CONFIG0_FIFO_DEPTH_2K);
			err = icm45686_bus_write(dev, REG_FIFO_CONFIG0, &val, 1);
			if (err) {
				LOG_ERR("Failed to set FIFO in stream mode: %d", err);
				data->stream.iodev_sqe = NULL;
				rtio_iodev_sqe_err(iodev_sqe, err);
				return;
			}

			val = REG_FIFO_CONFIG3_FIFO_EN(true) |
			      REG_FIFO_CONFIG3_FIFO_ACCEL_EN(true) |
			      REG_FIFO_CONFIG3_FIFO_GYRO_EN(true) |
			      REG_FIFO_CONFIG3_FIFO_HIRES_EN(true);
			err = icm45686_bus_write(dev, REG_FIFO_CONFIG3, &val, 1);
			if (err) {
				LOG_ERR("Failed to enable FIFO: %d", err);
				data->stream.iodev_sqe = NULL;
				rtio_iodev_sqe_err(iodev_sqe, err);
				return;
			}
		}
	}
}

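/** One-time streaming setup: configure the interrupt GPIO and its callback,
 * disable all interrupt sources, and set INT1 as push-pull, active-high.
 */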
int icm45686_stream_init(const struct device *dev)
{
	const struct icm45686_config *cfg = dev->config;
	struct icm45686_data *data = dev->data;
	uint8_t val = 0;
	int err;

	/** Needed to get back the device handle from the callback context */
	data->stream.dev = dev;

	(void)atomic_clear(&data->stream.in_progress);

	if (!cfg->int_gpio.port) {
		LOG_ERR("Interrupt GPIO not supplied");
		return -ENODEV;
	}

	if (!gpio_is_ready_dt(&cfg->int_gpio)) {
		LOG_ERR("Interrupt GPIO not ready");
		return -ENODEV;
	}

	err = gpio_pin_configure_dt(&cfg->int_gpio, GPIO_INPUT);
	if (err) {
		LOG_ERR("Failed to configure interrupt GPIO");
		return -EIO;
	}

	gpio_init_callback(&data->stream.cb,
			   icm45686_gpio_callback,
			   BIT(cfg->int_gpio.pin));

	err = gpio_add_callback(cfg->int_gpio.port, &data->stream.cb);
	if (err) {
		LOG_ERR("Failed to add interrupt callback");
		return -EIO;
	}

	err = gpio_pin_interrupt_configure_dt(&cfg->int_gpio,
					      GPIO_INT_EDGE_TO_ACTIVE);
	if (err) {
		LOG_ERR("Failed to configure interrupt");
		return -EIO;
	}

	err = icm45686_bus_write(dev, REG_INT1_CONFIG0, &val, 1);
	if (err) {
		LOG_ERR("Failed to disable all INTs");
		return -EIO;
	}

	val = REG_INT1_CONFIG2_EN_OPEN_DRAIN(false) |
	      REG_INT1_CONFIG2_EN_ACTIVE_HIGH(true);

	err = icm45686_bus_write(dev, REG_INT1_CONFIG2, &val, 1);
	if (err) {
		LOG_ERR("Failed to configure INT as push-pull: %d", err);
		return -EIO;
	}

	return 0;
}