/*
 * Copyright (c) 2024 Analog Devices Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
#include <zephyr/drivers/sensor.h>

#include "adxl367.h"

LOG_MODULE_DECLARE(ADXL367, CONFIG_SENSOR_LOG_LEVEL);

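/* Complete the pending streaming request (ok or error) and re-arm the sensor interrupt. */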
static void adxl367_sqe_done(const struct adxl367_dev_config *cfg,
			     struct rtio_iodev_sqe *iodev_sqe, int res)
{
	if (res < 0) {
		rtio_iodev_sqe_err(iodev_sqe, res);
	} else {
		rtio_iodev_sqe_ok(iodev_sqe, res);
	}

	gpio_pin_interrupt_configure_dt(&cfg->interrupt, GPIO_INT_EDGE_TO_ACTIVE);
}

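/* RTIO callback that re-enables the sensor interrupt line once the FIFO flush chain completes. */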
static void adxl367_irq_en_cb(struct rtio *r, const struct rtio_sqe *sqr, void *arg)
{
	const struct device *dev = (const struct device *)arg;
	const struct adxl367_dev_config *cfg = dev->config;

	gpio_pin_interrupt_configure_dt(&cfg->interrupt, GPIO_INT_EDGE_TO_ACTIVE);
}

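/*
 * Flush the FIFO with an RTIO submission: enter standby, disable the FIFO,
 * restore the configured FIFO mode, return to measurement mode, and re-enable
 * the interrupt from the completion callback.
 */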
static void adxl367_fifo_flush_rtio(const struct device *dev)
{
	struct adxl367_data *data = dev->data;
	uint8_t pow_reg = data->pwr_reg;

	pow_reg &= ~ADXL367_POWER_CTL_MEASURE_MSK;
	pow_reg |= FIELD_PREP(ADXL367_POWER_CTL_MEASURE_MSK, ADXL367_STANDBY);

	struct rtio_sqe *sqe = rtio_sqe_acquire(data->rtio_ctx);
	const uint8_t reg_addr_w[3] = {ADXL367_SPI_WRITE_REG, ADXL367_POWER_CTL, pow_reg};

	rtio_sqe_prep_tiny_write(sqe, data->iodev, RTIO_PRIO_NORM, reg_addr_w, 3, NULL);

	sqe = rtio_sqe_acquire(data->rtio_ctx);
	const uint8_t reg_addr_w2[3] = {ADXL367_SPI_WRITE_REG, ADXL367_FIFO_CONTROL,
			FIELD_PREP(ADXL367_FIFO_CONTROL_FIFO_MODE_MSK, ADXL367_FIFO_DISABLED)};

	rtio_sqe_prep_tiny_write(sqe, data->iodev, RTIO_PRIO_NORM, reg_addr_w2, 3, NULL);

	sqe = rtio_sqe_acquire(data->rtio_ctx);
	const uint8_t reg_addr_w3[3] = {ADXL367_SPI_WRITE_REG, ADXL367_FIFO_CONTROL,
			FIELD_PREP(ADXL367_FIFO_CONTROL_FIFO_MODE_MSK, data->fifo_config.fifo_mode)};

	rtio_sqe_prep_tiny_write(sqe, data->iodev, RTIO_PRIO_NORM, reg_addr_w3, 3, NULL);

	pow_reg = data->pwr_reg;

	pow_reg &= ~ADXL367_POWER_CTL_MEASURE_MSK;
	pow_reg |= FIELD_PREP(ADXL367_POWER_CTL_MEASURE_MSK, ADXL367_MEASURE);

	sqe = rtio_sqe_acquire(data->rtio_ctx);
	struct rtio_sqe *complete_op = rtio_sqe_acquire(data->rtio_ctx);
	const uint8_t reg_addr_w4[3] = {ADXL367_SPI_WRITE_REG, ADXL367_POWER_CTL, pow_reg};

	rtio_sqe_prep_tiny_write(sqe, data->iodev, RTIO_PRIO_NORM, reg_addr_w4, 3, NULL);
	sqe->flags |= RTIO_SQE_CHAINED;
	rtio_sqe_prep_callback(complete_op, adxl367_irq_en_cb, (void *)dev, NULL);
	rtio_submit(data->rtio_ctx, 0);
}

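/*
 * Start a streaming read: disable the interrupt, map the requested FIFO
 * watermark/overrun triggers to INT1 and flush the FIFO when the trigger
 * configuration changed, then re-enable the interrupt and store the request
 * so it can be completed from the interrupt path.
 */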
void adxl367_submit_stream(const struct device *dev, struct rtio_iodev_sqe *iodev_sqe)
{
	const struct sensor_read_config *cfg =
			(const struct sensor_read_config *)iodev_sqe->sqe.iodev->data;
	struct adxl367_data *data = (struct adxl367_data *)dev->data;
	const struct adxl367_dev_config *cfg_367 = dev->config;
	uint8_t int_mask = 0;
	uint8_t int_value = 0;
	uint8_t fifo_wmark_irq = 0;
	uint8_t fifo_full_irq = 0;

	int rc = gpio_pin_interrupt_configure_dt(&cfg_367->interrupt,
						 GPIO_INT_DISABLE);
	if (rc < 0) {
		return;
	}

	for (size_t i = 0; i < cfg->count; i++) {
		if (cfg->triggers[i].trigger == SENSOR_TRIG_FIFO_WATERMARK) {
			int_mask |= ADXL367_FIFO_WATERMARK;
			int_value |= ADXL367_FIFO_WATERMARK;
			fifo_wmark_irq = 1;
		}

		if (cfg->triggers[i].trigger == SENSOR_TRIG_FIFO_FULL) {
			int_mask |= ADXL367_FIFO_OVERRUN;
			int_value |= ADXL367_FIFO_OVERRUN;
			fifo_full_irq = 1;
		}
	}

	if (data->fifo_wmark_irq && (fifo_wmark_irq == 0)) {
		int_mask |= ADXL367_FIFO_WATERMARK;
	}

	if (data->fifo_full_irq && (fifo_full_irq == 0)) {
		int_mask |= ADXL367_FIFO_OVERRUN;
	}

	/* Do not flush the FIFO if interrupts are already enabled. */
	if ((fifo_wmark_irq != data->fifo_wmark_irq) || (fifo_full_irq != data->fifo_full_irq)) {
		data->fifo_wmark_irq = fifo_wmark_irq;
		data->fifo_full_irq = fifo_full_irq;

		rc = data->hw_tf->write_reg_mask(dev, ADXL367_INTMAP1_LOWER, int_mask, int_value);
		if (rc < 0) {
			return;
		}

		/* Flush the FIFO by disabling it. Save current mode for after the reset. */
		enum adxl367_fifo_mode current_fifo_mode = data->fifo_config.fifo_mode;

		if (current_fifo_mode == ADXL367_FIFO_DISABLED) {
			LOG_ERR("ERROR: FIFO DISABLED");
			return;
		}

		adxl367_set_op_mode(dev, ADXL367_STANDBY);

		adxl367_fifo_setup(dev, ADXL367_FIFO_DISABLED, data->fifo_config.fifo_format,
				   data->fifo_config.fifo_read_mode, data->fifo_config.fifo_samples);

		adxl367_fifo_setup(dev, current_fifo_mode, data->fifo_config.fifo_format,
				   data->fifo_config.fifo_read_mode, data->fifo_config.fifo_samples);

		adxl367_set_op_mode(dev, cfg_367->op_mode);
	}

	rc = gpio_pin_interrupt_configure_dt(&cfg_367->interrupt,
					     GPIO_INT_EDGE_TO_ACTIVE);
	if (rc < 0) {
		return;
	}

	data->sqe = iodev_sqe;
}

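/* Completion callback for the FIFO burst read: finishes the streaming request. */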
static void adxl367_fifo_read_cb(struct rtio *rtio_ctx, const struct rtio_sqe *sqe, void *arg)
{
	const struct device *dev = (const struct device *)arg;
	const struct adxl367_dev_config *cfg = (const struct adxl367_dev_config *)dev->config;
	struct rtio_iodev_sqe *iodev_sqe = sqe->userdata;

	adxl367_sqe_done(cfg, iodev_sqe, 0);
}

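/* Return the number of samples in one FIFO packet for the configured FIFO format. */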
size_t adxl367_get_numb_of_samp_in_pkt(const struct adxl367_data *data)
{
	size_t sample_numb;

	switch (data->fifo_config.fifo_format) {
	case ADXL367_FIFO_FORMAT_X:
	case ADXL367_FIFO_FORMAT_Y:
	case ADXL367_FIFO_FORMAT_Z:
		sample_numb = 1;
		break;

	case ADXL367_FIFO_FORMAT_XT:
	case ADXL367_FIFO_FORMAT_YT:
	case ADXL367_FIFO_FORMAT_ZT:
	case ADXL367_FIFO_FORMAT_XA:
	case ADXL367_FIFO_FORMAT_YA:
	case ADXL367_FIFO_FORMAT_ZA:
		sample_numb = 2;
		break;

	case ADXL367_FIFO_FORMAT_XYZT:
	case ADXL367_FIFO_FORMAT_XYZA:
		sample_numb = 4;
		break;

	default:
		sample_numb = 3;
		break;
	}

	return sample_numb;
}

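/*
 * Runs after the FIFO entries count has been read: computes the packet and
 * burst sizes (including the packed 12-bit mode), fills in the
 * adxl367_fifo_data header of the caller's buffer and chains the FIFO burst
 * read with adxl367_fifo_read_cb as its completion.
 */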
static void adxl367_process_fifo_samples_cb(struct rtio *r, const struct rtio_sqe *sqr, void *arg)
{
	const struct device *dev = (const struct device *)arg;
	struct adxl367_data *data = (struct adxl367_data *)dev->data;
	const struct adxl367_dev_config *cfg = (const struct adxl367_dev_config *)dev->config;
	struct rtio_iodev_sqe *current_sqe = data->sqe;
	uint16_t fifo_samples = ((data->fifo_ent[0]) | ((data->fifo_ent[1] & 0x3) << 8));
	size_t sample_numb = adxl367_get_numb_of_samp_in_pkt(data);
	size_t packet_size = sample_numb;
	uint16_t fifo_packet_cnt = fifo_samples / sample_numb;
	uint16_t fifo_bytes = 0;

	switch (data->fifo_config.fifo_read_mode) {
	case ADXL367_8B:
		fifo_bytes = fifo_packet_cnt;
		break;
	case ADXL367_12B:
		unsigned int fifo_bits = fifo_packet_cnt * sample_numb * 12;

		if (fifo_bits % 8 == 0) {
			fifo_bytes = fifo_bits / 8;
		} else {
			while (fifo_bits % 8) {
				if (fifo_bits >= sample_numb * 12) {
					fifo_bits -= sample_numb * 12;
				} else {
					fifo_bits = 0;
					break;
				}
			}

			if (fifo_bits) {
				fifo_bytes = fifo_bits / 8;
			} else {
				LOG_ERR("fifo_bytes error: %d", fifo_bytes);
				adxl367_sqe_done(cfg, current_sqe, -1);
				return;
			}
		}

		packet_size = packet_size * 12 / 8;
		if ((sample_numb * 12) % 8) {
			packet_size++;
		}
		break;

	default:
		fifo_bytes = fifo_packet_cnt * 2;
		packet_size *= 2;
		break;
	}

	data->sqe = NULL;

	/* Not inherently an underrun/overrun as we may have a buffer to fill next time */
	if (current_sqe == NULL) {
		LOG_ERR("No pending SQE");
		gpio_pin_interrupt_configure_dt(&cfg->interrupt, GPIO_INT_EDGE_TO_ACTIVE);
		return;
	}

	const size_t min_read_size = sizeof(struct adxl367_fifo_data) + packet_size;
	const size_t ideal_read_size = sizeof(struct adxl367_fifo_data) + fifo_bytes;

	uint8_t *buf;
	uint32_t buf_len;

	if (rtio_sqe_rx_buf(current_sqe, min_read_size, ideal_read_size, &buf, &buf_len) != 0) {
		LOG_ERR("Failed to get buffer");
		adxl367_sqe_done(cfg, current_sqe, -ENOMEM);
		return;
	}

	LOG_DBG("Requesting buffer [%u, %u] got %u", (unsigned int)min_read_size,
		(unsigned int)ideal_read_size, buf_len);

	/* Read FIFO and call back to rtio with rtio_sqe completion */
	struct adxl367_fifo_data *hdr = (struct adxl367_fifo_data *)buf;

	hdr->is_fifo = 1;
	hdr->timestamp = data->timestamp;
	hdr->int_status = data->status;
	hdr->accel_odr = data->odr;
	hdr->range = data->range;
	hdr->fifo_read_mode = data->fifo_config.fifo_read_mode;

	if (data->fifo_config.fifo_read_mode == ADXL367_12B) {
		hdr->packet_size = sample_numb;
	} else {
		hdr->packet_size = packet_size;
	}

	if ((data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_X) ||
	    (data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_XT) ||
	    (data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_XA) ||
	    (data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_XYZ) ||
	    (data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_XYZA) ||
	    (data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_XYZT)) {
		hdr->has_x = 1;
	}

	if ((data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_Y) ||
	    (data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_YT) ||
	    (data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_YA) ||
	    (data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_XYZ) ||
	    (data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_XYZA) ||
	    (data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_XYZT)) {
		hdr->has_y = 1;
	}

	if ((data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_Z) ||
	    (data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_ZT) ||
	    (data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_ZA) ||
	    (data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_XYZ) ||
	    (data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_XYZA) ||
	    (data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_XYZT)) {
		hdr->has_z = 1;
	}

	if ((data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_XT) ||
	    (data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_YT) ||
	    (data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_ZT) ||
	    (data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_XYZT)) {
		hdr->has_tmp = 1;
	}

	if ((data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_XA) ||
	    (data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_YA) ||
	    (data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_ZA) ||
	    (data->fifo_config.fifo_format == ADXL367_FIFO_FORMAT_XYZA)) {
		hdr->has_adc = 1;
	}

	uint32_t buf_avail = buf_len;

	buf_avail -= sizeof(*hdr);

	uint32_t read_len = MIN(fifo_bytes, buf_avail);

	if (data->fifo_config.fifo_read_mode == ADXL367_12B) {
		unsigned int read_bits = read_len * 8;
		unsigned int packet_size_bits = sample_numb * 12;
		unsigned int read_packet_num = read_bits / packet_size_bits;
		unsigned int read_len_bits = read_packet_num * sample_numb * 12;

		if (read_len_bits % 8 == 0) {
			read_len = read_len_bits / 8;
		} else {
			while (read_len_bits % 8) {
				if (read_len_bits >= sample_numb * 12) {
					read_len_bits -= sample_numb * 12;
				} else {
					read_len_bits = 0;
					break;
				}
			}

			if (read_len_bits) {
				read_len = read_len_bits / 8;
			} else {
				LOG_ERR("read_len error");
				adxl367_sqe_done(cfg, current_sqe, -ENOMEM);
				return;
			}
		}
	} else {
		uint32_t pkts = read_len / packet_size;

		read_len = pkts * packet_size;
	}

	((struct adxl367_fifo_data *)buf)->fifo_byte_count = read_len;

	__ASSERT_NO_MSG(read_len % packet_size == 0);

	uint8_t *read_buf = buf + sizeof(*hdr);

	/* Flush completions */
	struct rtio_cqe *cqe;
	int res = 0;

	do {
		cqe = rtio_cqe_consume(data->rtio_ctx);
		if (cqe != NULL) {
			if ((cqe->result < 0 && res == 0)) {
				LOG_ERR("Bus error: %d", cqe->result);
				res = cqe->result;
			}
			rtio_cqe_release(data->rtio_ctx, cqe);
		}
	} while (cqe != NULL);

	/* Bail/cancel attempt to read sensor on any error */
	if (res != 0) {
		adxl367_sqe_done(cfg, current_sqe, res);
		return;
	}

	/* Set up a new rtio chain to read the FIFO data and then report the result */
	struct rtio_sqe *write_fifo_addr = rtio_sqe_acquire(data->rtio_ctx);
	struct rtio_sqe *read_fifo_data = rtio_sqe_acquire(data->rtio_ctx);
	struct rtio_sqe *complete_op = rtio_sqe_acquire(data->rtio_ctx);
	const uint8_t reg_addr = ADXL367_SPI_READ_FIFO;

	rtio_sqe_prep_tiny_write(write_fifo_addr, data->iodev, RTIO_PRIO_NORM, &reg_addr, 1, NULL);
	write_fifo_addr->flags = RTIO_SQE_TRANSACTION;
	rtio_sqe_prep_read(read_fifo_data, data->iodev, RTIO_PRIO_NORM, read_buf, read_len,
			   current_sqe);
	read_fifo_data->flags = RTIO_SQE_CHAINED;
	rtio_sqe_prep_callback(complete_op, adxl367_fifo_read_cb, (void *)dev, current_sqe);

	rtio_submit(data->rtio_ctx, 0);
}

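/*
 * Runs after the STATUS register has been read: checks which configured FIFO
 * triggers fired and, depending on the requested stream data option, either
 * completes the request without data, flushes the FIFO, or reads the FIFO
 * entries count to start a FIFO read.
 */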
static void adxl367_process_status_cb(struct rtio *r, const struct rtio_sqe *sqr, void *arg)
{
	const struct device *dev = (const struct device *)arg;
	struct adxl367_data *data = (struct adxl367_data *)dev->data;
	const struct adxl367_dev_config *cfg = (const struct adxl367_dev_config *)dev->config;
	struct rtio_iodev_sqe *current_sqe = data->sqe;
	struct sensor_read_config *read_config;
	uint8_t status = data->status;

	__ASSERT(data->sqe != NULL, "%s data->sqe = NULL", __func__);

	read_config = (struct sensor_read_config *)data->sqe->sqe.iodev->data;

	__ASSERT(read_config != NULL, "%s read_config = NULL", __func__);

	__ASSERT(read_config->is_streaming != false,
		 "%s read_config->is_streaming = false", __func__);

	gpio_pin_interrupt_configure_dt(&cfg->interrupt, GPIO_INT_DISABLE);

	struct sensor_stream_trigger *fifo_wmark_cfg = NULL;
	struct sensor_stream_trigger *fifo_full_cfg = NULL;

	for (int i = 0; i < read_config->count; ++i) {
		if (read_config->triggers[i].trigger == SENSOR_TRIG_FIFO_WATERMARK) {
			fifo_wmark_cfg = &read_config->triggers[i];
			continue;
		}

		if (read_config->triggers[i].trigger == SENSOR_TRIG_FIFO_FULL) {
			fifo_full_cfg = &read_config->triggers[i];
			continue;
		}
	}

	bool fifo_full_irq = false;
	bool fifo_wmark_irq = false;

	if ((fifo_wmark_cfg != NULL) && FIELD_GET(ADXL367_STATUS_FIFO_WATERMARK, status)) {
		fifo_wmark_irq = true;
	}

	if ((fifo_full_cfg != NULL) && FIELD_GET(ADXL367_STATUS_FIFO_OVERRUN, status)) {
		fifo_full_irq = true;
	}

	if (!fifo_full_irq && !fifo_wmark_irq) {
		gpio_pin_interrupt_configure_dt(&cfg->interrupt, GPIO_INT_EDGE_TO_ACTIVE);
		return;
	}

	/* Flush completions */
	struct rtio_cqe *cqe;
	int res = 0;

	do {
		cqe = rtio_cqe_consume(data->rtio_ctx);
		if (cqe != NULL) {
			if ((cqe->result < 0 && res == 0)) {
				LOG_ERR("Bus error: %d", cqe->result);
				res = cqe->result;
			}
			rtio_cqe_release(data->rtio_ctx, cqe);
		}
	} while (cqe != NULL);

	/* Bail/cancel attempt to read sensor on any error */
	if (res != 0) {
		adxl367_sqe_done(cfg, current_sqe, res);
		return;
	}

	enum sensor_stream_data_opt data_opt;

	if ((fifo_wmark_cfg != NULL) && (fifo_full_cfg == NULL)) {
		data_opt = fifo_wmark_cfg->opt;
	} else if ((fifo_wmark_cfg == NULL) && (fifo_full_cfg != NULL)) {
		data_opt = fifo_full_cfg->opt;
	} else {
		data_opt = MIN(fifo_wmark_cfg->opt, fifo_full_cfg->opt);
	}

	if (data_opt == SENSOR_STREAM_DATA_NOP || data_opt == SENSOR_STREAM_DATA_DROP) {
		uint8_t *buf;
		uint32_t buf_len;

		/* Clear streaming_sqe since we're done with the call */
		data->sqe = NULL;
		if (rtio_sqe_rx_buf(current_sqe, sizeof(struct adxl367_fifo_data),
				    sizeof(struct adxl367_fifo_data), &buf, &buf_len) != 0) {
			adxl367_sqe_done(cfg, current_sqe, -ENOMEM);
			return;
		}

		struct adxl367_fifo_data *rx_data = (struct adxl367_fifo_data *)buf;

		memset(buf, 0, buf_len);
		rx_data->is_fifo = 1;
		rx_data->timestamp = data->timestamp;
		rx_data->int_status = status;
		rx_data->fifo_byte_count = 0;

		if (data_opt == SENSOR_STREAM_DATA_DROP) {
			/* Flush the FIFO by disabling it. Save current mode for after the reset. */
			adxl367_fifo_flush_rtio(dev);
			return;
		}

		adxl367_sqe_done(cfg, current_sqe, 0);
		return;
	}

	struct rtio_sqe *write_fifo_addr = rtio_sqe_acquire(data->rtio_ctx);
	struct rtio_sqe *read_fifo_data = rtio_sqe_acquire(data->rtio_ctx);
	struct rtio_sqe *complete_op = rtio_sqe_acquire(data->rtio_ctx);
	const uint8_t reg[2] = {ADXL367_SPI_READ_REG, ADXL367_FIFO_ENTRIES_L};

	rtio_sqe_prep_tiny_write(write_fifo_addr, data->iodev, RTIO_PRIO_NORM, reg, 2, NULL);
	write_fifo_addr->flags = RTIO_SQE_TRANSACTION;
	rtio_sqe_prep_read(read_fifo_data, data->iodev, RTIO_PRIO_NORM, data->fifo_ent, 2,
			   current_sqe);
	read_fifo_data->flags = RTIO_SQE_CHAINED;
	rtio_sqe_prep_callback(complete_op, adxl367_process_fifo_samples_cb, (void *)dev,
			       current_sqe);

	rtio_submit(data->rtio_ctx, 0);
}

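/*
 * Streaming-mode interrupt handler: records a timestamp and submits an RTIO
 * chain that reads the STATUS register and hands it to
 * adxl367_process_status_cb().
 */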
void adxl367_stream_irq_handler(const struct device *dev)
{
	struct adxl367_data *data = (struct adxl367_data *)dev->data;

	if (data->sqe == NULL) {
		return;
	}

	data->timestamp = k_ticks_to_ns_floor64(k_uptime_ticks());

	struct rtio_sqe *write_status_addr = rtio_sqe_acquire(data->rtio_ctx);
	struct rtio_sqe *read_status_reg = rtio_sqe_acquire(data->rtio_ctx);
	struct rtio_sqe *check_status_reg = rtio_sqe_acquire(data->rtio_ctx);
	const uint8_t reg[2] = {ADXL367_SPI_READ_REG, ADXL367_STATUS};

	rtio_sqe_prep_tiny_write(write_status_addr, data->iodev, RTIO_PRIO_NORM, reg, 2, NULL);
	write_status_addr->flags = RTIO_SQE_TRANSACTION;
	rtio_sqe_prep_read(read_status_reg, data->iodev, RTIO_PRIO_NORM, &data->status, 1, NULL);
	read_status_reg->flags = RTIO_SQE_CHAINED;
	rtio_sqe_prep_callback(check_status_reg, adxl367_process_status_cb, (void *)dev, NULL);
	rtio_submit(data->rtio_ctx, 0);
}