/*
 * Copyright (c) 2024 Analog Devices Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
#include <zephyr/drivers/sensor.h>

#include "adxl362.h"

LOG_MODULE_DECLARE(ADXL362, CONFIG_SENSOR_LOG_LEVEL);

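/*
 * RTIO completion callback that re-arms the data-ready GPIO interrupt once a
 * previously submitted SQE chain has finished.
 */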
static void adxl362_irq_en_cb(struct rtio *r, const struct rtio_sqe *sqr, void *arg)
{
	const struct device *dev = (const struct device *)arg;
	const struct adxl362_config *cfg = dev->config;

	gpio_pin_interrupt_configure_dt(&cfg->interrupt, GPIO_INT_EDGE_TO_ACTIVE);
}

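/*
 * Flush the FIFO over RTIO by writing FIFO_CTL twice: first with the FIFO
 * disabled (which discards its contents), then with the previously configured
 * mode, temperature setting and watermark restored. The restore write is
 * chained to adxl362_irq_en_cb() so the GPIO interrupt is re-enabled only
 * after it completes.
 */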
static void adxl362_fifo_flush_rtio(const struct device *dev)
{
	struct adxl362_data *data = dev->data;

	uint8_t fifo_config = ADXL362_FIFO_CTL_FIFO_MODE(ADXL362_FIFO_DISABLE);
	struct rtio_sqe *write_fifo_addr = rtio_sqe_acquire(data->rtio_ctx);
	const uint8_t reg_addr_w[3] = {ADXL362_WRITE_REG, ADXL362_REG_FIFO_CTL, fifo_config};

	rtio_sqe_prep_tiny_write(write_fifo_addr, data->iodev, RTIO_PRIO_NORM, reg_addr_w, 3, NULL);

	fifo_config = ADXL362_FIFO_CTL_FIFO_MODE(data->fifo_mode) |
		      (data->en_temp_read * ADXL362_FIFO_CTL_FIFO_TEMP);
	if (data->water_mark_lvl & 0x100) {
		fifo_config |= ADXL362_FIFO_CTL_AH;
	}
	write_fifo_addr = rtio_sqe_acquire(data->rtio_ctx);
	const uint8_t reg_addr_w2[3] = {ADXL362_WRITE_REG, ADXL362_REG_FIFO_CTL, fifo_config};

	rtio_sqe_prep_tiny_write(write_fifo_addr, data->iodev, RTIO_PRIO_NORM,
				 reg_addr_w2, 3, NULL);
	write_fifo_addr->flags |= RTIO_SQE_CHAINED;

	struct rtio_sqe *complete_op = rtio_sqe_acquire(data->rtio_ctx);

	rtio_sqe_prep_callback(complete_op, adxl362_irq_en_cb, (void *)dev, NULL);
	rtio_submit(data->rtio_ctx, 0);
}

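/*
 * Handle a streaming read submission. Disables the GPIO interrupt while the
 * trigger configuration is inspected, maps the requested FIFO watermark /
 * FIFO full triggers to INTMAP1 bits, and reconfigures (and flushes) the FIFO
 * only when the set of requested triggers has changed. The submission is then
 * stored in data->sqe so the interrupt handler can complete it later.
 */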
void adxl362_submit_stream(const struct device *dev, struct rtio_iodev_sqe *iodev_sqe)
{
	const struct sensor_read_config *cfg =
			(const struct sensor_read_config *)iodev_sqe->sqe.iodev->data;
	struct adxl362_data *data = (struct adxl362_data *)dev->data;
	const struct adxl362_config *cfg_362 = dev->config;
	uint8_t int_mask = 0;
	uint8_t int_value = 0;
	uint8_t fifo_wmark_irq = 0;
	uint8_t fifo_full_irq = 0;

	int rc = gpio_pin_interrupt_configure_dt(&cfg_362->interrupt,
						 GPIO_INT_DISABLE);
	if (rc < 0) {
		return;
	}

	for (size_t i = 0; i < cfg->count; i++) {
		if (cfg->triggers[i].trigger == SENSOR_TRIG_FIFO_WATERMARK) {
			int_mask |= ADXL362_INTMAP1_FIFO_WATERMARK;
			int_value |= ADXL362_INTMAP1_FIFO_WATERMARK;
			fifo_wmark_irq = 1;
		}

		if (cfg->triggers[i].trigger == SENSOR_TRIG_FIFO_FULL) {
			int_mask |= ADXL362_INTMAP1_FIFO_OVERRUN;
			int_value |= ADXL362_INTMAP1_FIFO_OVERRUN;
			fifo_full_irq = 1;
		}
	}

	if (data->fifo_wmark_irq && (fifo_wmark_irq == 0)) {
		int_mask |= ADXL362_INTMAP1_FIFO_WATERMARK;
	}

	if (data->fifo_full_irq && (fifo_full_irq == 0)) {
		int_mask |= ADXL362_INTMAP1_FIFO_OVERRUN;
	}

	/* Do not flush the FIFO if interrupts are already enabled. */
	if ((fifo_wmark_irq != data->fifo_wmark_irq) || (fifo_full_irq != data->fifo_full_irq)) {
		data->fifo_wmark_irq = fifo_wmark_irq;
		data->fifo_full_irq = fifo_full_irq;

		rc = adxl362_reg_write_mask(dev, ADXL362_REG_INTMAP1, int_mask, int_value);
		if (rc < 0) {
			return;
		}

		/* Flush the FIFO by disabling it. Save current mode for after the reset. */
		uint8_t fifo_mode = data->fifo_mode;
		uint8_t en_temp_read = data->en_temp_read;
		uint16_t water_mark_lvl = data->water_mark_lvl;

		rc = adxl362_fifo_setup(dev, ADXL362_FIFO_DISABLE, 0, 0);
		if (rc < 0) {
			return;
		}

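		/*
		 * If the FIFO was not configured before streaming was
		 * requested, fall back to stream mode with temperature reads
		 * enabled and a watermark level of 0x80.
		 */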
		if (fifo_mode == ADXL362_FIFO_DISABLE) {
			fifo_mode = ADXL362_FIFO_STREAM;
		}

		if (en_temp_read == 0) {
			en_temp_read = 1;
		}

		if (water_mark_lvl == 0) {
			water_mark_lvl = 0x80;
		}

		rc = adxl362_fifo_setup(dev, fifo_mode, water_mark_lvl, en_temp_read);
		if (rc < 0) {
			return;
		}
	}

	rc = gpio_pin_interrupt_configure_dt(&cfg_362->interrupt,
					     GPIO_INT_EDGE_TO_ACTIVE);
	if (rc < 0) {
		return;
	}

	data->sqe = iodev_sqe;
}

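/*
 * Completion callback for the FIFO burst read: marks the pending streaming
 * SQE as successfully completed and re-arms the GPIO interrupt.
 */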
static void adxl362_fifo_read_cb(struct rtio *rtio_ctx, const struct rtio_sqe *sqe, void *arg)
{
	const struct device *dev = (const struct device *)arg;
	const struct adxl362_config *cfg = (const struct adxl362_config *)dev->config;
	struct rtio_iodev_sqe *iodev_sqe = sqe->userdata;

	rtio_iodev_sqe_ok(iodev_sqe, 0);

	gpio_pin_interrupt_configure_dt(&cfg->interrupt, GPIO_INT_EDGE_TO_ACTIVE);
}

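/*
 * Callback run after the FIFO entries count has been read. Computes how many
 * bytes are available (each FIFO entry is a two-byte word; a sample set is
 * X, Y and Z, plus an optional temperature word), fills in the buffer header,
 * and chains a burst read of the FIFO followed by adxl362_fifo_read_cb() to
 * complete the request.
 */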
static void adxl362_process_fifo_samples_cb(struct rtio *r, const struct rtio_sqe *sqr, void *arg)
{
	const struct device *dev = (const struct device *)arg;
	struct adxl362_data *data = (struct adxl362_data *)dev->data;
	const struct adxl362_config *cfg = (const struct adxl362_config *)dev->config;
	struct rtio_iodev_sqe *current_sqe = data->sqe;
	uint16_t fifo_samples = ((data->fifo_ent[0]) | ((data->fifo_ent[1] & 0x3) << 8));
	size_t sample_set_size = 6;

	if (data->en_temp_read) {
		sample_set_size += 2;
	}

	uint16_t fifo_bytes = fifo_samples * 2 /*sample size*/;

	data->sqe = NULL;

	/* Not inherently an underrun/overrun as we may have a buffer to fill next time */
	if (current_sqe == NULL) {
		LOG_ERR("No pending SQE");
		gpio_pin_interrupt_configure_dt(&cfg->interrupt, GPIO_INT_EDGE_TO_ACTIVE);
		return;
	}

	const size_t min_read_size = sizeof(struct adxl362_fifo_data) + sample_set_size;
	const size_t ideal_read_size = sizeof(struct adxl362_fifo_data) + fifo_bytes;

	uint8_t *buf;
	uint32_t buf_len;

	if (rtio_sqe_rx_buf(current_sqe, min_read_size, ideal_read_size, &buf, &buf_len) != 0) {
		LOG_ERR("Failed to get buffer");
		rtio_iodev_sqe_err(current_sqe, -ENOMEM);
		gpio_pin_interrupt_configure_dt(&cfg->interrupt, GPIO_INT_EDGE_TO_ACTIVE);
		return;
	}
	LOG_DBG("Requesting buffer [%u, %u] got %u", (unsigned int)min_read_size,
		(unsigned int)ideal_read_size, buf_len);

	/* Read FIFO and call back to rtio with rtio_sqe completion */
	struct adxl362_fifo_data *hdr = (struct adxl362_fifo_data *) buf;

	hdr->is_fifo = 1;
	hdr->timestamp = data->timestamp;
	hdr->int_status = data->status;
	hdr->selected_range = data->selected_range;
	hdr->has_tmp = data->en_temp_read;

	uint32_t buf_avail = buf_len;

	buf_avail -= sizeof(*hdr);

	uint32_t read_len = MIN(fifo_bytes, buf_avail);
	uint32_t pkts = read_len / sample_set_size;

	read_len = pkts * sample_set_size;

	((struct adxl362_fifo_data *)buf)->fifo_byte_count = read_len;

	__ASSERT_NO_MSG(read_len % sample_set_size == 0);

	uint8_t *read_buf = buf + sizeof(*hdr);

	/* Flush completions */
	struct rtio_cqe *cqe;
	int res = 0;

	do {
		cqe = rtio_cqe_consume(data->rtio_ctx);
		if (cqe != NULL) {
			if ((cqe->result < 0 && res == 0)) {
				LOG_ERR("Bus error: %d", cqe->result);
				res = cqe->result;
			}
			rtio_cqe_release(data->rtio_ctx, cqe);
		}
	} while (cqe != NULL);

	/* Bail/cancel attempt to read sensor on any error */
	if (res != 0) {
		rtio_iodev_sqe_err(current_sqe, res);
		return;
	}

	/* Setup new rtio chain to read the fifo data and report then check the
	 * result
	 */
	struct rtio_sqe *write_fifo_addr = rtio_sqe_acquire(data->rtio_ctx);
	struct rtio_sqe *read_fifo_data = rtio_sqe_acquire(data->rtio_ctx);
	struct rtio_sqe *complete_op = rtio_sqe_acquire(data->rtio_ctx);
	const uint8_t reg_addr = ADXL362_READ_FIFO;

	rtio_sqe_prep_tiny_write(write_fifo_addr, data->iodev, RTIO_PRIO_NORM, &reg_addr, 1, NULL);
	write_fifo_addr->flags = RTIO_SQE_TRANSACTION;
	rtio_sqe_prep_read(read_fifo_data, data->iodev, RTIO_PRIO_NORM, read_buf, read_len,
			   current_sqe);
	read_fifo_data->flags = RTIO_SQE_CHAINED;
	rtio_sqe_prep_callback(complete_op, adxl362_fifo_read_cb, (void *)dev, current_sqe);

	rtio_submit(data->rtio_ctx, 0);
}

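/*
 * Callback run after the STATUS register read triggered by the interrupt.
 * Checks which of the requested FIFO triggers fired, honours the stream data
 * option (NOP and DROP complete the request with an empty frame, DROP
 * additionally flushes the FIFO), and otherwise chains a read of the FIFO
 * entries count that continues in adxl362_process_fifo_samples_cb().
 */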
static void adxl362_process_status_cb(struct rtio *r, const struct rtio_sqe *sqr, void *arg)
{
	const struct device *dev = (const struct device *)arg;
	struct adxl362_data *data = (struct adxl362_data *) dev->data;
	const struct adxl362_config *cfg = (const struct adxl362_config *) dev->config;
	struct rtio_iodev_sqe *current_sqe = data->sqe;
	struct sensor_read_config *read_config;
	uint8_t status = data->status;

	if (data->sqe == NULL) {
		return;
	}

	read_config = (struct sensor_read_config *)data->sqe->sqe.iodev->data;

	if (read_config == NULL) {
		return;
	}

	if (read_config->is_streaming == false) {
		return;
	}

	gpio_pin_interrupt_configure_dt(&cfg->interrupt, GPIO_INT_DISABLE);

	struct sensor_stream_trigger *fifo_wmark_cfg = NULL;
	struct sensor_stream_trigger *fifo_full_cfg = NULL;

	for (int i = 0; i < read_config->count; ++i) {
		if (read_config->triggers[i].trigger == SENSOR_TRIG_FIFO_WATERMARK) {
			fifo_wmark_cfg = &read_config->triggers[i];
			continue;
		}

		if (read_config->triggers[i].trigger == SENSOR_TRIG_FIFO_FULL) {
			fifo_full_cfg = &read_config->triggers[i];
			continue;
		}
	}

	bool fifo_full_irq = false;
	bool fifo_wmark_irq = false;

	if ((fifo_wmark_cfg != NULL) && ADXL362_STATUS_CHECK_FIFO_WTR(status)) {
		fifo_wmark_irq = true;
	}

	if ((fifo_full_cfg != NULL) && ADXL362_STATUS_CHECK_FIFO_OVR(status)) {
		fifo_full_irq = true;
	}

	if (!fifo_full_irq && !fifo_wmark_irq) {
		gpio_pin_interrupt_configure_dt(&cfg->interrupt, GPIO_INT_EDGE_TO_ACTIVE);
		return;
	}

	/* Flush completions */
	struct rtio_cqe *cqe;
	int res = 0;

	do {
		cqe = rtio_cqe_consume(data->rtio_ctx);
		if (cqe != NULL) {
			if ((cqe->result < 0 && res == 0)) {
				LOG_ERR("Bus error: %d", cqe->result);
				res = cqe->result;
			}
			rtio_cqe_release(data->rtio_ctx, cqe);
		}
	} while (cqe != NULL);

	/* Bail/cancel attempt to read sensor on any error */
	if (res != 0) {
		rtio_iodev_sqe_err(current_sqe, res);
		return;
	}

	enum sensor_stream_data_opt data_opt;

	if ((fifo_wmark_cfg != NULL) && (fifo_full_cfg == NULL)) {
		data_opt = fifo_wmark_cfg->opt;
	} else if ((fifo_wmark_cfg == NULL) && (fifo_full_cfg != NULL)) {
		data_opt = fifo_full_cfg->opt;
	} else {
		data_opt = MIN(fifo_wmark_cfg->opt, fifo_full_cfg->opt);
	}

	if (data_opt == SENSOR_STREAM_DATA_NOP || data_opt == SENSOR_STREAM_DATA_DROP) {
		uint8_t *buf;
		uint32_t buf_len;

		/* Clear streaming_sqe since we're done with the call */
		data->sqe = NULL;
		if (rtio_sqe_rx_buf(current_sqe, sizeof(struct adxl362_fifo_data),
				    sizeof(struct adxl362_fifo_data), &buf, &buf_len) != 0) {
			rtio_iodev_sqe_err(current_sqe, -ENOMEM);
			gpio_pin_interrupt_configure_dt(&cfg->interrupt, GPIO_INT_EDGE_TO_ACTIVE);
			return;
		}

		struct adxl362_fifo_data *rx_data = (struct adxl362_fifo_data *)buf;

		memset(buf, 0, buf_len);
		rx_data->is_fifo = 1;
		rx_data->timestamp = data->timestamp;
		rx_data->int_status = status;
		rx_data->fifo_byte_count = 0;
		rtio_iodev_sqe_ok(current_sqe, 0);

		if (data_opt == SENSOR_STREAM_DATA_DROP) {
			/* Flush the FIFO by disabling it. Save current mode for after the reset. */
			adxl362_fifo_flush_rtio(dev);
			return;
		}

		gpio_pin_interrupt_configure_dt(&cfg->interrupt, GPIO_INT_EDGE_TO_ACTIVE);
		return;
	}

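	/*
	 * Read the two-byte FIFO entries count starting at ADXL362_REG_FIFO_L;
	 * processing continues in adxl362_process_fifo_samples_cb().
	 */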
	struct rtio_sqe *write_fifo_addr = rtio_sqe_acquire(data->rtio_ctx);
	struct rtio_sqe *read_fifo_data = rtio_sqe_acquire(data->rtio_ctx);
	struct rtio_sqe *complete_op = rtio_sqe_acquire(data->rtio_ctx);
	const uint8_t reg[2] = {ADXL362_READ_REG, ADXL362_REG_FIFO_L};

	rtio_sqe_prep_tiny_write(write_fifo_addr, data->iodev, RTIO_PRIO_NORM, reg, 2, NULL);
	write_fifo_addr->flags = RTIO_SQE_TRANSACTION;
	rtio_sqe_prep_read(read_fifo_data, data->iodev, RTIO_PRIO_NORM, data->fifo_ent, 2,
			   current_sqe);
	read_fifo_data->flags = RTIO_SQE_CHAINED;
	rtio_sqe_prep_callback(complete_op, adxl362_process_fifo_samples_cb, (void *)dev,
			       current_sqe);

	rtio_submit(data->rtio_ctx, 0);
}

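/*
 * GPIO interrupt handler for streaming mode: timestamps the event and queues
 * an RTIO chain that reads the STATUS register and then runs
 * adxl362_process_status_cb().
 */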
void adxl362_stream_irq_handler(const struct device *dev)
{
	struct adxl362_data *data = (struct adxl362_data *) dev->data;

	if (data->sqe == NULL) {
		return;
	}

	data->timestamp = k_ticks_to_ns_floor64(k_uptime_ticks());

	struct rtio_sqe *write_status_addr = rtio_sqe_acquire(data->rtio_ctx);
	struct rtio_sqe *read_status_reg = rtio_sqe_acquire(data->rtio_ctx);
	struct rtio_sqe *check_status_reg = rtio_sqe_acquire(data->rtio_ctx);
	const uint8_t reg[2] = {ADXL362_READ_REG, ADXL362_REG_STATUS};

	rtio_sqe_prep_tiny_write(write_status_addr, data->iodev, RTIO_PRIO_NORM, reg, 2, NULL);
	write_status_addr->flags = RTIO_SQE_TRANSACTION;
	rtio_sqe_prep_read(read_status_reg, data->iodev, RTIO_PRIO_NORM, &data->status, 1, NULL);
	read_status_reg->flags = RTIO_SQE_CHAINED;
	rtio_sqe_prep_callback(check_status_reg, adxl362_process_status_cb, (void *)dev, NULL);
	rtio_submit(data->rtio_ctx, 0);
}