1 /*
2  * Copyright (c) 2024 Analog Devices Inc.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/logging/log.h>
8 #include <zephyr/drivers/sensor.h>
9 
10 #include "adxl345.h"
11 
12 LOG_MODULE_DECLARE(ADXL345, CONFIG_SENSOR_LOG_LEVEL);
13 
adxl345_submit_stream(const struct device * dev,struct rtio_iodev_sqe * iodev_sqe)14 void adxl345_submit_stream(const struct device *dev, struct rtio_iodev_sqe *iodev_sqe)
15 {
16 	const struct sensor_read_config *cfg =
17 			(const struct sensor_read_config *) iodev_sqe->sqe.iodev->data;
18 	struct adxl345_dev_data *data = (struct adxl345_dev_data *)dev->data;
19 	const struct adxl345_dev_config *cfg_345 = dev->config;
20 	uint8_t int_value = (uint8_t)~ADXL345_INT_MAP_WATERMARK_MSK;
21 	uint8_t fifo_watermark_irq = 0;
22 	int rc = gpio_pin_interrupt_configure_dt(&cfg_345->interrupt,
23 					      GPIO_INT_DISABLE);
24 
25 	if (rc < 0) {
26 		return;
27 	}
28 
29 	for (size_t i = 0; i < cfg->count; i++) {
30 		if (cfg->triggers[i].trigger == SENSOR_TRIG_FIFO_WATERMARK) {
31 			int_value = ADXL345_INT_MAP_WATERMARK_MSK;
32 			fifo_watermark_irq = 1;
33 		}
34 	}
35 		uint8_t status;
36 	if (fifo_watermark_irq != data->fifo_watermark_irq) {
37 		data->fifo_watermark_irq = fifo_watermark_irq;
38 		rc = adxl345_reg_write_mask(dev, ADXL345_INT_MAP, ADXL345_INT_MAP_WATERMARK_MSK,
39 						int_value);
40 		if (rc < 0) {
41 			return;
42 		}
43 
44 		/* Flush the FIFO by disabling it. Save current mode for after the reset. */
45 		enum adxl345_fifo_mode current_fifo_mode = data->fifo_config.fifo_mode;
46 
47 		if (current_fifo_mode == ADXL345_FIFO_BYPASSED) {
48 			current_fifo_mode = ADXL345_FIFO_STREAMED;
49 		}
50 		adxl345_configure_fifo(dev, ADXL345_FIFO_BYPASSED, data->fifo_config.fifo_trigger,
51 				data->fifo_config.fifo_samples);
52 		adxl345_configure_fifo(dev, current_fifo_mode, data->fifo_config.fifo_trigger,
53 				data->fifo_config.fifo_samples);
54 		rc = adxl345_reg_read_byte(dev, ADXL345_FIFO_STATUS_REG, &status);
55 	}
56 
57 	rc = gpio_pin_interrupt_configure_dt(&cfg_345->interrupt,
58 					      GPIO_INT_EDGE_TO_ACTIVE);
59 	if (rc < 0) {
60 		return;
61 	}
62 	data->sqe = iodev_sqe;
63 }
64 
adxl345_irq_en_cb(struct rtio * r,const struct rtio_sqe * sqr,void * arg)65 static void adxl345_irq_en_cb(struct rtio *r, const struct rtio_sqe *sqr, void *arg)
66 {
67 	const struct device *dev = (const struct device *)arg;
68 	const struct adxl345_dev_config *cfg = dev->config;
69 
70 	gpio_pin_interrupt_configure_dt(&cfg->interrupt, GPIO_INT_EDGE_TO_ACTIVE);
71 }
72 
adxl345_fifo_flush_rtio(const struct device * dev)73 static void adxl345_fifo_flush_rtio(const struct device *dev)
74 {
75 	struct adxl345_dev_data *data = dev->data;
76 	uint8_t fifo_config;
77 
78 	fifo_config = (ADXL345_FIFO_CTL_TRIGGER_MODE(data->fifo_config.fifo_trigger) |
79 		       ADXL345_FIFO_CTL_MODE_MODE(ADXL345_FIFO_BYPASSED) |
80 		       ADXL345_FIFO_CTL_SAMPLES_MODE(data->fifo_config.fifo_samples));
81 
82 	struct rtio_sqe *write_fifo_addr = rtio_sqe_acquire(data->rtio_ctx);
83 	const uint8_t reg_addr_w2[2] = {ADXL345_FIFO_CTL_REG, fifo_config};
84 
85 	rtio_sqe_prep_tiny_write(write_fifo_addr, data->iodev, RTIO_PRIO_NORM, reg_addr_w2,
86 					2, NULL);
87 
88 	fifo_config = (ADXL345_FIFO_CTL_TRIGGER_MODE(data->fifo_config.fifo_trigger) |
89 		       ADXL345_FIFO_CTL_MODE_MODE(data->fifo_config.fifo_mode) |
90 		       ADXL345_FIFO_CTL_SAMPLES_MODE(data->fifo_config.fifo_samples));
91 
92 	write_fifo_addr = rtio_sqe_acquire(data->rtio_ctx);
93 	const uint8_t reg_addr_w3[2] = {ADXL345_FIFO_CTL_REG, fifo_config};
94 
95 	rtio_sqe_prep_tiny_write(write_fifo_addr, data->iodev, RTIO_PRIO_NORM, reg_addr_w3,
96 					2, NULL);
97 	write_fifo_addr->flags |= RTIO_SQE_CHAINED;
98 
99 	struct rtio_sqe *complete_op = rtio_sqe_acquire(data->rtio_ctx);
100 
101 	rtio_sqe_prep_callback(complete_op, adxl345_irq_en_cb, (void *)dev, NULL);
102 	rtio_submit(data->rtio_ctx, 0);
103 }
104 
adxl345_fifo_read_cb(struct rtio * rtio_ctx,const struct rtio_sqe * sqe,void * arg)105 static void adxl345_fifo_read_cb(struct rtio *rtio_ctx, const struct rtio_sqe *sqe, void *arg)
106 {
107 	const struct device *dev = (const struct device *)arg;
108 	struct adxl345_dev_data *data = (struct adxl345_dev_data *) dev->data;
109 	const struct adxl345_dev_config *cfg = (const struct adxl345_dev_config *) dev->config;
110 	struct rtio_iodev_sqe *iodev_sqe = sqe->userdata;
111 
112 	if (data->fifo_samples == 0) {
113 		data->fifo_total_bytes = 0;
114 		rtio_iodev_sqe_ok(iodev_sqe, 0);
115 		gpio_pin_interrupt_configure_dt(&cfg->interrupt, GPIO_INT_EDGE_TO_ACTIVE);
116 	}
117 
118 }
119 
/*
 * Completion callback after the FIFO_STATUS register has been read into
 * data->fifo_ent[0]. Claims a receive buffer from the pending streaming SQE,
 * fills in the adxl345_fifo_data header, then issues one register-write +
 * burst-read SQE pair per FIFO sample, attaching adxl345_fifo_read_cb to the
 * last pair to finalize the request.
 */
static void adxl345_process_fifo_samples_cb(struct rtio *r, const struct rtio_sqe *sqr, void *arg)
{
	const struct device *dev = (const struct device *)arg;
	struct adxl345_dev_data *data = (struct adxl345_dev_data *) dev->data;
	const struct adxl345_dev_config *cfg = (const struct adxl345_dev_config *) dev->config;
	struct rtio_iodev_sqe *current_sqe = data->sqe;
	/* Low bits of FIFO_STATUS hold the number of stored sample sets. */
	uint16_t fifo_samples = (data->fifo_ent[0]) & SAMPLE_MASK;
	size_t sample_set_size = SAMPLE_SIZE;
	uint16_t fifo_bytes = fifo_samples * SAMPLE_SIZE;

	data->sqe = NULL;

	/* Not inherently an underrun/overrun as we may have a buffer to fill next time */
	if (current_sqe == NULL) {
		LOG_ERR("No pending SQE");
		gpio_pin_interrupt_configure_dt(&cfg->interrupt, GPIO_INT_EDGE_TO_ACTIVE);
		return;
	}

	/* Minimum: header plus one sample set; ideal: header plus the whole FIFO. */
	const size_t min_read_size = sizeof(struct adxl345_fifo_data) + sample_set_size;
	const size_t ideal_read_size = sizeof(struct adxl345_fifo_data) + fifo_bytes;

	uint8_t *buf;
	uint32_t buf_len;

	if (rtio_sqe_rx_buf(current_sqe, min_read_size, ideal_read_size, &buf, &buf_len) != 0) {
		LOG_ERR("Failed to get buffer");
		rtio_iodev_sqe_err(current_sqe, -ENOMEM);
		gpio_pin_interrupt_configure_dt(&cfg->interrupt, GPIO_INT_EDGE_TO_ACTIVE);
		return;
	}
	LOG_DBG("Requesting buffer [%u, %u] got %u", (unsigned int)min_read_size,
		(unsigned int)ideal_read_size, buf_len);

	/* Read FIFO and call back to rtio with rtio_sqe completion */
	struct adxl345_fifo_data *hdr = (struct adxl345_fifo_data *) buf;

	hdr->is_fifo = 1;
	hdr->timestamp = data->timestamp;
	hdr->int_status = data->status1;
	hdr->is_full_res = data->is_full_res;
	hdr->selected_range = data->selected_range;
	hdr->accel_odr = cfg->odr;
	hdr->sample_set_size = sample_set_size;

	uint32_t buf_avail = buf_len;

	buf_avail -= sizeof(*hdr);

	uint32_t read_len = MIN(fifo_bytes, buf_avail);

	/* On a short buffer, round the read length down to whole sample sets. */
	if (buf_avail < fifo_bytes) {
		uint32_t pkts = read_len / sample_set_size;

		read_len = pkts * sample_set_size;
	}

	((struct adxl345_fifo_data *)buf)->fifo_byte_count = read_len;

	uint8_t *read_buf = buf + sizeof(*hdr);

	/* Flush completions */
	struct rtio_cqe *cqe;
	int res = 0;

	do {
		cqe = rtio_cqe_consume(data->rtio_ctx);
		if (cqe != NULL) {
			/* Remember only the first bus error seen. */
			if ((cqe->result < 0 && res == 0)) {
				LOG_ERR("Bus error: %d", cqe->result);
				res = cqe->result;
			}
			rtio_cqe_release(data->rtio_ctx, cqe);
		}
	} while (cqe != NULL);

	/* Bail/cancel attempt to read sensor on any error */
	if (res != 0) {
		rtio_iodev_sqe_err(current_sqe, res);
		return;
	}

	/*
	 * NOTE(review): the loop below issues fifo_samples burst reads of
	 * SAMPLE_SIZE each into read_buf, but read_len (the space actually
	 * available) may be smaller than fifo_bytes when the buffer was
	 * truncated — verify the short-buffer case cannot overrun read_buf.
	 */
	data->fifo_samples = fifo_samples;
	for (size_t i = 0; i < fifo_samples; i++) {
		struct rtio_sqe *write_fifo_addr = rtio_sqe_acquire(data->rtio_ctx);
		struct rtio_sqe *read_fifo_data = rtio_sqe_acquire(data->rtio_ctx);

		/* Decremented per issued read; adxl345_fifo_read_cb checks for zero. */
		data->fifo_samples--;
		/* Data register address with read + multi-byte (burst) bits set. */
		const uint8_t reg_addr = ADXL345_REG_READ(ADXL345_X_AXIS_DATA_0_REG)
				| ADXL345_MULTIBYTE_FLAG;

		rtio_sqe_prep_tiny_write(write_fifo_addr, data->iodev, RTIO_PRIO_NORM, &reg_addr,
								1, NULL);
		write_fifo_addr->flags = RTIO_SQE_TRANSACTION;
		rtio_sqe_prep_read(read_fifo_data, data->iodev, RTIO_PRIO_NORM,
							read_buf + data->fifo_total_bytes,
							SAMPLE_SIZE, current_sqe);
		data->fifo_total_bytes += SAMPLE_SIZE;
		if (cfg->bus_type == ADXL345_BUS_I2C) {
			read_fifo_data->iodev_flags |= RTIO_IODEV_I2C_STOP | RTIO_IODEV_I2C_RESTART;
		}
		/* Last sample: chain a callback to finalize the streaming SQE. */
		if (i == fifo_samples-1) {
			struct rtio_sqe *complete_op = rtio_sqe_acquire(data->rtio_ctx);

			read_fifo_data->flags = RTIO_SQE_CHAINED;
			rtio_sqe_prep_callback(complete_op, adxl345_fifo_read_cb, (void *)dev,
				current_sqe);
		}
		/* Submit each pair immediately and drop its completion entry. */
		rtio_submit(data->rtio_ctx, 0);
		ARG_UNUSED(rtio_cqe_consume(data->rtio_ctx));
	}
}
233 
/*
 * Completion callback after INT_SOURCE has been read into data->status1.
 * Decides how to service the interrupt for the pending streaming request:
 * no watermark trigger -> re-arm the GPIO interrupt and leave; NOP/DROP
 * options -> complete the SQE with an empty header (flushing the FIFO for
 * DROP); otherwise chain a FIFO_STATUS read into
 * adxl345_process_fifo_samples_cb to drain the FIFO.
 */
static void adxl345_process_status1_cb(struct rtio *r, const struct rtio_sqe *sqr, void *arg)
{
	const struct device *dev = (const struct device *)arg;
	struct adxl345_dev_data *data = (struct adxl345_dev_data *) dev->data;
	const struct adxl345_dev_config *cfg = (const struct adxl345_dev_config *) dev->config;
	struct rtio_iodev_sqe *current_sqe = data->sqe;
	struct sensor_read_config *read_config;
	uint8_t status1 = data->status1;

	if (data->sqe == NULL) {
		return;
	}

	read_config = (struct sensor_read_config *)data->sqe->sqe.iodev->data;

	if (read_config == NULL) {
		return;
	}

	if (read_config->is_streaming == false) {
		return;
	}

	/* Mask the interrupt while this event is processed. */
	gpio_pin_interrupt_configure_dt(&cfg->interrupt, GPIO_INT_DISABLE);

	struct sensor_stream_trigger *fifo_wmark_cfg = NULL;

	for (int i = 0; i < read_config->count; ++i) {
		if (read_config->triggers[i].trigger == SENSOR_TRIG_FIFO_WATERMARK) {
			fifo_wmark_cfg = &read_config->triggers[i];
			continue;
		}
	}

	bool fifo_full_irq = false;

	if ((fifo_wmark_cfg != NULL)
			&& FIELD_GET(ADXL345_INT_MAP_WATERMARK_MSK, status1)) {
		fifo_full_irq = true;
	}

	/* Not a watermark event we care about: re-arm and wait for the next IRQ. */
	if (!fifo_full_irq) {
		gpio_pin_interrupt_configure_dt(&cfg->interrupt, GPIO_INT_EDGE_TO_ACTIVE);
		return;
	}

	/* Flush completions */
	struct rtio_cqe *cqe;
	int res = 0;

	do {
		cqe = rtio_cqe_consume(data->rtio_ctx);
		if (cqe != NULL) {
			/* Remember only the first bus error seen. */
			if ((cqe->result < 0) && (res == 0)) {
				LOG_ERR("Bus error: %d", cqe->result);
				res = cqe->result;
			}
			rtio_cqe_release(data->rtio_ctx, cqe);
		}
	} while (cqe != NULL);

	/* Bail/cancel attempt to read sensor on any error */
	if (res != 0) {
		rtio_iodev_sqe_err(current_sqe, res);
		return;
	}

	/*
	 * fifo_wmark_cfg is guaranteed non-NULL here: the !fifo_full_irq
	 * early-return above fires whenever it is NULL, so data_opt is
	 * always assigned before being read below.
	 */
	enum sensor_stream_data_opt data_opt;

	if (fifo_wmark_cfg != NULL) {
		data_opt = fifo_wmark_cfg->opt;
	}

	if (data_opt == SENSOR_STREAM_DATA_NOP || data_opt == SENSOR_STREAM_DATA_DROP) {
		uint8_t *buf;
		uint32_t buf_len;

		/* Clear streaming_sqe since we're done with the call */
		data->sqe = NULL;
		if (rtio_sqe_rx_buf(current_sqe, sizeof(struct adxl345_fifo_data),
				    sizeof(struct adxl345_fifo_data), &buf, &buf_len) != 0) {
			rtio_iodev_sqe_err(current_sqe, -ENOMEM);
			gpio_pin_interrupt_configure_dt(&cfg->interrupt, GPIO_INT_EDGE_TO_ACTIVE);
			return;
		}

		/* Header-only completion: no sample payload is delivered. */
		struct adxl345_fifo_data *rx_data = (struct adxl345_fifo_data *)buf;

		memset(buf, 0, buf_len);
		rx_data->is_fifo = 1;
		rx_data->timestamp = data->timestamp;
		rx_data->int_status = status1;
		rx_data->fifo_byte_count = 0;
		rtio_iodev_sqe_ok(current_sqe, 0);

		if (data_opt == SENSOR_STREAM_DATA_DROP) {
			/* Flush the FIFO by disabling it. Save current mode for after the reset. */
			adxl345_fifo_flush_rtio(dev);
		}

		gpio_pin_interrupt_configure_dt(&cfg->interrupt, GPIO_INT_EDGE_TO_ACTIVE);
		return;
	}

	/* Chain: write FIFO_STATUS address -> read count -> process samples. */
	struct rtio_sqe *write_fifo_addr = rtio_sqe_acquire(data->rtio_ctx);
	struct rtio_sqe *read_fifo_data = rtio_sqe_acquire(data->rtio_ctx);
	struct rtio_sqe *complete_op = rtio_sqe_acquire(data->rtio_ctx);
	const uint8_t reg_addr = ADXL345_REG_READ(ADXL345_FIFO_STATUS_REG);

	rtio_sqe_prep_tiny_write(write_fifo_addr, data->iodev, RTIO_PRIO_NORM, &reg_addr, 1, NULL);
	write_fifo_addr->flags = RTIO_SQE_TRANSACTION;
	rtio_sqe_prep_read(read_fifo_data, data->iodev, RTIO_PRIO_NORM, data->fifo_ent, 1,
						current_sqe);
	read_fifo_data->flags = RTIO_SQE_CHAINED;
	if (cfg->bus_type == ADXL345_BUS_I2C) {
		read_fifo_data->iodev_flags |= RTIO_IODEV_I2C_STOP | RTIO_IODEV_I2C_RESTART;
	}
	rtio_sqe_prep_callback(complete_op, adxl345_process_fifo_samples_cb, (void *)dev,
							current_sqe);

	rtio_submit(data->rtio_ctx, 0);
}
356 
adxl345_stream_irq_handler(const struct device * dev)357 void adxl345_stream_irq_handler(const struct device *dev)
358 {
359 	struct adxl345_dev_data *data = (struct adxl345_dev_data *) dev->data;
360 	const struct adxl345_dev_config *cfg = (const struct adxl345_dev_config *) dev->config;
361 
362 	if (data->sqe == NULL) {
363 		return;
364 	}
365 	data->timestamp = k_ticks_to_ns_floor64(k_uptime_ticks());
366 	struct rtio_sqe *write_status_addr = rtio_sqe_acquire(data->rtio_ctx);
367 	struct rtio_sqe *read_status_reg = rtio_sqe_acquire(data->rtio_ctx);
368 	struct rtio_sqe *check_status_reg = rtio_sqe_acquire(data->rtio_ctx);
369 	uint8_t reg = ADXL345_REG_READ(ADXL345_INT_SOURCE);
370 
371 	rtio_sqe_prep_tiny_write(write_status_addr, data->iodev, RTIO_PRIO_NORM, &reg, 1, NULL);
372 	write_status_addr->flags = RTIO_SQE_TRANSACTION;
373 	rtio_sqe_prep_read(read_status_reg, data->iodev, RTIO_PRIO_NORM, &data->status1, 1, NULL);
374 	read_status_reg->flags = RTIO_SQE_CHAINED;
375 
376 	if (cfg->bus_type == ADXL345_BUS_I2C) {
377 		read_status_reg->iodev_flags |= RTIO_IODEV_I2C_STOP | RTIO_IODEV_I2C_RESTART;
378 	}
379 	rtio_sqe_prep_callback(check_status_reg, adxl345_process_status1_cb, (void *)dev, NULL);
380 	rtio_submit(data->rtio_ctx, 0);
381 }
382