1 /*
2 * Copyright (c) 2018 Nordic Semiconductor ASA
3 * Copyright (c) 2017 Intel Corporation
4 *
5 * SPDX-License-Identifier: Apache-2.0
6 */
7
8 #ifndef ZEPHYR_DRIVERS_ADC_ADC_CONTEXT_H_
9 #define ZEPHYR_DRIVERS_ADC_ADC_CONTEXT_H_
10
11 #include <drivers/adc.h>
12 #include <sys/atomic.h>
13
14 #ifdef __cplusplus
15 extern "C" {
16 #endif
17
18 struct adc_context;
19
20 /*
21 * Each driver should provide implementations of the following two functions:
22 * - adc_context_start_sampling() that will be called when a sampling (of one
23 * or more channels, depending on the realized sequence) is to be started
24 * - adc_context_update_buffer_pointer() that will be called when the sample
25 * buffer pointer should be prepared for writing of next sampling results,
26 * the "repeat_sampling" parameter indicates if the results should be written
27 * in the same place as before (when true) or as consecutive ones (otherwise).
28 */
29 static void adc_context_start_sampling(struct adc_context *ctx);
30 static void adc_context_update_buffer_pointer(struct adc_context *ctx,
31 bool repeat_sampling);
32 /*
33 * If a given driver uses some dedicated hardware timer to trigger consecutive
34 * samplings, it should implement also the following two functions. Otherwise,
35 * it should define the ADC_CONTEXT_USES_KERNEL_TIMER macro to enable parts of
36 * this module that utilize a standard kernel timer.
37 */
38 static void adc_context_enable_timer(struct adc_context *ctx);
39 static void adc_context_disable_timer(struct adc_context *ctx);
40
41
struct adc_context {
	/*
	 * Number of samplings requested but not yet finished. Incremented by
	 * adc_context_request_next_sampling() (a 0 -> 1 transition starts the
	 * sampling immediately) and decremented in
	 * adc_context_on_sampling_done().
	 */
	atomic_t sampling_requested;
#ifdef ADC_CONTEXT_USES_KERNEL_TIMER
	/* Kernel timer used to trigger consecutive samplings. */
	struct k_timer timer;
#endif /* ADC_CONTEXT_USES_KERNEL_TIMER */

	/* Serializes read requests - one sequence at a time per instance. */
	struct k_sem lock;
	/* Signaled when a synchronous read sequence completes. */
	struct k_sem sync;
	/* Status of the current sequence, reported to the caller on completion. */
	int status;

#ifdef CONFIG_ADC_ASYNC
	/* Optional signal raised when an asynchronous read completes. */
	struct k_poll_signal *signal;
	/* True when the current read was started via the asynchronous API. */
	bool asynchronous;
#endif /* CONFIG_ADC_ASYNC */

	/* Local copy of the sequence currently being processed. */
	struct adc_sequence sequence;
	/* Local copy of the sequence options (used when sequence.options set). */
	struct adc_sequence_options options;
	/* Index of the current sampling within the sequence (0-based). */
	uint16_t sampling_index;
};
61
#ifdef ADC_CONTEXT_USES_KERNEL_TIMER
/*
 * Static initializer for the context's kernel timer, to be used in the
 * driver data initializer. "_ctx_name" is the name of the adc_context
 * member inside "_data". Expiry of this timer triggers the next sampling.
 */
#define ADC_CONTEXT_INIT_TIMER(_data, _ctx_name) \
	._ctx_name.timer = Z_TIMER_INITIALIZER(_data._ctx_name.timer, \
						adc_context_on_timer_expired, \
						NULL)
#endif /* ADC_CONTEXT_USES_KERNEL_TIMER */

/*
 * Static initializer for the context lock. Note that the semaphore starts
 * with a count of 0 (taken); the driver is expected to release it once its
 * initialization completes (see adc_context_unlock_unconditionally()).
 */
#define ADC_CONTEXT_INIT_LOCK(_data, _ctx_name) \
	._ctx_name.lock = Z_SEM_INITIALIZER(_data._ctx_name.lock, 0, 1)

/* Static initializer for the synchronous-completion semaphore (starts taken). */
#define ADC_CONTEXT_INIT_SYNC(_data, _ctx_name) \
	._ctx_name.sync = Z_SEM_INITIALIZER(_data._ctx_name.sync, 0, 1)
74
75
adc_context_request_next_sampling(struct adc_context * ctx)76 static inline void adc_context_request_next_sampling(struct adc_context *ctx)
77 {
78 if (atomic_inc(&ctx->sampling_requested) == 0) {
79 adc_context_start_sampling(ctx);
80 } else {
81 /*
82 * If a sampling was already requested and was not finished yet,
83 * do not start another one from here, this will be done from
84 * adc_context_on_sampling_done() after the current sampling is
85 * complete. Instead, note this fact, and inform the user about
86 * it after the sequence is done.
87 */
88 ctx->status = -EBUSY;
89 }
90 }
91
92 #ifdef ADC_CONTEXT_USES_KERNEL_TIMER
/*
 * Starts the context's kernel timer: first expiry immediately (K_NO_WAIT),
 * then periodically at the interval configured in the sequence options.
 */
static inline void adc_context_enable_timer(struct adc_context *ctx)
{
	k_timer_start(&ctx->timer, K_NO_WAIT, K_USEC(ctx->options.interval_us));
}
97
/* Stops the context's kernel timer; no further expiries will occur. */
static inline void adc_context_disable_timer(struct adc_context *ctx)
{
	k_timer_stop(&ctx->timer);
}
102
adc_context_on_timer_expired(struct k_timer * timer_id)103 static void adc_context_on_timer_expired(struct k_timer *timer_id)
104 {
105 struct adc_context *ctx =
106 CONTAINER_OF(timer_id, struct adc_context, timer);
107
108 adc_context_request_next_sampling(ctx);
109 }
110 #endif /* ADC_CONTEXT_USES_KERNEL_TIMER */
111
112
/*
 * Acquires exclusive access to the ADC instance for a read request; blocks
 * until any previous read releases the lock.
 *
 * @param ctx          the ADC context.
 * @param asynchronous true if the read was started via the asynchronous API.
 * @param signal       optional signal to raise when an asynchronous read
 *                     completes (may be NULL).
 */
static inline void adc_context_lock(struct adc_context *ctx,
				    bool asynchronous,
				    struct k_poll_signal *signal)
{
	k_sem_take(&ctx->lock, K_FOREVER);

#ifdef CONFIG_ADC_ASYNC
	ctx->asynchronous = asynchronous;
	ctx->signal = signal;
#else
	/*
	 * Silence "unused parameter" warnings when asynchronous reads are
	 * disabled - the parameters are only consumed in that configuration.
	 */
	(void)asynchronous;
	(void)signal;
#endif /* CONFIG_ADC_ASYNC */
}
124
adc_context_release(struct adc_context * ctx,int status)125 static inline void adc_context_release(struct adc_context *ctx, int status)
126 {
127 #ifdef CONFIG_ADC_ASYNC
128 if (ctx->asynchronous && (status == 0)) {
129 return;
130 }
131 #endif /* CONFIG_ADC_ASYNC */
132
133 k_sem_give(&ctx->lock);
134 }
135
adc_context_unlock_unconditionally(struct adc_context * ctx)136 static inline void adc_context_unlock_unconditionally(struct adc_context *ctx)
137 {
138 if (!k_sem_count_get(&ctx->lock)) {
139 k_sem_give(&ctx->lock);
140 }
141 }
142
adc_context_wait_for_completion(struct adc_context * ctx)143 static inline int adc_context_wait_for_completion(struct adc_context *ctx)
144 {
145 #ifdef CONFIG_ADC_ASYNC
146 if (ctx->asynchronous) {
147 return 0;
148 }
149 #endif /* CONFIG_ADC_ASYNC */
150
151 k_sem_take(&ctx->sync, K_FOREVER);
152 return ctx->status;
153 }
154
adc_context_complete(struct adc_context * ctx,int status)155 static inline void adc_context_complete(struct adc_context *ctx, int status)
156 {
157 #ifdef CONFIG_ADC_ASYNC
158 if (ctx->asynchronous) {
159 if (ctx->signal) {
160 k_poll_signal_raise(ctx->signal, status);
161 }
162
163 k_sem_give(&ctx->lock);
164 return;
165 }
166 #endif /* CONFIG_ADC_ASYNC */
167
168 /*
169 * Override the status only when an error is signaled to this function.
170 * Please note that adc_context_request_next_sampling() might have set
171 * this field.
172 */
173 if (status != 0) {
174 ctx->status = status;
175 }
176 k_sem_give(&ctx->sync);
177 }
178
adc_context_start_read(struct adc_context * ctx,const struct adc_sequence * sequence)179 static inline void adc_context_start_read(struct adc_context *ctx,
180 const struct adc_sequence *sequence)
181 {
182 ctx->sequence = *sequence;
183 ctx->status = 0;
184
185 if (sequence->options) {
186 ctx->options = *sequence->options;
187 ctx->sequence.options = &ctx->options;
188 ctx->sampling_index = 0U;
189
190 if (ctx->options.interval_us != 0U) {
191 atomic_set(&ctx->sampling_requested, 0);
192 adc_context_enable_timer(ctx);
193 return;
194 }
195 }
196
197 adc_context_start_sampling(ctx);
198 }
199
200 /*
201 * This function should be called after a sampling (of one or more channels,
202 * depending on the realized sequence) is done. It calls the defined callback
203 * function if required and takes further actions accordingly.
204 */
static inline void adc_context_on_sampling_done(struct adc_context *ctx,
						const struct device *dev)
{
	if (ctx->sequence.options) {
		adc_sequence_callback callback = ctx->options.callback;
		enum adc_action action;
		bool finish = false;
		bool repeat = false;

		/*
		 * Ask the user callback (if provided) how to proceed;
		 * without a callback, continue the sequence normally.
		 */
		if (callback) {
			action = callback(dev,
					  &ctx->sequence,
					  ctx->sampling_index);
		} else {
			action = ADC_ACTION_CONTINUE;
		}

		switch (action) {
		case ADC_ACTION_REPEAT:
			repeat = true;
			break;
		case ADC_ACTION_FINISH:
			finish = true;
			break;
		default: /* ADC_ACTION_CONTINUE */
			/* Advance until all extra samplings are done. */
			if (ctx->sampling_index <
			    ctx->options.extra_samplings) {
				++ctx->sampling_index;
			} else {
				finish = true;
			}
		}

		if (!finish) {
			/*
			 * Let the driver rewind (repeat == true) or advance
			 * the result buffer pointer for the next sampling.
			 */
			adc_context_update_buffer_pointer(ctx, repeat);

			/*
			 * Immediately start the next sampling if working with
			 * a zero interval or if the timer expired again while
			 * the current sampling was in progress.
			 */
			if (ctx->options.interval_us == 0U) {
				adc_context_start_sampling(ctx);
			} else if (atomic_dec(&ctx->sampling_requested) > 1) {
				adc_context_start_sampling(ctx);
			}

			return;
		}

		/* Sequence finished - stop any periodic trigger. */
		if (ctx->options.interval_us != 0U) {
			adc_context_disable_timer(ctx);
		}
	}

	adc_context_complete(ctx, 0);
}
262
263 #ifdef __cplusplus
264 }
265 #endif
266
267 #endif /* ZEPHYR_DRIVERS_ADC_ADC_CONTEXT_H_ */
268