/*
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Private API for SPI drivers
 */

#ifndef ZEPHYR_DRIVERS_SPI_SPI_CONTEXT_H_
#define ZEPHYR_DRIVERS_SPI_SPI_CONTEXT_H_

#include <drivers/gpio.h>
#include <drivers/spi.h>
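
/*
 * Note: this header uses the LOG_ERR/LOG_INF/LOG_DBG macros but does not
 * set up logging itself; it expects the including driver to have declared
 * or registered a log module (e.g. with LOG_MODULE_REGISTER) beforehand.
 */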

#ifdef __cplusplus
extern "C" {
#endif

enum spi_ctx_runtime_op_mode {
	SPI_CTX_RUNTIME_OP_MODE_MASTER = BIT(0),
	SPI_CTX_RUNTIME_OP_MODE_SLAVE  = BIT(1),
};

struct spi_context {
	/* Configuration this context is currently set up for */
	const struct spi_config *config;
	/* Owner of the bus while SPI_LOCK_ON is in effect */
	const struct spi_config *owner;

	/* Bus lock and synchronous transfer-completion signaling */
	struct k_sem lock;
	struct k_sem sync;
	int sync_status;

#ifdef CONFIG_SPI_ASYNC
	struct k_poll_signal *signal;
	bool asynchronous;
#endif /* CONFIG_SPI_ASYNC */
	/* Remaining buffers of the current transfer's buffer sets */
	const struct spi_buf *current_tx;
	size_t tx_count;
	const struct spi_buf *current_rx;
	size_t rx_count;

	/* Position within the active buffers; lengths are in frames */
	const uint8_t *tx_buf;
	size_t tx_len;
	uint8_t *rx_buf;
	size_t rx_len;

#ifdef CONFIG_SPI_SLAVE
	/* Number of frames received while operating as a slave */
	int recv_frames;
#endif /* CONFIG_SPI_SLAVE */
};

#define SPI_CONTEXT_INIT_LOCK(_data, _ctx_name)				\
	._ctx_name.lock = Z_SEM_INITIALIZER(_data._ctx_name.lock, 0, 1)

#define SPI_CONTEXT_INIT_SYNC(_data, _ctx_name)				\
	._ctx_name.sync = Z_SEM_INITIALIZER(_data._ctx_name.sync, 0, 1)
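
/*
 * Example: a minimal sketch of how a driver would use these initializers,
 * assuming a hypothetical driver data type with a spi_context member
 * named ctx:
 *
 *	static struct my_spi_data my_spi_data_0 = {
 *		SPI_CONTEXT_INIT_LOCK(my_spi_data_0, ctx),
 *		SPI_CONTEXT_INIT_SYNC(my_spi_data_0, ctx),
 *	};
 */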

static inline bool spi_context_configured(struct spi_context *ctx,
					  const struct spi_config *config)
{
	return !!(ctx->config == config);
}

static inline bool spi_context_is_slave(struct spi_context *ctx)
{
	return (ctx->config->operation & SPI_OP_MODE_SLAVE);
}

static inline void spi_context_lock(struct spi_context *ctx,
				    bool asynchronous,
				    struct k_poll_signal *signal,
				    const struct spi_config *spi_cfg)
{
	if ((spi_cfg->operation & SPI_LOCK_ON) &&
	    (k_sem_count_get(&ctx->lock) == 0) &&
	    (ctx->owner == spi_cfg)) {
		return;
	}

	k_sem_take(&ctx->lock, K_FOREVER);
	ctx->owner = spi_cfg;

#ifdef CONFIG_SPI_ASYNC
	ctx->asynchronous = asynchronous;
	ctx->signal = signal;
#endif /* CONFIG_SPI_ASYNC */
}

static inline void spi_context_release(struct spi_context *ctx, int status)
{
#ifdef CONFIG_SPI_SLAVE
	if (status >= 0 && (ctx->config->operation & SPI_LOCK_ON)) {
		return;
	}
#endif /* CONFIG_SPI_SLAVE */

#ifdef CONFIG_SPI_ASYNC
	if (!ctx->asynchronous || (status < 0)) {
		ctx->owner = NULL;
		k_sem_give(&ctx->lock);
	}
#else
	if (!(ctx->config->operation & SPI_LOCK_ON)) {
		ctx->owner = NULL;
		k_sem_give(&ctx->lock);
	}
#endif /* CONFIG_SPI_ASYNC */
}
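
/*
 * Example: how the lock/release pair typically brackets a driver's
 * transceive path. A minimal sketch; my_spi_data and transceive_hw() are
 * hypothetical driver-specific names:
 *
 *	static int transceive(const struct device *dev,
 *			      const struct spi_config *config,
 *			      const struct spi_buf_set *tx_bufs,
 *			      const struct spi_buf_set *rx_bufs)
 *	{
 *		struct my_spi_data *data = dev->data;
 *		int ret;
 *
 *		spi_context_lock(&data->ctx, false, NULL, config);
 *		ret = transceive_hw(data, config, tx_bufs, rx_bufs);
 *		spi_context_release(&data->ctx, ret);
 *
 *		return ret;
 *	}
 */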

static inline int spi_context_wait_for_completion(struct spi_context *ctx)
{
	int status = 0;
	k_timeout_t timeout;

	/* Do not use any timeout in slave mode, as in that case it is not
	 * known when the transfer will actually start and what the frequency
	 * will be.
	 */
	if (IS_ENABLED(CONFIG_SPI_SLAVE) && spi_context_is_slave(ctx)) {
		timeout = K_FOREVER;
	} else {
		uint32_t timeout_ms;

		/* Estimate the worst-case transfer time, assuming 8 clock
		 * cycles per frame, and add a configurable tolerance on top.
		 */
		timeout_ms = MAX(ctx->tx_len, ctx->rx_len) * 8 * 1000 /
			     ctx->config->frequency;
		timeout_ms += CONFIG_SPI_COMPLETION_TIMEOUT_TOLERANCE;

		timeout = K_MSEC(timeout_ms);
	}

#ifdef CONFIG_SPI_ASYNC
	if (!ctx->asynchronous) {
		if (k_sem_take(&ctx->sync, timeout)) {
			LOG_ERR("Timeout waiting for transfer complete");
			return -ETIMEDOUT;
		}
		status = ctx->sync_status;
	}
#else
	if (k_sem_take(&ctx->sync, timeout)) {
		LOG_ERR("Timeout waiting for transfer complete");
		return -ETIMEDOUT;
	}
	status = ctx->sync_status;
#endif /* CONFIG_SPI_ASYNC */

#ifdef CONFIG_SPI_SLAVE
	if (spi_context_is_slave(ctx) && !status) {
		return ctx->recv_frames;
	}
#endif /* CONFIG_SPI_SLAVE */

	return status;
}
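
/*
 * Example: a synchronous, interrupt-driven driver typically starts the
 * hardware and then blocks here until its ISR calls spi_context_complete().
 * A sketch, with start_transfer_hw() a hypothetical hardware kick-off:
 *
 *	spi_context_cs_control(&data->ctx, true);
 *	start_transfer_hw(data);
 *	ret = spi_context_wait_for_completion(&data->ctx);
 */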

static inline void spi_context_complete(struct spi_context *ctx, int status)
{
#ifdef CONFIG_SPI_ASYNC
	if (!ctx->asynchronous) {
		ctx->sync_status = status;
		k_sem_give(&ctx->sync);
	} else {
		if (ctx->signal) {
#ifdef CONFIG_SPI_SLAVE
			if (spi_context_is_slave(ctx) && !status) {
				/* Update the status so that it carries
				 * the number of received frames.
				 */
				status = ctx->recv_frames;
			}
#endif /* CONFIG_SPI_SLAVE */
			k_poll_signal_raise(ctx->signal, status);
		}

		if (!(ctx->config->operation & SPI_LOCK_ON)) {
			ctx->owner = NULL;
			k_sem_give(&ctx->lock);
		}
	}
#else
	ctx->sync_status = status;
	k_sem_give(&ctx->sync);
#endif /* CONFIG_SPI_ASYNC */
}
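
/*
 * Example: spi_context_complete() is meant to be called once per transfer,
 * typically from the driver's ISR once the last frame has been shifted. A
 * sketch, with my_spi_isr() and hw_transfer_error() hypothetical:
 *
 *	static void my_spi_isr(const struct device *dev)
 *	{
 *		struct my_spi_data *data = dev->data;
 *		int error = hw_transfer_error(data);
 *
 *		spi_context_cs_control(&data->ctx, false);
 *		spi_context_complete(&data->ctx, error);
 *	}
 */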

static inline
gpio_dt_flags_t spi_context_cs_active_level(struct spi_context *ctx)
{
	if (ctx->config->operation & SPI_CS_ACTIVE_HIGH) {
		return GPIO_ACTIVE_HIGH;
	}

	return GPIO_ACTIVE_LOW;
}

static inline void spi_context_cs_configure(struct spi_context *ctx)
{
	if (ctx->config->cs && ctx->config->cs->gpio_dev) {
		/* Validate CS active levels are equivalent */
		__ASSERT(spi_context_cs_active_level(ctx) ==
			 (ctx->config->cs->gpio_dt_flags & GPIO_ACTIVE_LOW),
			 "Devicetree and spi_context CS levels are not equal");
		gpio_pin_configure(ctx->config->cs->gpio_dev,
				   ctx->config->cs->gpio_pin,
				   ctx->config->cs->gpio_dt_flags |
				   GPIO_OUTPUT_INACTIVE);
	} else {
		LOG_INF("CS control inhibited (no GPIO device)");
	}
}
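
/*
 * Note: drivers call spi_context_cs_configure() from their configure path,
 * after ctx->config has been set, so that the CS GPIO starts out in its
 * inactive state before the first transfer. A sketch of the usual order:
 *
 *	data->ctx.config = spi_cfg;
 *	spi_context_cs_configure(&data->ctx);
 */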

static inline void _spi_context_cs_control(struct spi_context *ctx,
					   bool on, bool force_off)
{
	if (ctx->config && ctx->config->cs && ctx->config->cs->gpio_dev) {
		if (on) {
			gpio_pin_set(ctx->config->cs->gpio_dev,
				     ctx->config->cs->gpio_pin, 1);
			k_busy_wait(ctx->config->cs->delay);
		} else {
			if (!force_off &&
			    ctx->config->operation & SPI_HOLD_ON_CS) {
				return;
			}

			k_busy_wait(ctx->config->cs->delay);
			gpio_pin_set(ctx->config->cs->gpio_dev,
				     ctx->config->cs->gpio_pin, 0);
		}
	}
}

static inline void spi_context_cs_control(struct spi_context *ctx, bool on)
{
	_spi_context_cs_control(ctx, on, false);
}

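/*
 * Note: a driver would typically call the function below from its init or
 * error-recovery path to force the context back to a known, unlocked state
 * with CS deasserted, regardless of SPI_LOCK_ON/SPI_HOLD_ON_CS.
 */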
static inline void spi_context_unlock_unconditionally(struct spi_context *ctx)
{
	/* Force CS into its inactive state */
	_spi_context_cs_control(ctx, false, true);

	if (!k_sem_count_get(&ctx->lock)) {
		ctx->owner = NULL;
		k_sem_give(&ctx->lock);
	}
}

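/*
 * Advance to the first non-empty buffer in a spi_buf_set, starting at
 * *current. dfs is the data frame size in bytes, so the length stored in
 * *buf_len is expressed in frames rather than bytes.
 */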
static inline void *spi_context_get_next_buf(const struct spi_buf **current,
					     size_t *count,
					     size_t *buf_len,
					     uint8_t dfs)
{
	/* This loop skips zero-length buffers in the set, if any. */
	while (*count) {
		if (((*current)->len / dfs) != 0) {
			*buf_len = (*current)->len / dfs;
			return (*current)->buf;
		}
		++(*current);
		--(*count);
	}

	*buf_len = 0;
	return NULL;
}

static inline
void spi_context_buffers_setup(struct spi_context *ctx,
			       const struct spi_buf_set *tx_bufs,
			       const struct spi_buf_set *rx_bufs,
			       uint8_t dfs)
{
	LOG_DBG("tx_bufs %p - rx_bufs %p - %u", tx_bufs, rx_bufs, dfs);

	ctx->current_tx = tx_bufs ? tx_bufs->buffers : NULL;
	ctx->tx_count = ctx->current_tx ? tx_bufs->count : 0;
	ctx->tx_buf = (const uint8_t *)
		spi_context_get_next_buf(&ctx->current_tx, &ctx->tx_count,
					 &ctx->tx_len, dfs);

	ctx->current_rx = rx_bufs ? rx_bufs->buffers : NULL;
	ctx->rx_count = ctx->current_rx ? rx_bufs->count : 0;
	ctx->rx_buf = (uint8_t *)
		spi_context_get_next_buf(&ctx->current_rx, &ctx->rx_count,
					 &ctx->rx_len, dfs);

	ctx->sync_status = 0;

#ifdef CONFIG_SPI_SLAVE
	ctx->recv_frames = 0;
#endif /* CONFIG_SPI_SLAVE */

	LOG_DBG("current_tx %p (%zu), current_rx %p (%zu),"
		" tx buf/len %p/%zu, rx buf/len %p/%zu",
		ctx->current_tx, ctx->tx_count,
		ctx->current_rx, ctx->rx_count,
		ctx->tx_buf, ctx->tx_len, ctx->rx_buf, ctx->rx_len);
}
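
/*
 * Example: how buffers_setup pairs with the update/on helpers below in a
 * simple polled driver, one frame at a time. A minimal sketch for 8-bit
 * frames (dfs == 1), with shift_frame() standing in for the hypothetical
 * hardware access:
 *
 *	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);
 *	spi_context_cs_control(&data->ctx, true);
 *
 *	while (spi_context_tx_on(&data->ctx) ||
 *	       spi_context_rx_on(&data->ctx)) {
 *		shift_frame(data);
 *		spi_context_update_tx(&data->ctx, 1, 1);
 *		spi_context_update_rx(&data->ctx, 1, 1);
 *	}
 *
 *	spi_context_cs_control(&data->ctx, false);
 */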

static ALWAYS_INLINE
void spi_context_update_tx(struct spi_context *ctx, uint8_t dfs, uint32_t len)
{
	if (!ctx->tx_len) {
		return;
	}

	if (len > ctx->tx_len) {
		LOG_ERR("Update exceeds current buffer");
		return;
	}

	ctx->tx_len -= len;
	if (!ctx->tx_len) {
		/* Current buffer is done. Get the next one to be processed. */
		++ctx->current_tx;
		--ctx->tx_count;
		ctx->tx_buf = (const uint8_t *)
			spi_context_get_next_buf(&ctx->current_tx,
						 &ctx->tx_count,
						 &ctx->tx_len, dfs);
	} else if (ctx->tx_buf) {
		ctx->tx_buf += dfs * len;
	}

	LOG_DBG("tx buf/len %p/%zu", ctx->tx_buf, ctx->tx_len);
}

static ALWAYS_INLINE
bool spi_context_tx_on(struct spi_context *ctx)
{
	return !!(ctx->tx_len);
}

static ALWAYS_INLINE
bool spi_context_tx_buf_on(struct spi_context *ctx)
{
	return !!(ctx->tx_buf && ctx->tx_len);
}

static ALWAYS_INLINE
void spi_context_update_rx(struct spi_context *ctx, uint8_t dfs, uint32_t len)
{
#ifdef CONFIG_SPI_SLAVE
	if (spi_context_is_slave(ctx)) {
		ctx->recv_frames += len;
	}
#endif /* CONFIG_SPI_SLAVE */

	if (!ctx->rx_len) {
		return;
	}

	if (len > ctx->rx_len) {
		LOG_ERR("Update exceeds current buffer");
		return;
	}

	ctx->rx_len -= len;
	if (!ctx->rx_len) {
		/* Current buffer is done. Get the next one to be processed. */
		++ctx->current_rx;
		--ctx->rx_count;
		ctx->rx_buf = (uint8_t *)
			spi_context_get_next_buf(&ctx->current_rx,
						 &ctx->rx_count,
						 &ctx->rx_len, dfs);
	} else if (ctx->rx_buf) {
		ctx->rx_buf += dfs * len;
	}

	LOG_DBG("rx buf/len %p/%zu", ctx->rx_buf, ctx->rx_len);
}

static ALWAYS_INLINE
bool spi_context_rx_on(struct spi_context *ctx)
{
	return !!(ctx->rx_len);
}

static ALWAYS_INLINE
bool spi_context_rx_buf_on(struct spi_context *ctx)
{
	return !!(ctx->rx_buf && ctx->rx_len);
}
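
/*
 * Note: the *_buf_on variants above additionally check that the current
 * buffer pointer is non-NULL, which matters for buffer sets that use NULL
 * entries to clock out dummy frames or to discard received frames.
 */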

/*
 * Returns the maximum length of a transfer for which all currently active
 * directions have a continuous buffer, i.e. the maximum SPI transfer that
 * can be done with DMA that handles only non-scattered buffers.
 */
static inline size_t spi_context_max_continuous_chunk(struct spi_context *ctx)
{
	if (!ctx->tx_len) {
		return ctx->rx_len;
	} else if (!ctx->rx_len) {
		return ctx->tx_len;
	}

	return MIN(ctx->tx_len, ctx->rx_len);
}
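
/*
 * Example: a DMA-based driver would typically consume the buffer sets one
 * continuous chunk at a time. A sketch, with start_dma_transfer() a
 * hypothetical driver routine; the chunk length is in frames of size dfs:
 *
 *	size_t chunk = spi_context_max_continuous_chunk(&data->ctx);
 *
 *	start_dma_transfer(data, data->ctx.tx_buf, data->ctx.rx_buf, chunk);
 *	spi_context_update_tx(&data->ctx, dfs, chunk);
 *	spi_context_update_rx(&data->ctx, dfs, chunk);
 */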

static inline size_t spi_context_longest_current_buf(struct spi_context *ctx)
{
	return ctx->tx_len > ctx->rx_len ? ctx->tx_len : ctx->rx_len;
}

/* Note: unlike tx_len/rx_len, the totals below are in bytes, as the buffer
 * lengths are summed without dividing by the data frame size.
 */
static inline size_t spi_context_total_tx_len(struct spi_context *ctx)
{
	size_t n;
	size_t total_len = 0;

	for (n = 0; n < ctx->tx_count; ++n) {
		total_len += ctx->current_tx[n].len;
	}

	return total_len;
}

static inline size_t spi_context_total_rx_len(struct spi_context *ctx)
{
	size_t n;
	size_t total_len = 0;

	for (n = 0; n < ctx->rx_count; ++n) {
		total_len += ctx->current_rx[n].len;
	}

	return total_len;
}

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_DRIVERS_SPI_SPI_CONTEXT_H_ */