/*
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Private API for SPI drivers
 */

#ifndef ZEPHYR_DRIVERS_SPI_SPI_CONTEXT_H_
#define ZEPHYR_DRIVERS_SPI_SPI_CONTEXT_H_

#include <zephyr/drivers/gpio.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/kernel.h>

#ifdef __cplusplus
extern "C" {
#endif

enum spi_ctx_runtime_op_mode {
	SPI_CTX_RUNTIME_OP_MODE_MASTER = BIT(0),
	SPI_CTX_RUNTIME_OP_MODE_SLAVE  = BIT(1),
};

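/**
 * Driver-side SPI context: per-instance bookkeeping shared by SPI drivers
 * built on this helper API. It tracks the currently applied configuration
 * and lock owner, the lock/sync semaphores, the devicetree chip-select
 * GPIOs and, during a transfer, the progress through the TX/RX buffer sets.
 */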
struct spi_context {
	const struct spi_config *config;
	const struct spi_config *owner;
	const struct gpio_dt_spec *cs_gpios;
	size_t num_cs_gpios;

	struct k_sem lock;
	struct k_sem sync;
	int sync_status;

#ifdef CONFIG_SPI_ASYNC
	spi_callback_t callback;
	void *callback_data;
	bool asynchronous;
#endif /* CONFIG_SPI_ASYNC */
	const struct spi_buf *current_tx;
	size_t tx_count;
	const struct spi_buf *current_rx;
	size_t rx_count;

	const uint8_t *tx_buf;
	size_t tx_len;
	uint8_t *rx_buf;
	size_t rx_len;

#ifdef CONFIG_SPI_SLAVE
	int recv_frames;
#endif /* CONFIG_SPI_SLAVE */
};

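/**
 * Initializers for the lock and sync semaphores embedded in the context.
 *
 * A minimal sketch of how a driver's per-instance data is typically set up
 * (the my_spi_* names are illustrative only, not part of this API):
 *
 * @code{.c}
 * struct my_spi_data {
 *	struct spi_context ctx;
 *	// driver-specific state ...
 * };
 *
 * static struct my_spi_data my_spi_data_0 = {
 *	SPI_CONTEXT_INIT_LOCK(my_spi_data_0, ctx),
 *	SPI_CONTEXT_INIT_SYNC(my_spi_data_0, ctx),
 *	SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(0), ctx)
 * };
 * @endcode
 */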
#define SPI_CONTEXT_INIT_LOCK(_data, _ctx_name)				\
	._ctx_name.lock = Z_SEM_INITIALIZER(_data._ctx_name.lock, 0, 1)

#define SPI_CONTEXT_INIT_SYNC(_data, _ctx_name)				\
	._ctx_name.sync = Z_SEM_INITIALIZER(_data._ctx_name.sync, 0, 1)

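/*
 * Helpers that gather all cs-gpios entries of an SPI bus devicetree node
 * into the context's cs_gpios array, together with its length. Drivers use
 * SPI_CONTEXT_CS_GPIOS_INITIALIZE() in the instance data initializer, as
 * shown in the sketch above.
 */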
#define SPI_CONTEXT_CS_GPIO_SPEC_ELEM(_node_id, _prop, _idx)		\
	GPIO_DT_SPEC_GET_BY_IDX(_node_id, _prop, _idx),

#define SPI_CONTEXT_CS_GPIOS_FOREACH_ELEM(_node_id)				\
	DT_FOREACH_PROP_ELEM(_node_id, cs_gpios,				\
				SPI_CONTEXT_CS_GPIO_SPEC_ELEM)

#define SPI_CONTEXT_CS_GPIOS_INITIALIZE(_node_id, _ctx_name)				\
	._ctx_name.cs_gpios = (const struct gpio_dt_spec []) {				\
		COND_CODE_1(DT_SPI_HAS_CS_GPIOS(_node_id),				\
			    (SPI_CONTEXT_CS_GPIOS_FOREACH_ELEM(_node_id)), ({0}))	\
	},										\
	._ctx_name.num_cs_gpios = DT_PROP_LEN_OR(_node_id, cs_gpios, 0),

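/*
 * Returns true if the context is already set up for this exact spi_config,
 * letting drivers skip reprogramming the peripheral.
 */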
static inline bool spi_context_configured(struct spi_context *ctx,
					  const struct spi_config *config)
{
	return !!(ctx->config == config);
}

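/* Returns true if the current configuration selects slave mode. */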
static inline bool spi_context_is_slave(struct spi_context *ctx)
{
	return (ctx->config->operation & SPI_OP_MODE_SLAVE);
}

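/*
 * Takes the context lock on behalf of spi_cfg (a no-op if the caller already
 * owns it through SPI_LOCK_ON) and, with CONFIG_SPI_ASYNC, records the
 * completion callback.
 *
 * A minimal sketch of the usual synchronous transceive flow built on these
 * helpers (my_* names are illustrative, error handling trimmed):
 *
 * @code{.c}
 * static int my_transceive(const struct device *dev,
 *			    const struct spi_config *config,
 *			    const struct spi_buf_set *tx_bufs,
 *			    const struct spi_buf_set *rx_bufs)
 * {
 *	struct my_spi_data *data = dev->data;
 *	struct spi_context *ctx = &data->ctx;
 *	int ret;
 *
 *	spi_context_lock(ctx, false, NULL, NULL, config);
 *
 *	// program the peripheral if needed, then remember the config
 *	ctx->config = config;
 *
 *	spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, 1);
 *	spi_context_cs_control(ctx, true);
 *
 *	// start the transfer (IRQ or DMA driven) ...
 *
 *	ret = spi_context_wait_for_completion(ctx);
 *	spi_context_cs_control(ctx, false);
 *	spi_context_release(ctx, ret);
 *
 *	return ret;
 * }
 * @endcode
 */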
static inline void spi_context_lock(struct spi_context *ctx,
				    bool asynchronous,
				    spi_callback_t callback,
				    void *callback_data,
				    const struct spi_config *spi_cfg)
{
	if ((spi_cfg->operation & SPI_LOCK_ON) &&
		(k_sem_count_get(&ctx->lock) == 0) &&
		(ctx->owner == spi_cfg)) {
			return;
	}

	k_sem_take(&ctx->lock, K_FOREVER);
	ctx->owner = spi_cfg;

#ifdef CONFIG_SPI_ASYNC
	ctx->asynchronous = asynchronous;
	ctx->callback = callback;
	ctx->callback_data = callback_data;
#endif /* CONFIG_SPI_ASYNC */
}

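/*
 * Releases the context at the end of a transceive call. The lock is kept
 * when SPI_LOCK_ON applies, and for an asynchronous transfer that was
 * started successfully the release is deferred to spi_context_complete().
 */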
static inline void spi_context_release(struct spi_context *ctx, int status)
{
#ifdef CONFIG_SPI_SLAVE
	if (status >= 0 && (ctx->config->operation & SPI_LOCK_ON)) {
		return;
	}
#endif /* CONFIG_SPI_SLAVE */

#ifdef CONFIG_SPI_ASYNC
	if (!ctx->asynchronous || (status < 0)) {
		ctx->owner = NULL;
		k_sem_give(&ctx->lock);
	}
#else
	if (!(ctx->config->operation & SPI_LOCK_ON)) {
		ctx->owner = NULL;
		k_sem_give(&ctx->lock);
	}
#endif /* CONFIG_SPI_ASYNC */
}

static inline size_t spi_context_total_tx_len(struct spi_context *ctx);
static inline size_t spi_context_total_rx_len(struct spi_context *ctx);

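/*
 * For a synchronous transfer, blocks until the driver signals completion
 * via spi_context_complete(). The timeout is derived from the total
 * transfer length and the bus frequency, e.g. 256 bytes at 1 MHz gives
 * 256 * 8 * 1000 / 1000000 = 2 ms plus
 * CONFIG_SPI_COMPLETION_TIMEOUT_TOLERANCE; slave mode waits forever.
 * Returns the completion status or, in slave mode, the number of received
 * frames.
 */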
static inline int spi_context_wait_for_completion(struct spi_context *ctx)
{
	int status = 0;
	bool wait;

#ifdef CONFIG_SPI_ASYNC
	wait = !ctx->asynchronous;
#else
	wait = true;
#endif

	if (wait) {
		k_timeout_t timeout;

		/* Do not use any timeout in the slave mode, as in this case
		 * it is not known when the transfer will actually start and
		 * what the frequency will be.
		 */
		if (IS_ENABLED(CONFIG_SPI_SLAVE) && spi_context_is_slave(ctx)) {
			timeout = K_FOREVER;
		} else {
			uint32_t tx_len = spi_context_total_tx_len(ctx);
			uint32_t rx_len = spi_context_total_rx_len(ctx);
			uint32_t timeout_ms;

			timeout_ms = MAX(tx_len, rx_len) * 8 * 1000 /
				     ctx->config->frequency;
			timeout_ms += CONFIG_SPI_COMPLETION_TIMEOUT_TOLERANCE;

			timeout = K_MSEC(timeout_ms);
		}

		if (k_sem_take(&ctx->sync, timeout)) {
			LOG_ERR("Timeout waiting for transfer complete");
			return -ETIMEDOUT;
		}
		status = ctx->sync_status;
	}

#ifdef CONFIG_SPI_SLAVE
	if (spi_context_is_slave(ctx) && !status) {
		return ctx->recv_frames;
	}
#endif /* CONFIG_SPI_SLAVE */

	return status;
}

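/*
 * Called by the driver when a transfer finishes: wakes the synchronous
 * waiter, or invokes the asynchronous callback and, unless SPI_LOCK_ON is
 * set, releases the lock.
 */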
static inline void spi_context_complete(struct spi_context *ctx,
					const struct device *dev,
					int status)
{
#ifdef CONFIG_SPI_ASYNC
	if (!ctx->asynchronous) {
		ctx->sync_status = status;
		k_sem_give(&ctx->sync);
	} else {
		if (ctx->callback) {
#ifdef CONFIG_SPI_SLAVE
			if (spi_context_is_slave(ctx) && !status) {
				/* Let's update the status so it tells
				 * about number of received frames.
				 */
				status = ctx->recv_frames;
			}
#endif /* CONFIG_SPI_SLAVE */
			ctx->callback(dev, status, ctx->callback_data);
		}

		if (!(ctx->config->operation & SPI_LOCK_ON)) {
			ctx->owner = NULL;
			k_sem_give(&ctx->lock);
		}
	}
#else
	ctx->sync_status = status;
	k_sem_give(&ctx->sync);
#endif /* CONFIG_SPI_ASYNC */
}

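/*
 * Configures every devicetree cs-gpios entry as an inactive output.
 * Typically called once from the driver's init function.
 */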
static inline int spi_context_cs_configure_all(struct spi_context *ctx)
{
	int ret;
	const struct gpio_dt_spec *cs_gpio;

	for (cs_gpio = ctx->cs_gpios; cs_gpio < &ctx->cs_gpios[ctx->num_cs_gpios]; cs_gpio++) {
		if (!device_is_ready(cs_gpio->port)) {
			LOG_ERR("CS GPIO port %s pin %d is not ready",
				cs_gpio->port->name, cs_gpio->pin);
			return -ENODEV;
		}

		ret = gpio_pin_configure_dt(cs_gpio, GPIO_OUTPUT_INACTIVE);
		if (ret < 0) {
			return ret;
		}
	}

	return 0;
}

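/*
 * Drives a GPIO-based chip select. On assertion the CS delay is applied
 * after setting the line; on deassertion the delay is applied first, and
 * the line is left asserted when SPI_HOLD_ON_CS is set unless force_off
 * is true.
 */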
static inline void _spi_context_cs_control(struct spi_context *ctx,
					   bool on, bool force_off)
{
	if (ctx->config && spi_cs_is_gpio(ctx->config)) {
		if (on) {
			gpio_pin_set_dt(&ctx->config->cs.gpio, 1);
			k_busy_wait(ctx->config->cs.delay);
		} else {
			if (!force_off &&
			    ctx->config->operation & SPI_HOLD_ON_CS) {
				return;
			}

			k_busy_wait(ctx->config->cs.delay);
			gpio_pin_set_dt(&ctx->config->cs.gpio, 0);
		}
	}
}

static inline void spi_context_cs_control(struct spi_context *ctx, bool on)
{
	_spi_context_cs_control(ctx, on, false);
}

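/*
 * Forces the chip select inactive and releases the lock if it is held;
 * useful e.g. when implementing the spi_release() API call or recovering
 * from an error.
 */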
static inline void spi_context_unlock_unconditionally(struct spi_context *ctx)
{
	/* Forcing CS to go to inactive status */
	_spi_context_cs_control(ctx, false, true);

	if (!k_sem_count_get(&ctx->lock)) {
		ctx->owner = NULL;
		k_sem_give(&ctx->lock);
	}
}

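/*
 * Returns the next non-empty buffer of the set and its length in frames,
 * where dfs is the data frame size in bytes; NULL and a zero length when
 * the set is exhausted.
 */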
static inline void *spi_context_get_next_buf(const struct spi_buf **current,
					     size_t *count,
					     size_t *buf_len,
					     uint8_t dfs)
{
	/* This loop skips zero-length buffers in the set, if any. */
	while (*count) {
		if (((*current)->len / dfs) != 0) {
			*buf_len = (*current)->len / dfs;
			return (*current)->buf;
		}
		++(*current);
		--(*count);
	}

	*buf_len = 0;
	return NULL;
}

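/*
 * Captures the TX/RX buffer sets in the context and primes the first
 * buffers. dfs is the data frame size in bytes (1 for frames of up to
 * 8 bits, 2 for up to 16 bits, and so on).
 */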
static inline
void spi_context_buffers_setup(struct spi_context *ctx,
			       const struct spi_buf_set *tx_bufs,
			       const struct spi_buf_set *rx_bufs,
			       uint8_t dfs)
{
	LOG_DBG("tx_bufs %p - rx_bufs %p - %u", tx_bufs, rx_bufs, dfs);

	ctx->current_tx = tx_bufs ? tx_bufs->buffers : NULL;
	ctx->tx_count = ctx->current_tx ? tx_bufs->count : 0;
	ctx->tx_buf = (const uint8_t *)
		spi_context_get_next_buf(&ctx->current_tx, &ctx->tx_count,
					 &ctx->tx_len, dfs);

	ctx->current_rx = rx_bufs ? rx_bufs->buffers : NULL;
	ctx->rx_count = ctx->current_rx ? rx_bufs->count : 0;
	ctx->rx_buf = (uint8_t *)
		spi_context_get_next_buf(&ctx->current_rx, &ctx->rx_count,
					 &ctx->rx_len, dfs);

	ctx->sync_status = 0;

#ifdef CONFIG_SPI_SLAVE
	ctx->recv_frames = 0;
#endif /* CONFIG_SPI_SLAVE */

	LOG_DBG("current_tx %p (%zu), current_rx %p (%zu),"
		" tx buf/len %p/%zu, rx buf/len %p/%zu",
		ctx->current_tx, ctx->tx_count,
		ctx->current_rx, ctx->rx_count,
		(void *)ctx->tx_buf, ctx->tx_len,
		(void *)ctx->rx_buf, ctx->rx_len);
}

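/*
 * Advances the TX bookkeeping by len frames: consumes part of the current
 * buffer or, when it is finished, moves on to the next one.
 * spi_context_update_rx() below does the same for the RX side.
 */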
static ALWAYS_INLINE
void spi_context_update_tx(struct spi_context *ctx, uint8_t dfs, uint32_t len)
{
	if (!ctx->tx_len) {
		return;
	}

	if (len > ctx->tx_len) {
		LOG_ERR("Update exceeds current buffer");
		return;
	}

	ctx->tx_len -= len;
	if (!ctx->tx_len) {
		/* Current buffer is done. Get the next one to be processed. */
		++ctx->current_tx;
		--ctx->tx_count;
		ctx->tx_buf = (const uint8_t *)
			spi_context_get_next_buf(&ctx->current_tx,
						 &ctx->tx_count,
						 &ctx->tx_len, dfs);
	} else if (ctx->tx_buf) {
		ctx->tx_buf += dfs * len;
	}

	LOG_DBG("tx buf/len %p/%zu", (void *)ctx->tx_buf, ctx->tx_len);
}

static ALWAYS_INLINE
bool spi_context_tx_on(struct spi_context *ctx)
{
	return !!(ctx->tx_len);
}

static ALWAYS_INLINE
bool spi_context_tx_buf_on(struct spi_context *ctx)
{
	return !!(ctx->tx_buf && ctx->tx_len);
}

static ALWAYS_INLINE
void spi_context_update_rx(struct spi_context *ctx, uint8_t dfs, uint32_t len)
{
#ifdef CONFIG_SPI_SLAVE
	if (spi_context_is_slave(ctx)) {
		ctx->recv_frames += len;
	}

#endif /* CONFIG_SPI_SLAVE */

	if (!ctx->rx_len) {
		return;
	}

	if (len > ctx->rx_len) {
		LOG_ERR("Update exceeds current buffer");
		return;
	}

	ctx->rx_len -= len;
	if (!ctx->rx_len) {
		/* Current buffer is done. Get the next one to be processed. */
		++ctx->current_rx;
		--ctx->rx_count;
		ctx->rx_buf = (uint8_t *)
			spi_context_get_next_buf(&ctx->current_rx,
						 &ctx->rx_count,
						 &ctx->rx_len, dfs);
	} else if (ctx->rx_buf) {
		ctx->rx_buf += dfs * len;
	}

	LOG_DBG("rx buf/len %p/%zu", (void *)ctx->rx_buf, ctx->rx_len);
}

static ALWAYS_INLINE
bool spi_context_rx_on(struct spi_context *ctx)
{
	return !!(ctx->rx_len);
}

static ALWAYS_INLINE
bool spi_context_rx_buf_on(struct spi_context *ctx)
{
	return !!(ctx->rx_buf && ctx->rx_len);
}

/*
 * Returns the maximum length of a transfer for which all currently active
 * directions have a continuous buffer, i.e. the maximum SPI transfer that
 * can be done with DMA that handles only non-scattered buffers.
 */
static inline size_t spi_context_max_continuous_chunk(struct spi_context *ctx)
{
	if (!ctx->tx_len) {
		return ctx->rx_len;
	} else if (!ctx->rx_len) {
		return ctx->tx_len;
	}

	return MIN(ctx->tx_len, ctx->rx_len);
}

static inline size_t spi_context_longest_current_buf(struct spi_context *ctx)
{
	return ctx->tx_len > ctx->rx_len ? ctx->tx_len : ctx->rx_len;
}

static inline size_t spi_context_total_tx_len(struct spi_context *ctx)
{
	size_t n;
	size_t total_len = 0;

	for (n = 0; n < ctx->tx_count; ++n) {
		total_len += ctx->current_tx[n].len;
	}

	return total_len;
}

static inline size_t spi_context_total_rx_len(struct spi_context *ctx)
{
	size_t n;
	size_t total_len = 0;

	for (n = 0; n < ctx->rx_count; ++n) {
		total_len += ctx->current_rx[n].len;
	}

	return total_len;
}

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_DRIVERS_SPI_SPI_CONTEXT_H_ */