1 /*
2  * Copyright (c) 2017 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 /**
8  * @file
9  * @brief Private API for SPI drivers
10  */
11 
12 #ifndef ZEPHYR_DRIVERS_SPI_SPI_CONTEXT_H_
13 #define ZEPHYR_DRIVERS_SPI_SPI_CONTEXT_H_
14 
15 #include <zephyr/drivers/gpio.h>
16 #include <zephyr/drivers/spi.h>
17 #include <zephyr/kernel.h>
18 
19 #ifdef __cplusplus
20 extern "C" {
21 #endif
22 
/* Bit flags distinguishing master and slave operation modes; distinct bits
 * so they can be combined into a mask by drivers.
 */
enum spi_ctx_runtime_op_mode {
	SPI_CTX_RUNTIME_OP_MODE_MASTER = BIT(0),
	SPI_CTX_RUNTIME_OP_MODE_SLAVE  = BIT(1),
};
27 
struct spi_context {
	/* Configuration of the current/most recent transfer. */
	const struct spi_config *config;
	/* Config that currently owns the context lock; set by
	 * spi_context_lock and cleared when the lock is released.
	 */
	const struct spi_config *owner;
	/* CS GPIO specs, typically filled from devicetree via
	 * SPI_CONTEXT_CS_GPIOS_INITIALIZE.
	 */
	const struct gpio_dt_spec *cs_gpios;
	size_t num_cs_gpios;

	/* Serializes ownership of the SPI hardware (see spi_context_lock). */
	struct k_sem lock;
	/* Given by spi_context_complete to wake a synchronous waiter. */
	struct k_sem sync;
	/* Transfer status handed from completion to the waiter. */
	int sync_status;

#ifdef CONFIG_SPI_ASYNC
	/* User callback and its argument, invoked on async completion. */
	spi_callback_t callback;
	void *callback_data;
	/* True when the current transfer was started asynchronously. */
	bool asynchronous;
#endif /* CONFIG_SPI_ASYNC */
	/* Remaining TX buffers of the set and how many are left. */
	const struct spi_buf *current_tx;
	size_t tx_count;
	/* Remaining RX buffers of the set and how many are left. */
	const struct spi_buf *current_rx;
	size_t rx_count;

	/* Position within the active TX buffer and its remaining length
	 * in data frames (see spi_context_get_next_buf).
	 */
	const uint8_t *tx_buf;
	size_t tx_len;
	/* Position within the active RX buffer and its remaining length
	 * in data frames.
	 */
	uint8_t *rx_buf;
	size_t rx_len;

#ifdef CONFIG_SPI_SLAVE
	/* Number of frames received in slave mode, accumulated by
	 * spi_context_update_rx.
	 */
	int recv_frames;
#endif /* CONFIG_SPI_SLAVE */
};
57 
/* Static initializer for the context lock semaphore (count 0, limit 1).
 * NOTE(review): starts un-given; drivers appear to release it during init
 * (e.g. via spi_context_unlock_unconditionally) — confirm in driver init.
 */
#define SPI_CONTEXT_INIT_LOCK(_data, _ctx_name)				\
	._ctx_name.lock = Z_SEM_INITIALIZER(_data._ctx_name.lock, 0, 1)

/* Static initializer for the completion-signaling semaphore (count 0, limit 1). */
#define SPI_CONTEXT_INIT_SYNC(_data, _ctx_name)				\
	._ctx_name.sync = Z_SEM_INITIALIZER(_data._ctx_name.sync, 0, 1)
63 
/* Expands to one gpio_dt_spec initializer for the CS GPIO at index _idx. */
#define SPI_CONTEXT_CS_GPIO_SPEC_ELEM(_node_id, _prop, _idx)		\
	GPIO_DT_SPEC_GET_BY_IDX(_node_id, _prop, _idx),

/* Expands to initializers for every element of the node's cs-gpios property. */
#define SPI_CONTEXT_CS_GPIOS_FOREACH_ELEM(_node_id)				\
	DT_FOREACH_PROP_ELEM(_node_id, cs_gpios,				\
				SPI_CONTEXT_CS_GPIO_SPEC_ELEM)

/* Initializes the cs_gpios/num_cs_gpios members from devicetree.
 * When the node has no cs-gpios property, the array holds a single zeroed
 * spec and num_cs_gpios is 0, so iteration code skips it entirely.
 */
#define SPI_CONTEXT_CS_GPIOS_INITIALIZE(_node_id, _ctx_name)				\
	._ctx_name.cs_gpios = (const struct gpio_dt_spec []) {				\
		COND_CODE_1(DT_SPI_HAS_CS_GPIOS(_node_id),				\
			    (SPI_CONTEXT_CS_GPIOS_FOREACH_ELEM(_node_id)), ({0}))	\
	},										\
	._ctx_name.num_cs_gpios = DT_PROP_LEN_OR(_node_id, cs_gpios, 0),
77 
78 /*
79  * Checks if a spi config is the same as the one stored in the spi_context
80  * The intention of this function is to be used to check if a driver can skip
81  * some reconfiguration for a transfer in a fast code path.
82  */
spi_context_configured(struct spi_context * ctx,const struct spi_config * config)83 static inline bool spi_context_configured(struct spi_context *ctx,
84 					  const struct spi_config *config)
85 {
86 	return !!(ctx->config == config);
87 }
88 
89 /* Returns true if the spi configuration stored for this context
90  * specifies a slave mode configuration, returns false otherwise
91  */
spi_context_is_slave(struct spi_context * ctx)92 static inline bool spi_context_is_slave(struct spi_context *ctx)
93 {
94 	return (ctx->config->operation & SPI_OP_MODE_SLAVE);
95 }
96 
97 /*
98  * The purpose of the context lock is to synchronize the usage of the driver/hardware.
99  * The driver should call this function to claim or wait for ownership of the spi resource.
100  * Usually the appropriate time to call this is at the start of the transceive API implementation.
101  */
static inline void spi_context_lock(struct spi_context *ctx,
				    bool asynchronous,
				    spi_callback_t callback,
				    void *callback_data,
				    const struct spi_config *spi_cfg)
{
	/* A caller that previously locked with SPI_LOCK_ON and still owns
	 * the context must not take the (already taken) semaphore again.
	 */
	bool caller_already_owns = ((spi_cfg->operation & SPI_LOCK_ON) != 0U) &&
				   (k_sem_count_get(&ctx->lock) == 0U) &&
				   (ctx->owner == spi_cfg);

	if (!caller_already_owns) {
		k_sem_take(&ctx->lock, K_FOREVER);
		ctx->owner = spi_cfg;
	}

#ifdef CONFIG_SPI_ASYNC
	/* Record how completion should be delivered for this transfer. */
	ctx->asynchronous = asynchronous;
	ctx->callback = callback;
	ctx->callback_data = callback_data;
#endif /* CONFIG_SPI_ASYNC */
}
123 
124 /*
125  * This function must be called by a driver which has called spi_context_lock in order
126  * to release the ownership of the spi resource.
127  * Usually the appropriate time to call this would be at the end of a transfer that was
128  * initiated by a transceive API call, except in the case that the SPI_LOCK_ON bit was set
129  * in the configuration.
130  */
static inline void spi_context_release(struct spi_context *ctx, int status)
{
#ifdef CONFIG_SPI_SLAVE
	/* Keep holding the lock when the caller requested SPI_LOCK_ON and
	 * the transfer succeeded.
	 * NOTE(review): this early return is compiled only with
	 * CONFIG_SPI_SLAVE — confirm whether that guard is intentional.
	 */
	if (status >= 0 && (ctx->config->operation & SPI_LOCK_ON)) {
		return;
	}
#endif /* CONFIG_SPI_SLAVE */

#ifdef CONFIG_SPI_ASYNC
	/* Async transfers release the lock from spi_context_complete
	 * instead, unless the transfer failed to start (status < 0).
	 */
	if (!ctx->asynchronous || (status < 0)) {
		ctx->owner = NULL;
		k_sem_give(&ctx->lock);
	}
#else
	/* Synchronous-only build: release unless the caller asked to keep
	 * the lock across transfers with SPI_LOCK_ON.
	 */
	if (!(ctx->config->operation & SPI_LOCK_ON)) {
		ctx->owner = NULL;
		k_sem_give(&ctx->lock);
	}
#endif /* CONFIG_SPI_ASYNC */
}
151 
152 static inline size_t spi_context_total_tx_len(struct spi_context *ctx);
153 static inline size_t spi_context_total_rx_len(struct spi_context *ctx);
154 
155 /* This function essentially is a way for a driver to seamlessly implement both the
156  * synchronous transceive API and the asynchronous transceive_async API in the same way.
157  *
158  * The exact way this function is used may depend on driver implementation, but
159  * essentially this will block waiting for a signal from spi_context_complete,
160  * unless the transfer is asynchronous, in which case it does nothing in master mode.
161  */
spi_context_wait_for_completion(struct spi_context * ctx)162 static inline int spi_context_wait_for_completion(struct spi_context *ctx)
163 {
164 	int status = 0;
165 	bool wait;
166 
167 #ifdef CONFIG_SPI_ASYNC
168 	wait = !ctx->asynchronous;
169 #else
170 	wait = true;
171 #endif
172 
173 	if (wait) {
174 		k_timeout_t timeout;
175 
176 		/* Do not use any timeout in the slave mode, as in this case
177 		 * it is not known when the transfer will actually start and
178 		 * what the frequency will be.
179 		 */
180 		if (IS_ENABLED(CONFIG_SPI_SLAVE) && spi_context_is_slave(ctx)) {
181 			timeout = K_FOREVER;
182 		} else {
183 			uint32_t tx_len = spi_context_total_tx_len(ctx);
184 			uint32_t rx_len = spi_context_total_rx_len(ctx);
185 			uint32_t timeout_ms;
186 
187 			timeout_ms = MAX(tx_len, rx_len) * 8 * 1000 /
188 				     ctx->config->frequency;
189 			timeout_ms += CONFIG_SPI_COMPLETION_TIMEOUT_TOLERANCE;
190 
191 			timeout = K_MSEC(timeout_ms);
192 		}
193 
194 		if (k_sem_take(&ctx->sync, timeout)) {
195 			LOG_ERR("Timeout waiting for transfer complete");
196 			return -ETIMEDOUT;
197 		}
198 		status = ctx->sync_status;
199 	}
200 
201 #ifdef CONFIG_SPI_SLAVE
202 	if (spi_context_is_slave(ctx) && !status) {
203 		return ctx->recv_frames;
204 	}
205 #endif /* CONFIG_SPI_SLAVE */
206 
207 	return status;
208 }
209 
210 /* For synchronous transfers, this will signal to a thread waiting
211  * on spi_context_wait for completion.
212  *
213  * For asynchronous tranfers, this will call the async callback function
214  * with the user data.
215  */
static inline void spi_context_complete(struct spi_context *ctx,
					const struct device *dev,
					int status)
{
#ifdef CONFIG_SPI_ASYNC
	if (!ctx->asynchronous) {
		/* Synchronous transfer: wake the thread blocked in
		 * spi_context_wait_for_completion.
		 */
		ctx->sync_status = status;
		k_sem_give(&ctx->sync);
	} else {
		if (ctx->callback) {
#ifdef CONFIG_SPI_SLAVE
			if (spi_context_is_slave(ctx) && !status) {
				/* Let's update the status so it tells
				 * about number of received frames.
				 */
				status = ctx->recv_frames;
			}
#endif /* CONFIG_SPI_SLAVE */
			ctx->callback(dev, status, ctx->callback_data);
		}

		/* Async transfers release the lock here rather than in
		 * spi_context_release, unless SPI_LOCK_ON keeps it held.
		 */
		if (!(ctx->config->operation & SPI_LOCK_ON)) {
			ctx->owner = NULL;
			k_sem_give(&ctx->lock);
		}
	}
#else
	/* Async support disabled: always a synchronous wake-up. */
	ctx->sync_status = status;
	k_sem_give(&ctx->sync);
#endif /* CONFIG_SPI_ASYNC */
}
247 
248 /*
249  * This function initializes all the chip select GPIOs associated with a spi controller.
250  * The context first must be initialized using the SPI_CONTEXT_CS_GPIOS_INITIALIZE macro.
251  * This function should be called during the device init sequence so that
252  * all the CS lines are configured properly before the first transfer begins.
253  * Note: If a controller has native CS control in SPI hardware, they should also be initialized
254  * during device init by the driver with hardware-specific code.
255  */
spi_context_cs_configure_all(struct spi_context * ctx)256 static inline int spi_context_cs_configure_all(struct spi_context *ctx)
257 {
258 	int ret;
259 	const struct gpio_dt_spec *cs_gpio;
260 
261 	for (cs_gpio = ctx->cs_gpios; cs_gpio < &ctx->cs_gpios[ctx->num_cs_gpios]; cs_gpio++) {
262 		if (!device_is_ready(cs_gpio->port)) {
263 			LOG_ERR("CS GPIO port %s pin %d is not ready",
264 				cs_gpio->port->name, cs_gpio->pin);
265 			return -ENODEV;
266 		}
267 
268 		ret = gpio_pin_configure_dt(cs_gpio, GPIO_OUTPUT_INACTIVE);
269 		if (ret < 0) {
270 			return ret;
271 		}
272 	}
273 
274 	return 0;
275 }
276 
277 /* Helper function to control the GPIO CS, not meant to be used directly by drivers */
static inline void _spi_context_cs_control(struct spi_context *ctx,
					   bool on, bool force_off)
{
	/* Only act when a config exists and CS is GPIO-driven. */
	if (ctx->config == NULL || !spi_cs_is_gpio(ctx->config)) {
		return;
	}

	if (on) {
		/* Assert CS, then hold for the configured setup delay. */
		gpio_pin_set_dt(&ctx->config->cs.gpio, 1);
		k_busy_wait(ctx->config->cs.delay);
		return;
	}

	/* Leave CS asserted when SPI_HOLD_ON_CS is set, unless forced off. */
	if (!force_off && (ctx->config->operation & SPI_HOLD_ON_CS)) {
		return;
	}

	/* Hold delay before deassertion, then release the line. */
	k_busy_wait(ctx->config->cs.delay);
	gpio_pin_set_dt(&ctx->config->cs.gpio, 0);
}
296 
297 /* This function should be called by drivers to control the chip select line in master mode
298  * in the case of the CS being a GPIO. The de facto usage of the zephyr SPI API expects that the
299  * chip select be asserted throughout the entire transfer specified by a transceive call,
300  * ie all buffers in a spi_buf_set should be finished before deasserting CS. And usually
301  * the deassertion is at the end of the transfer, except in the case that the
302  * SPI_HOLD_ON_CS bit was set in the configuration.
303  */
static inline void spi_context_cs_control(struct spi_context *ctx, bool on)
{
	/* Non-forced variant: respects SPI_HOLD_ON_CS when deasserting. */
	_spi_context_cs_control(ctx, on, false);
}
308 
309 /* Forcefully releases the spi context and removes the owner, allowing taking the lock
310  * with spi_context_lock without the previous owner releasing the lock.
311  * This is usually used to aid in implementation of the spi_release driver API.
312  */
spi_context_unlock_unconditionally(struct spi_context * ctx)313 static inline void spi_context_unlock_unconditionally(struct spi_context *ctx)
314 {
315 	/* Forcing CS to go to inactive status */
316 	_spi_context_cs_control(ctx, false, true);
317 
318 	if (!k_sem_count_get(&ctx->lock)) {
319 		ctx->owner = NULL;
320 		k_sem_give(&ctx->lock);
321 	}
322 }
323 
324 /*
325  * Helper function for incrementing buffer pointer.
326  * Generally not needed to be used directly by drivers.
327  * Use spi_context_update_(tx/rx) instead.
328  */
static inline void *spi_context_get_next_buf(const struct spi_buf **current,
					     size_t *count,
					     size_t *buf_len,
					     uint8_t dfs)
{
	/* Skip any buffers that hold less than one whole data frame. */
	for (; *count > 0; ++(*current), --(*count)) {
		size_t frames = (*current)->len / dfs;

		if (frames != 0) {
			*buf_len = frames;
			return (*current)->buf;
		}
	}

	/* Buffer set is exhausted. */
	*buf_len = 0;
	return NULL;
}
347 
348 /*
349  * The spi context private api works with the driver by providing code to
350  * keep track of how much of the transfer has been completed. The driver
351  * calls functions to report when some tx or rx has finished, and the driver
352  * then can use the spi context to keep track of how much is left to do.
353  */
354 
355 /*
356  * This function must be called at the start of a transfer by the driver
357  * to initialize the spi context fields for tracking the progress.
358  */
359 static inline
spi_context_buffers_setup(struct spi_context * ctx,const struct spi_buf_set * tx_bufs,const struct spi_buf_set * rx_bufs,uint8_t dfs)360 void spi_context_buffers_setup(struct spi_context *ctx,
361 			       const struct spi_buf_set *tx_bufs,
362 			       const struct spi_buf_set *rx_bufs,
363 			       uint8_t dfs)
364 {
365 	LOG_DBG("tx_bufs %p - rx_bufs %p - %u", tx_bufs, rx_bufs, dfs);
366 
367 	ctx->current_tx = tx_bufs ? tx_bufs->buffers : NULL;
368 	ctx->tx_count = ctx->current_tx ? tx_bufs->count : 0;
369 	ctx->tx_buf = (const uint8_t *)
370 		spi_context_get_next_buf(&ctx->current_tx, &ctx->tx_count,
371 					 &ctx->tx_len, dfs);
372 
373 	ctx->current_rx = rx_bufs ? rx_bufs->buffers : NULL;
374 	ctx->rx_count = ctx->current_rx ? rx_bufs->count : 0;
375 	ctx->rx_buf = (uint8_t *)
376 		spi_context_get_next_buf(&ctx->current_rx, &ctx->rx_count,
377 					 &ctx->rx_len, dfs);
378 
379 	ctx->sync_status = 0;
380 
381 #ifdef CONFIG_SPI_SLAVE
382 	ctx->recv_frames = 0;
383 #endif /* CONFIG_SPI_SLAVE */
384 
385 	LOG_DBG("current_tx %p (%zu), current_rx %p (%zu),"
386 		" tx buf/len %p/%zu, rx buf/len %p/%zu",
387 		ctx->current_tx, ctx->tx_count,
388 		ctx->current_rx, ctx->rx_count,
389 		(void *)ctx->tx_buf, ctx->tx_len,
390 		(void *)ctx->rx_buf, ctx->rx_len);
391 }
392 
393 /*
394  * Should be called to update the tracking of TX being completed.
395  *
396  * Parameter "dfs" is the number of bytes needed to store a data frame.
397  * Parameter "len" is the number of data frames of TX that were sent.
398  */
399 static ALWAYS_INLINE
spi_context_update_tx(struct spi_context * ctx,uint8_t dfs,uint32_t len)400 void spi_context_update_tx(struct spi_context *ctx, uint8_t dfs, uint32_t len)
401 {
402 	if (!ctx->tx_len) {
403 		return;
404 	}
405 
406 	if (len > ctx->tx_len) {
407 		LOG_ERR("Update exceeds current buffer");
408 		return;
409 	}
410 
411 	ctx->tx_len -= len;
412 	if (!ctx->tx_len) {
413 		/* Current buffer is done. Get the next one to be processed. */
414 		++ctx->current_tx;
415 		--ctx->tx_count;
416 		ctx->tx_buf = (const uint8_t *)
417 			spi_context_get_next_buf(&ctx->current_tx,
418 						 &ctx->tx_count,
419 						 &ctx->tx_len, dfs);
420 	} else if (ctx->tx_buf) {
421 		ctx->tx_buf += dfs * len;
422 	}
423 
424 	LOG_DBG("tx buf/len %p/%zu", (void *)ctx->tx_buf, ctx->tx_len);
425 }
426 
427 /* Returns true if there is still TX buffers left in the spi_buf_set
428  * even if they are "null" (nop) buffers.
429  */
430 static ALWAYS_INLINE
spi_context_tx_on(struct spi_context * ctx)431 bool spi_context_tx_on(struct spi_context *ctx)
432 {
433 	return !!(ctx->tx_len);
434 }
435 
436 /* Similar to spi_context_tx_on, but only returns true if the current buffer is
437  * not a null/NOP placeholder.
438  */
439 static ALWAYS_INLINE
spi_context_tx_buf_on(struct spi_context * ctx)440 bool spi_context_tx_buf_on(struct spi_context *ctx)
441 {
442 	return !!(ctx->tx_buf && ctx->tx_len);
443 }
444 
445 /*
446  * Should be called to update the tracking of RX being completed.
447  *
448  * @param dfs is the number of bytes needed to store a data frame.
449  * @param len is the number of data frames of RX that were received.
450  */
451 static ALWAYS_INLINE
spi_context_update_rx(struct spi_context * ctx,uint8_t dfs,uint32_t len)452 void spi_context_update_rx(struct spi_context *ctx, uint8_t dfs, uint32_t len)
453 {
454 #ifdef CONFIG_SPI_SLAVE
455 	if (spi_context_is_slave(ctx)) {
456 		ctx->recv_frames += len;
457 	}
458 
459 #endif /* CONFIG_SPI_SLAVE */
460 
461 	if (!ctx->rx_len) {
462 		return;
463 	}
464 
465 	if (len > ctx->rx_len) {
466 		LOG_ERR("Update exceeds current buffer");
467 		return;
468 	}
469 
470 	ctx->rx_len -= len;
471 	if (!ctx->rx_len) {
472 		/* Current buffer is done. Get the next one to be processed. */
473 		++ctx->current_rx;
474 		--ctx->rx_count;
475 		ctx->rx_buf = (uint8_t *)
476 			spi_context_get_next_buf(&ctx->current_rx,
477 						 &ctx->rx_count,
478 						 &ctx->rx_len, dfs);
479 	} else if (ctx->rx_buf) {
480 		ctx->rx_buf += dfs * len;
481 	}
482 
483 	LOG_DBG("rx buf/len %p/%zu", (void *)ctx->rx_buf, ctx->rx_len);
484 }
485 
486 /* Returns true if there is still RX buffers left in the spi_buf_set
487  * even if they are "null" (nop) buffers.
488  */
489 static ALWAYS_INLINE
spi_context_rx_on(struct spi_context * ctx)490 bool spi_context_rx_on(struct spi_context *ctx)
491 {
492 	return !!(ctx->rx_len);
493 }
494 
495 /* Similar to spi_context_rx_on, but only returns true if the current buffer is
496  * not a null/NOP placeholder.
497  */
498 static ALWAYS_INLINE
spi_context_rx_buf_on(struct spi_context * ctx)499 bool spi_context_rx_buf_on(struct spi_context *ctx)
500 {
501 	return !!(ctx->rx_buf && ctx->rx_len);
502 }
503 
504 /*
505  * Returns the maximum length of a transfer for which all currently active
506  * directions have a continuous buffer, i.e. the maximum SPI transfer that
507  * can be done with DMA that handles only non-scattered buffers.
508  *
509  * In other words, returns the length of the smaller of the current RX or current TX buffer.
510  * Except if either RX or TX buf length is 0, returns the length of the other.
511  * And if both are 0 then will return 0 and should indicate transfer completion.
512  */
spi_context_max_continuous_chunk(struct spi_context * ctx)513 static inline size_t spi_context_max_continuous_chunk(struct spi_context *ctx)
514 {
515 	if (!ctx->tx_len) {
516 		return ctx->rx_len;
517 	} else if (!ctx->rx_len) {
518 		return ctx->tx_len;
519 	}
520 
521 	return MIN(ctx->tx_len, ctx->rx_len);
522 }
523 
524 /* Returns the length of the longer of the current RX or current TX buffer. */
spi_context_longest_current_buf(struct spi_context * ctx)525 static inline size_t spi_context_longest_current_buf(struct spi_context *ctx)
526 {
527 	return ctx->tx_len > ctx->rx_len ? ctx->tx_len : ctx->rx_len;
528 }
529 
530 /* Helper function, not intended to be used by drivers directly */
static size_t spi_context_count_tx_buf_lens(struct spi_context *ctx, size_t start_index)
{
	size_t total_len = 0;

	/* Sum byte lengths of the TX buffers from start_index onward. */
	for (size_t i = start_index; i < ctx->tx_count; i++) {
		total_len += ctx->current_tx[i].len;
	}

	return total_len;
}
542 
543 /* Helper function, not intended to be used by drivers directly */
static size_t spi_context_count_rx_buf_lens(struct spi_context *ctx, size_t start_index)
{
	size_t total_len = 0;

	/* Sum byte lengths of the RX buffers from start_index onward. */
	for (size_t i = start_index; i < ctx->rx_count; i++) {
		total_len += ctx->current_rx[i].len;
	}

	return total_len;
}
555 
556 
557 /* Returns the length of the sum of the remaining TX buffers in the buf set, including
558  * the current buffer in the total.
559  */
static inline size_t spi_context_total_tx_len(struct spi_context *ctx)
{
	/* Index 0 includes the current buffer's full length in the sum. */
	return spi_context_count_tx_buf_lens(ctx, 0);
}
564 
565 /* Returns the length of the sum of the remaining RX buffers in the buf set, including
566  * the current buffer in the total.
567  */
static inline size_t spi_context_total_rx_len(struct spi_context *ctx)
{
	/* Index 0 includes the current buffer's full length in the sum. */
	return spi_context_count_rx_buf_lens(ctx, 0);
}
572 
573 /* Similar to spi_context_total_tx_len, except does not count words that have been finished
574  * in the current buffer, ie only including what is remaining in the current buffer in the sum.
575  */
static inline size_t spi_context_tx_len_left(struct spi_context *ctx)
{
	/* ctx->tx_len is what remains of the current buffer; summing from
	 * index 1 avoids counting the current buffer twice.
	 */
	return ctx->tx_len + spi_context_count_tx_buf_lens(ctx, 1);
}
580 
581 /* Similar to spi_context_total_rx_len, except does not count words that have been finished
582  * in the current buffer, ie only including what is remaining in the current buffer in the sum.
583  */
static inline size_t spi_context_rx_len_left(struct spi_context *ctx)
{
	/* ctx->rx_len is what remains of the current buffer; summing from
	 * index 1 avoids counting the current buffer twice.
	 */
	return ctx->rx_len + spi_context_count_rx_buf_lens(ctx, 1);
}
588 
589 #ifdef __cplusplus
590 }
591 #endif
592 
593 #endif /* ZEPHYR_DRIVERS_SPI_SPI_CONTEXT_H_ */
594