1 /*
2 * Copyright (c) 2024 ITE Technology Inc.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #define DT_DRV_COMPAT ite_it8xxx2_spi
8
9 #include <zephyr/logging/log.h>
10 LOG_MODULE_REGISTER(spi_it8xxx2, CONFIG_SPI_LOG_LEVEL);
11
12 #include <zephyr/irq.h>
13 #include <zephyr/drivers/spi.h>
14 #include <zephyr/drivers/pinctrl.h>
15 #include <zephyr/pm/policy.h>
16 #include <soc.h>
17
18 #include "spi_context.h"
19
20 #define BYTE_0(x) (uint8_t)(((x) >> 0) & 0xFF)
21 #define BYTE_1(x) (uint8_t)(((x) >> 8) & 0xFF)
22 #define BYTE_2(x) (uint8_t)(((x) >> 16) & 0xFF)
23
24 #define SRAM_BASE_ADDR DT_REG_ADDR(DT_NODELABEL(sram0))
25
26 #define SPI_CHIP_SELECT_COUNT 2
27 #define SPI_CMDQ_WR_CMD_LEN_MAX 16
28 #define SPI_CMDQ_DATA_LEN_MAX 0xFFFF
29
30 /* IT8xxx2 SSPI Registers Definition */
31 #define SPI01_CTRL1 0x01
32 #define CLOCK_POLARTY BIT(6)
33 #define SSCK_FREQ_MASK (BIT(2) | BIT(3) | BIT(4))
34 #define INTERRUPT_EN BIT(1)
35
36 #define SPI04_CTRL3 0x04
37 #define AUTO_MODE BIT(5)
38
39 #define SPI05_CH0_CMD_ADDR_LB 0x05
40 #define SPI06_CH0_CMD_ADDR_HB 0x06
41 #define SPI0C_INT_STS 0x0C
42 #define SPI_CMDQ_BUS_END_INT_MASK BIT(4)
43 #define SPI_DMA_RBUF_1_FULL BIT(2)
44 #define SPI_DMA_RBUF_0_FULL BIT(1)
45 #define SPI_CMDQ_BUS_END BIT(0)
46
47 #define SPI0D_CTRL5 0x0D
48 #define CH1_SEL_CMDQ BIT(5)
49 #define CH0_SEL_CMDQ BIT(4)
50 #define SCK_FREQ_DIV_1_EN BIT(1)
51 #define CMDQ_MODE_EN BIT(0)
52
53 #define SPI0E_CH0_WR_MEM_ADDR_LB 0x0E
54 #define SPI0F_CH0_WR_MEM_ADDR_HB 0x0F
55 #define SPI12_CH1_CMD_ADDR_LB 0x12
56 #define SPI13_CH1_CMD_ADDR_HB 0x13
57 #define SPI14_CH1_WR_MEM_ADDR_LB 0x14
58 #define SPI15_CH1_WR_MEM_ADDR_HB 0x15
59 #define SPI21_CH0_CMD_ADDR_HB2 0x21
60 #define SPI23_CH0_WR_MEM_ADDR_HB2 0x23
61 #define SPI25_CH1_CMD_ADDR_HB2 0x25
62 #define SPI27_CH1_WR_MEM_ADDR_HB2 0x27
63
/*
 * Layout of one command queue (CMDQ) entry consumed by the SSPI hardware.
 * The entry lives in SRAM and its offset from the SRAM base is programmed
 * into the CHx command address registers, so field order and packing must
 * match the hardware format exactly — do not reorder or pad.
 */
struct spi_it8xxx2_cmdq_data {
	/* Number of valid inline command bytes in write_data[] (0..SPI_CMDQ_WR_CMD_LEN_MAX) */
	uint8_t spi_write_cmd_length;

	union {
		uint8_t value;
		struct {
			uint8_t cmd_end: 1;       /* last entry of the queue */
			uint8_t read_write: 1;    /* 1 = read transaction */
			uint8_t auto_check_sts: 1;
			uint8_t cs_active: 1;     /* keep CS asserted after this entry */
			uint8_t reserved: 1;
			uint8_t cmd_mode: 2;
			uint8_t dtr: 1;
		} __packed fields;
	} __packed command;

	/* DMA payload length, split into low/high bytes */
	uint8_t data_length_lb;
	uint8_t data_length_hb;
	/* DMA payload address as offset from SRAM base, low/high bytes.
	 * The tx paths additionally OR bits [17:16] of the offset into
	 * check_bit_mask (see spi_it8xxx2_tx()).
	 */
	uint8_t data_addr_lb;
	uint8_t data_addr_hb;
	uint8_t check_bit_mask;
	uint8_t check_bit_value;

	/* Inline command bytes sent before any DMA payload */
	uint8_t write_data[SPI_CMDQ_WR_CMD_LEN_MAX];
};
89
/* Static per-instance configuration (from devicetree) */
struct spi_it8xxx2_config {
	mm_reg_t base;                         /* SSPI register block base address */
	const struct pinctrl_dev_config *pcfg; /* pin configuration applied at init */
	uint8_t spi_irq;                       /* IRQ line connected in spi_it8xxx2_init() */
};
95
/* Mutable per-instance runtime state */
struct spi_it8xxx2_data {
	struct spi_context ctx;                 /* common Zephyr SPI transfer context */
	struct spi_it8xxx2_cmdq_data cmdq_data; /* CMDQ entry fetched by the hardware */
	size_t transfer_len;                    /* tx bytes covered by the in-flight entry */
	size_t receive_len;                     /* rx bytes expected for the in-flight entry */
};
102
spi_it8xxx2_set_freq(const struct device * dev,const uint32_t frequency)103 static inline int spi_it8xxx2_set_freq(const struct device *dev, const uint32_t frequency)
104 {
105 const struct spi_it8xxx2_config *cfg = dev->config;
106 uint8_t freq_div[8] = {2, 4, 6, 8, 10, 12, 14, 16};
107 uint32_t clk_pll, clk_sspi;
108 uint8_t reg_val;
109
110 clk_pll = chip_get_pll_freq();
111 clk_sspi = clk_pll / (((IT8XXX2_ECPM_SCDCR2 & 0xF0) >> 4) + 1U);
112 if (frequency < (clk_sspi / 16) || frequency > clk_sspi) {
113 LOG_ERR("Unsupported frequency %d", frequency);
114 return -ENOTSUP;
115 }
116
117 if (frequency == clk_sspi) {
118 sys_write8(sys_read8(cfg->base + SPI0D_CTRL5) | SCK_FREQ_DIV_1_EN,
119 cfg->base + SPI0D_CTRL5);
120 } else {
121 for (int i = 0; i <= ARRAY_SIZE(freq_div); i++) {
122 if (i == ARRAY_SIZE(freq_div)) {
123 LOG_ERR("Unknown frequency %d", frequency);
124 return -ENOTSUP;
125 }
126 if (frequency == (clk_sspi / freq_div[i])) {
127 sys_write8(sys_read8(cfg->base + SPI0D_CTRL5) & ~SCK_FREQ_DIV_1_EN,
128 cfg->base + SPI0D_CTRL5);
129 reg_val = sys_read8(cfg->base + SPI01_CTRL1);
130 reg_val = (reg_val & (~SSCK_FREQ_MASK)) | (i << 2);
131 sys_write8(reg_val, cfg->base + SPI01_CTRL1);
132 break;
133 }
134 }
135 }
136
137 LOG_DBG("freq: pll %dHz, sspi %dHz, ssck %dHz", clk_pll, clk_sspi, frequency);
138 return 0;
139 }
140
spi_it8xxx2_configure(const struct device * dev,const struct spi_config * spi_cfg)141 static int spi_it8xxx2_configure(const struct device *dev, const struct spi_config *spi_cfg)
142 {
143 const struct spi_it8xxx2_config *cfg = dev->config;
144 struct spi_it8xxx2_data *data = dev->data;
145 struct spi_context *ctx = &data->ctx;
146 int ret;
147 uint8_t reg_val;
148
149 if (spi_cfg->slave > (SPI_CHIP_SELECT_COUNT - 1)) {
150 LOG_ERR("Slave %d is greater than %d", spi_cfg->slave, SPI_CHIP_SELECT_COUNT - 1);
151 return -EINVAL;
152 }
153
154 LOG_DBG("chip select: %d, operation: 0x%x", spi_cfg->slave, spi_cfg->operation);
155
156 if (SPI_OP_MODE_GET(spi_cfg->operation) == SPI_OP_MODE_SLAVE) {
157 LOG_ERR("Unsupported SPI slave mode");
158 return -ENOTSUP;
159 }
160
161 if (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_LOOP) {
162 LOG_ERR("Unsupported loopback mode");
163 return -ENOTSUP;
164 }
165
166 if (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA) {
167 LOG_ERR("Unsupported cpha mode");
168 return -ENOTSUP;
169 }
170
171 reg_val = sys_read8(cfg->base + SPI01_CTRL1);
172 if (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL) {
173 reg_val |= CLOCK_POLARTY;
174 } else {
175 reg_val &= ~CLOCK_POLARTY;
176 }
177 sys_write8(reg_val, cfg->base + SPI01_CTRL1);
178
179 if (SPI_WORD_SIZE_GET(spi_cfg->operation) != 8) {
180 return -ENOTSUP;
181 }
182
183 if (IS_ENABLED(CONFIG_SPI_EXTENDED_MODES) &&
184 (spi_cfg->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) {
185 LOG_ERR("Only single line mode is supported");
186 return -EINVAL;
187 }
188
189 ret = spi_it8xxx2_set_freq(dev, spi_cfg->frequency);
190 if (ret) {
191 return ret;
192 }
193
194 reg_val = sys_read8(cfg->base + SPI0C_INT_STS);
195 reg_val = (reg_val & (~SPI_CMDQ_BUS_END_INT_MASK));
196 sys_write8(reg_val, cfg->base + SPI0C_INT_STS);
197
198 ctx->config = spi_cfg;
199 return 0;
200 }
201
spi_it8xxx2_transfer_done(struct spi_context * ctx)202 static inline bool spi_it8xxx2_transfer_done(struct spi_context *ctx)
203 {
204 return !spi_context_tx_buf_on(ctx) && !spi_context_rx_buf_on(ctx);
205 }
206
/*
 * Finish the current transaction with @status: wake the waiter (or fire
 * the async callback), release a GPIO chip select if one is used, then
 * drop the idle/PM constraints taken in transceive().
 */
static void spi_it8xxx2_complete(const struct device *dev, const int status)
{
	struct spi_it8xxx2_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;

	spi_context_complete(ctx, dev, status);
	if (spi_cs_is_gpio(ctx->config)) {
		spi_context_cs_control(ctx, false);
	}
	/* Permit to enter power policy and idle mode. */
	pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
	chip_permit_idle();
}
220
spi_it8xxx2_tx(const struct device * dev)221 static inline void spi_it8xxx2_tx(const struct device *dev)
222 {
223 struct spi_it8xxx2_data *data = dev->data;
224 struct spi_context *ctx = &data->ctx;
225 uint32_t mem_address;
226
227 if (ctx->tx_count > 1) {
228 data->cmdq_data.command.fields.cs_active = 1;
229 } else {
230 data->cmdq_data.command.fields.cs_active = 0;
231 }
232 data->cmdq_data.command.fields.cmd_end = 1;
233 data->cmdq_data.command.fields.read_write = 0;
234 if (ctx->tx_len <= SPI_CMDQ_WR_CMD_LEN_MAX) {
235 data->cmdq_data.spi_write_cmd_length = ctx->tx_len;
236 memcpy(data->cmdq_data.write_data, ctx->tx_buf, ctx->tx_len);
237 data->cmdq_data.data_length_lb = 0;
238 data->cmdq_data.data_length_hb = 0;
239 data->cmdq_data.data_addr_lb = 0;
240 data->cmdq_data.data_addr_hb = 0;
241 } else {
242 data->cmdq_data.spi_write_cmd_length = SPI_CMDQ_WR_CMD_LEN_MAX;
243 memcpy(data->cmdq_data.write_data, ctx->tx_buf, SPI_CMDQ_WR_CMD_LEN_MAX);
244 data->cmdq_data.data_length_lb = BYTE_0(ctx->tx_len - SPI_CMDQ_WR_CMD_LEN_MAX);
245 data->cmdq_data.data_length_hb = BYTE_1(ctx->tx_len - SPI_CMDQ_WR_CMD_LEN_MAX);
246 mem_address = (uint32_t)(ctx->tx_buf + SPI_CMDQ_WR_CMD_LEN_MAX) - SRAM_BASE_ADDR;
247 data->cmdq_data.data_addr_lb = BYTE_0(mem_address);
248 data->cmdq_data.data_addr_hb = BYTE_1(mem_address);
249 data->cmdq_data.check_bit_mask |= ((BYTE_2(mem_address)) & 0x03);
250 }
251 data->transfer_len = ctx->tx_len;
252 }
253
spi_it8xxx2_rx(const struct device * dev)254 static inline void spi_it8xxx2_rx(const struct device *dev)
255 {
256 struct spi_it8xxx2_data *data = dev->data;
257 struct spi_context *ctx = &data->ctx;
258
259 if (ctx->rx_count > 1) {
260 data->cmdq_data.command.fields.cs_active = 1;
261 } else {
262 data->cmdq_data.command.fields.cs_active = 0;
263 }
264 data->cmdq_data.command.fields.cmd_end = 1;
265 data->cmdq_data.command.fields.read_write = 1;
266 data->cmdq_data.spi_write_cmd_length = 0;
267 data->cmdq_data.data_length_lb = BYTE_0(ctx->rx_len);
268 data->cmdq_data.data_length_hb = BYTE_1(ctx->rx_len);
269 data->cmdq_data.data_addr_lb = 0;
270 data->cmdq_data.data_addr_hb = 0;
271 data->receive_len = ctx->rx_len;
272 }
273
/*
 * Build a CMDQ entry for a transfer with both TX and RX parts.
 *
 * If the TX data fits in the inline command bytes, a single
 * write-then-read entry is issued (read_write = 1) covering both
 * directions.  Otherwise a write-only entry is issued with cs_active = 1
 * so chip select stays asserted, and the RX part is produced by the entry
 * built on the next spi_it8xxx2_next_xfer() iteration (receive_len is 0
 * for this entry).
 *
 * NOTE(review): when tx_buf == rx_buf, both contexts are advanced here
 * *before* transfer_len/receive_len are captured, and the ISR advances
 * them again by the captured values — verify this double update is the
 * intended handling of the shared-buffer case.
 */
static inline void spi_it8xxx2_tx_rx(const struct device *dev)
{
	struct spi_it8xxx2_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	uint32_t mem_address;

	data->cmdq_data.command.fields.cmd_end = 1;
	if (ctx->tx_len <= SPI_CMDQ_WR_CMD_LEN_MAX) {
		/* TX fits inline: one combined write-then-read entry */
		data->cmdq_data.command.fields.cs_active = 0;
		data->cmdq_data.command.fields.read_write = 1;
		data->cmdq_data.spi_write_cmd_length = ctx->tx_len;
		memcpy(data->cmdq_data.write_data, ctx->tx_buf, ctx->tx_len);
		if (ctx->rx_buf == ctx->tx_buf) {
			spi_context_update_tx(ctx, 1, ctx->tx_len);
			spi_context_update_rx(ctx, 1, ctx->rx_len);
		}

		data->cmdq_data.data_length_lb = BYTE_0(ctx->rx_len);
		data->cmdq_data.data_length_hb = BYTE_1(ctx->rx_len);
		data->cmdq_data.data_addr_lb = 0;
		data->cmdq_data.data_addr_hb = 0;
		data->transfer_len = ctx->tx_len;
		data->receive_len = ctx->rx_len;
	} else {
		/* TX too long for the inline area: write-only entry, CS held */
		data->cmdq_data.command.fields.cs_active = 1;
		data->cmdq_data.command.fields.read_write = 0;
		data->cmdq_data.spi_write_cmd_length = SPI_CMDQ_WR_CMD_LEN_MAX;
		memcpy(data->cmdq_data.write_data, ctx->tx_buf, SPI_CMDQ_WR_CMD_LEN_MAX);
		data->cmdq_data.data_length_lb = BYTE_0(ctx->tx_len - SPI_CMDQ_WR_CMD_LEN_MAX);
		data->cmdq_data.data_length_hb = BYTE_1(ctx->tx_len - SPI_CMDQ_WR_CMD_LEN_MAX);

		/* Remaining TX bytes are DMA-fetched from their SRAM offset */
		mem_address = (uint32_t)(ctx->tx_buf + SPI_CMDQ_WR_CMD_LEN_MAX) - SRAM_BASE_ADDR;
		data->cmdq_data.data_addr_lb = BYTE_0(mem_address);
		data->cmdq_data.data_addr_hb = BYTE_1(mem_address);
		data->cmdq_data.check_bit_mask |= ((BYTE_2(mem_address)) & 0x03);
		if (ctx->rx_buf == ctx->tx_buf) {
			spi_context_update_tx(ctx, 1, ctx->tx_len);
			spi_context_update_rx(ctx, 1, ctx->rx_len);
		}
		data->transfer_len = ctx->tx_len;
		data->receive_len = 0;
	}
}
317
spi_it8xxx2_next_xfer(const struct device * dev)318 static int spi_it8xxx2_next_xfer(const struct device *dev)
319 {
320 const struct spi_it8xxx2_config *cfg = dev->config;
321 struct spi_it8xxx2_data *data = dev->data;
322 struct spi_context *ctx = &data->ctx;
323 uint8_t reg_val;
324 uint32_t cmd_address, mem_address;
325
326 if (spi_it8xxx2_transfer_done(ctx)) {
327 spi_it8xxx2_complete(dev, 0);
328 return 0;
329 }
330
331 if (spi_cs_is_gpio(ctx->config)) {
332 spi_context_cs_control(ctx, true);
333 }
334
335 if (spi_context_longest_current_buf(ctx) > SPI_CMDQ_DATA_LEN_MAX) {
336 return -EINVAL;
337 }
338
339 memset(&data->cmdq_data, 0, sizeof(struct spi_it8xxx2_cmdq_data));
340
341 /* Prepare command queue data */
342 if (!spi_context_tx_on(ctx)) {
343 /* rx only, nothing to tx */
344 spi_it8xxx2_rx(dev);
345 } else if (!spi_context_rx_on(ctx)) {
346 /* tx only, nothing to rx */
347 spi_it8xxx2_tx(dev);
348 } else {
349 spi_it8xxx2_tx_rx(dev);
350 }
351
352 cmd_address = (uint32_t)(&data->cmdq_data) - SRAM_BASE_ADDR;
353 mem_address = (uint32_t)ctx->rx_buf - SRAM_BASE_ADDR;
354 if (ctx->config->slave == 0) {
355 sys_write8(BYTE_0(cmd_address), cfg->base + SPI05_CH0_CMD_ADDR_LB);
356 sys_write8(BYTE_1(cmd_address), cfg->base + SPI06_CH0_CMD_ADDR_HB);
357 sys_write8(BYTE_2(cmd_address), cfg->base + SPI21_CH0_CMD_ADDR_HB2);
358
359 if (spi_context_rx_on(ctx)) {
360 sys_write8(BYTE_0(mem_address), cfg->base + SPI0E_CH0_WR_MEM_ADDR_LB);
361 sys_write8(BYTE_1(mem_address), cfg->base + SPI0F_CH0_WR_MEM_ADDR_HB);
362 sys_write8(BYTE_2(mem_address), cfg->base + SPI23_CH0_WR_MEM_ADDR_HB2);
363 }
364 } else {
365 sys_write8(BYTE_0(cmd_address), cfg->base + SPI12_CH1_CMD_ADDR_LB);
366 sys_write8(BYTE_1(cmd_address), cfg->base + SPI13_CH1_CMD_ADDR_HB);
367 sys_write8(BYTE_2(cmd_address), cfg->base + SPI25_CH1_CMD_ADDR_HB2);
368
369 if (spi_context_rx_on(ctx)) {
370 sys_write8(BYTE_0(mem_address), cfg->base + SPI14_CH1_WR_MEM_ADDR_LB);
371 sys_write8(BYTE_1(mem_address), cfg->base + SPI15_CH1_WR_MEM_ADDR_HB);
372 sys_write8(BYTE_2(mem_address), cfg->base + SPI27_CH1_WR_MEM_ADDR_HB2);
373 }
374 }
375
376 sys_write8(sys_read8(cfg->base + SPI01_CTRL1) | INTERRUPT_EN, cfg->base + SPI01_CTRL1);
377
378 reg_val = sys_read8(cfg->base + SPI0D_CTRL5);
379 reg_val |= (ctx->config->slave == 0) ? CH0_SEL_CMDQ : CH1_SEL_CMDQ;
380 sys_write8(reg_val | CMDQ_MODE_EN, cfg->base + SPI0D_CTRL5);
381 return 0;
382 }
383
/*
 * Common transceive path for both the sync and async API entry points.
 *
 * Takes the context lock, applies the configuration, blocks CPU idle and
 * the standby power state for the duration of the CMDQ transaction, then
 * starts the first chunk and waits for (or, asynchronously, defers to)
 * completion.  The PM constraints are released in spi_it8xxx2_complete().
 */
static int transceive(const struct device *dev, const struct spi_config *config,
		      const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs,
		      bool asynchronous, spi_callback_t cb, void *userdata)
{
	struct spi_it8xxx2_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	int ret;

	spi_context_lock(ctx, asynchronous, cb, userdata, config);

	/* Configure spi */
	ret = spi_it8xxx2_configure(dev, config);
	if (ret) {
		spi_context_release(ctx, ret);
		return ret;
	}

	/*
	 * The EC processor(CPU) cannot be in the k_cpu_idle() and power
	 * policy during the transactions with the CQ mode.
	 * Otherwise, the EC processor would be clock gated.
	 */
	chip_block_idle();
	pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);

	spi_context_buffers_setup(ctx, tx_bufs, rx_bufs, 1);
	ret = spi_it8xxx2_next_xfer(dev);
	if (!ret) {
		ret = spi_context_wait_for_completion(ctx);
	} else {
		/* Failed to start: complete() releases CS and the PM locks */
		spi_it8xxx2_complete(dev, ret);
	}

	spi_context_release(ctx, ret);
	return ret;
}
420
/* SPI API: blocking transceive. */
static int it8xxx2_transceive(const struct device *dev, const struct spi_config *config,
			      const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs)
{
	return transceive(dev, config, tx_bufs, rx_bufs, false, NULL, NULL);
}
426
427 #ifdef CONFIG_SPI_ASYNC
/* SPI API: non-blocking transceive; cb(userdata) is invoked on completion. */
static int it8xxx2_transceive_async(const struct device *dev, const struct spi_config *config,
				    const struct spi_buf_set *tx_bufs,
				    const struct spi_buf_set *rx_bufs, spi_callback_t cb,
				    void *userdata)
{
	return transceive(dev, config, tx_bufs, rx_bufs, true, cb, userdata);
}
435 #endif /* CONFIG_SPI_ASYNC */
436
/* SPI API: unconditionally release the context lock for this instance. */
static int it8xxx2_release(const struct device *dev, const struct spi_config *config)
{
	struct spi_it8xxx2_data *data = dev->data;

	spi_context_unlock_unconditionally(&data->ctx);
	return 0;
}
444
/*
 * SSPI interrupt handler.
 *
 * Acknowledges the interrupt status; on command-queue completion it takes
 * the active channel out of CMDQ mode, advances the TX/RX contexts by the
 * amounts recorded when the entry was built, and starts the next chunk
 * (spi_it8xxx2_next_xfer() completes the transaction when nothing is left).
 */
static void it8xxx2_spi_isr(const void *arg)
{
	const struct device *dev = arg;
	const struct spi_it8xxx2_config *cfg = dev->config;
	struct spi_it8xxx2_data *data = dev->data;
	struct spi_context *ctx = &data->ctx;
	uint8_t reg_val;
	int ret;

	/* NOTE(review): writing the read status value back appears to acknowledge
	 * (write-1-to-clear) the pending bits — confirm against the datasheet.
	 */
	reg_val = sys_read8(cfg->base + SPI0C_INT_STS);
	sys_write8(reg_val, cfg->base + SPI0C_INT_STS);
	if (reg_val & (SPI_DMA_RBUF_0_FULL | SPI_DMA_RBUF_1_FULL)) {
		LOG_INF("Triggered dma ring buffer full interrupt, status: 0x%x", reg_val);
	}

	if (reg_val & SPI_CMDQ_BUS_END) {
		/* Deselect CMDQ mode for the channel that just finished */
		reg_val = sys_read8(cfg->base + SPI0D_CTRL5);
		if (ctx->config->slave == 0) {
			reg_val &= ~CH0_SEL_CMDQ;
		} else {
			reg_val &= ~CH1_SEL_CMDQ;
		}
		sys_write8(reg_val, cfg->base + SPI0D_CTRL5);

		spi_context_update_tx(ctx, 1, data->transfer_len);
		spi_context_update_rx(ctx, 1, data->receive_len);
		ret = spi_it8xxx2_next_xfer(dev);
		if (ret) {
			spi_it8xxx2_complete(dev, ret);
		}
	}
}
477
/*
 * Driver init: apply pinctrl, select one-shot (non-auto) mode, connect and
 * enable the SSPI interrupt, configure chip-select GPIOs and unlock the
 * context so the first transceive can proceed.
 */
static int spi_it8xxx2_init(const struct device *dev)
{
	const struct spi_it8xxx2_config *cfg = dev->config;
	struct spi_it8xxx2_data *data = dev->data;
	int ret;

	ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret) {
		LOG_ERR("Failed to set default pinctrl");
		return ret;
	}

	/* Enable one-shot mode */
	sys_write8(sys_read8(cfg->base + SPI04_CTRL3) & ~AUTO_MODE, cfg->base + SPI04_CTRL3);

	irq_connect_dynamic(cfg->spi_irq, 0, it8xxx2_spi_isr, dev, 0);
	irq_enable(cfg->spi_irq);

	ret = spi_context_cs_configure_all(&data->ctx);
	if (ret) {
		return ret;
	}

	spi_context_unlock_unconditionally(&data->ctx);
	return 0;
}
504
/* Zephyr SPI driver API vtable for this controller */
static DEVICE_API(spi, spi_it8xxx2_driver_api) = {
	.transceive = it8xxx2_transceive,
	.release = it8xxx2_release,

#ifdef CONFIG_SPI_ASYNC
	.transceive_async = it8xxx2_transceive_async,
#endif
};
513
/*
 * Per-instance boilerplate: pinctrl definition, const config, runtime data
 * (with spi_context lock/sync/CS-GPIO initializers) and device registration
 * for every enabled ite,it8xxx2-spi devicetree node.
 */
#define SPI_IT8XXX2_INIT(n)                                                                        \
	PINCTRL_DT_INST_DEFINE(n);                                                                 \
	static const struct spi_it8xxx2_config spi_it8xxx2_cfg_##n = {                             \
		.base = DT_INST_REG_ADDR(n),                                                       \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),                                         \
		.spi_irq = DT_INST_IRQ(n, irq),                                                    \
	};                                                                                         \
                                                                                                   \
	static struct spi_it8xxx2_data spi_it8xxx2_data_##n = {                                    \
		SPI_CONTEXT_INIT_LOCK(spi_it8xxx2_data_##n, ctx),                                  \
		SPI_CONTEXT_INIT_SYNC(spi_it8xxx2_data_##n, ctx),                                  \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx)};                             \
                                                                                                   \
	DEVICE_DT_INST_DEFINE(n, &spi_it8xxx2_init, NULL, &spi_it8xxx2_data_##n,                   \
			      &spi_it8xxx2_cfg_##n, POST_KERNEL,                                   \
			      CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &spi_it8xxx2_driver_api);

DT_INST_FOREACH_STATUS_OKAY(SPI_IT8XXX2_INIT)
532