1 /*
2 * Copyright (c) 2021 ITE Corporation. All Rights Reserved.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #define DT_DRV_COMPAT ite_it8xxx2_espi
8
9 #include <assert.h>
10 #include <zephyr/drivers/espi.h>
11 #include <zephyr/drivers/gpio.h>
12 #include <zephyr/drivers/interrupt_controller/wuc_ite_it8xxx2.h>
13 #include <zephyr/kernel.h>
14 #include <zephyr/sys/util.h>
15 #include <soc.h>
16 #include <soc_dt.h>
17 #include "soc_espi.h"
18 #include "espi_utils.h"
19
20 #include <zephyr/logging/log.h>
21 #include <zephyr/irq.h>
22 LOG_MODULE_REGISTER(espi, CONFIG_ESPI_LOG_LEVEL);
23
24 #define ESPI_IT8XXX2_GET_GCTRL_BASE \
25 ((struct gctrl_it8xxx2_regs *)DT_REG_ADDR(DT_NODELABEL(gctrl)))
26
27 #define IT8XXX2_ESPI_IRQ DT_INST_IRQ_BY_IDX(0, 0, irq)
28 #define IT8XXX2_ESPI_VW_IRQ DT_INST_IRQ_BY_IDX(0, 1, irq)
29 #define IT8XXX2_KBC_IBF_IRQ DT_INST_IRQ_BY_IDX(0, 2, irq)
30 #define IT8XXX2_KBC_OBE_IRQ DT_INST_IRQ_BY_IDX(0, 3, irq)
31 #define IT8XXX2_PMC1_IBF_IRQ DT_INST_IRQ_BY_IDX(0, 4, irq)
32 #define IT8XXX2_PORT_80_IRQ DT_INST_IRQ_BY_IDX(0, 5, irq)
33 #define IT8XXX2_PMC2_IBF_IRQ DT_INST_IRQ_BY_IDX(0, 6, irq)
34 #define IT8XXX2_TRANS_IRQ DT_INST_IRQ_BY_IDX(0, 7, irq)
35
36 /* General Capabilities and Configuration 1 */
37 #define IT8XXX2_ESPI_MAX_FREQ_MASK GENMASK(2, 0)
38 #define IT8XXX2_ESPI_CAPCFG1_MAX_FREQ_20 0
39 #define IT8XXX2_ESPI_CAPCFG1_MAX_FREQ_25 1
40 #define IT8XXX2_ESPI_CAPCFG1_MAX_FREQ_33 2
41 #define IT8XXX2_ESPI_CAPCFG1_MAX_FREQ_50 3
42 #define IT8XXX2_ESPI_CAPCFG1_MAX_FREQ_66 4
43
44 #define IT8XXX2_ESPI_PC_READY_MASK BIT(1)
45 #define IT8XXX2_ESPI_VW_READY_MASK BIT(1)
46 #define IT8XXX2_ESPI_OOB_READY_MASK BIT(1)
47 #define IT8XXX2_ESPI_FC_READY_MASK BIT(1)
48
49 #define IT8XXX2_ESPI_INTERRUPT_ENABLE BIT(7)
50 #define IT8XXX2_ESPI_TO_WUC_ENABLE BIT(4)
51 #define IT8XXX2_ESPI_VW_INTERRUPT_ENABLE BIT(7)
52 #define IT8XXX2_ESPI_INTERRUPT_PUT_PC BIT(7)
53
54 /*
55 * VWCTRL2 register:
56 * bit4 = 1b: Refers to ESPI_RESET# for PLTRST#.
57 */
58 #define IT8XXX2_ESPI_VW_RESET_PLTRST BIT(4)
59
60 #define IT8XXX2_ESPI_UPSTREAM_ENABLE BIT(7)
61 #define IT8XXX2_ESPI_UPSTREAM_GO BIT(6)
62 #define IT8XXX2_ESPI_UPSTREAM_INTERRUPT_ENABLE BIT(5)
63 #define IT8XXX2_ESPI_UPSTREAM_CHANNEL_DISABLE BIT(2)
64 #define IT8XXX2_ESPI_UPSTREAM_DONE BIT(1)
65 #define IT8XXX2_ESPI_UPSTREAM_BUSY BIT(0)
66
67 #define IT8XXX2_ESPI_CYCLE_TYPE_OOB 0x07
68
69 #define IT8XXX2_ESPI_PUT_OOB_STATUS BIT(7)
70 #define IT8XXX2_ESPI_PUT_OOB_INTERRUPT_ENABLE BIT(7)
71 #define IT8XXX2_ESPI_PUT_OOB_LEN_MASK GENMASK(6, 0)
72
73 #define IT8XXX2_ESPI_INPUT_PAD_GATING BIT(6)
74
75 #define IT8XXX2_ESPI_FLASH_MAX_PAYLOAD_SIZE 64
76 #define IT8XXX2_ESPI_PUT_FLASH_TAG_MASK GENMASK(7, 4)
77 #define IT8XXX2_ESPI_PUT_FLASH_LEN_MASK GENMASK(6, 0)
78
79 struct espi_it8xxx2_wuc {
80 /* WUC control device structure */
81 const struct device *wucs;
82 /* WUC pin mask */
83 uint8_t mask;
84 };
85
86 struct espi_it8xxx2_config {
87 uintptr_t base_espi_slave;
88 uintptr_t base_espi_vw;
89 uintptr_t base_espi_queue1;
90 uintptr_t base_espi_queue0;
91 uintptr_t base_ec2i;
92 uintptr_t base_kbc;
93 uintptr_t base_pmc;
94 uintptr_t base_smfi;
95 const struct espi_it8xxx2_wuc wuc;
96 };
97
98 struct espi_it8xxx2_data {
99 sys_slist_t callbacks;
100 #ifdef CONFIG_ESPI_OOB_CHANNEL
101 struct k_sem oob_upstream_go;
102 #endif
103 #ifdef CONFIG_ESPI_FLASH_CHANNEL
104 struct k_sem flash_upstream_go;
105 uint8_t put_flash_cycle_type;
106 uint8_t put_flash_tag;
107 uint8_t put_flash_len;
108 uint8_t flash_buf[IT8XXX2_ESPI_FLASH_MAX_PAYLOAD_SIZE];
109 #endif
110 };
111
112 struct vw_channel_t {
113 uint8_t vw_index; /* VW index of signal */
114 uint8_t level_mask; /* level bit of signal */
115 uint8_t valid_mask; /* valid bit of signal */
116 };
117
118 struct vwidx_isr_t {
119 void (*vwidx_isr)(const struct device *dev, uint8_t update_flag);
120 uint8_t vw_index;
121 };
122
123 enum espi_ch_enable_isr_type {
124 DEASSERTED_FLAG = 0,
125 ASSERTED_FLAG = 1,
126 };
127
128 struct espi_isr_t {
129 void (*espi_isr)(const struct device *dev, bool enable);
130 enum espi_ch_enable_isr_type isr_type;
131 };
132
133 struct espi_vw_signal_t {
134 enum espi_vwire_signal signal;
135 void (*vw_signal_isr)(const struct device *dev);
136 };
137
138 /* EC2I bridge and PNPCFG devices */
139 static const struct ec2i_t kbc_settings[] = {
140 /* Select logical device 06h (keyboard) */
141 {HOST_INDEX_LDN, LDN_KBC_KEYBOARD},
142 /* Set IRQ=01h for logical device */
143 {HOST_INDEX_IRQNUMX, 0x01},
144 /* Configure IRQTP for KBC. */
145 /*
146 * Interrupt request type select (IRQTP) for KBC.
147 * bit 1, 0: IRQ request is buffered and applied to SERIRQ
148 * 1: IRQ request is inverted before being applied to SERIRQ
149 * bit 0, 0: Edge triggered mode
150 * 1: Level triggered mode
151 *
152 * This interrupt configuration should be the same on both the host and EC sides.
153 */
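/* Per the bit definitions above, the value 0x02 written below selects an
 * inverted IRQ request (bit1 = 1) in edge triggered mode (bit0 = 0).
 */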
154 {HOST_INDEX_IRQTP, 0x02},
155 /* Enable logical device */
156 {HOST_INDEX_LDA, 0x01},
157
158 #ifdef CONFIG_ESPI_IT8XXX2_PNPCFG_DEVICE_KBC_MOUSE
159 /* Select logical device 05h (mouse) */
160 {HOST_INDEX_LDN, LDN_KBC_MOUSE},
161 /* Set IRQ=0Ch for logical device */
162 {HOST_INDEX_IRQNUMX, 0x0C},
163 /* Enable logical device */
164 {HOST_INDEX_LDA, 0x01},
165 #endif
166 };
167
168 static const struct ec2i_t pmc1_settings[] = {
169 /* Select logical device 11h (PM1 ACPI) */
170 {HOST_INDEX_LDN, LDN_PMC1},
171 /* Set IRQ=00h for logical device */
172 {HOST_INDEX_IRQNUMX, 0x00},
173 /* Enable logical device */
174 {HOST_INDEX_LDA, 0x01},
175 };
176
177 #ifdef CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD
178 #define IT8XXX2_ESPI_HC_DATA_PORT_MSB \
179 ((CONFIG_ESPI_PERIPHERAL_HOST_CMD_DATA_PORT_NUM >> 8) & 0xff)
180 #define IT8XXX2_ESPI_HC_DATA_PORT_LSB \
181 (CONFIG_ESPI_PERIPHERAL_HOST_CMD_DATA_PORT_NUM & 0xff)
182 #define IT8XXX2_ESPI_HC_CMD_PORT_MSB \
183 (((CONFIG_ESPI_PERIPHERAL_HOST_CMD_DATA_PORT_NUM + 4) >> 8) & 0xff)
184 #define IT8XXX2_ESPI_HC_CMD_PORT_LSB \
185 ((CONFIG_ESPI_PERIPHERAL_HOST_CMD_DATA_PORT_NUM + 4) & 0xff)
186 static const struct ec2i_t pmc2_settings[] = {
187 /* Select logical device 12h (PM2 host command) */
188 {HOST_INDEX_LDN, LDN_PMC2},
189 /* I/O Port Base Address (data/command ports) */
190 {HOST_INDEX_IOBAD0_MSB, IT8XXX2_ESPI_HC_DATA_PORT_MSB},
191 {HOST_INDEX_IOBAD0_LSB, IT8XXX2_ESPI_HC_DATA_PORT_LSB},
192 {HOST_INDEX_IOBAD1_MSB, IT8XXX2_ESPI_HC_CMD_PORT_MSB},
193 {HOST_INDEX_IOBAD1_LSB, IT8XXX2_ESPI_HC_CMD_PORT_LSB},
194 /* Set IRQ=00h for logical device */
195 {HOST_INDEX_IRQNUMX, 0x00},
196 /* Enable logical device */
197 {HOST_INDEX_LDA, 0x01},
198 };
199 #endif
200
201 #if defined(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD) || \
202 defined(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION)
203 /*
204 * Host to RAM (H2RAM) memory mapping.
205 * This feature allows the host to access the EC's memory directly via eSPI I/O cycles.
206 * The mapping range is 4K bytes and the base address is adjustable.
207 * E.g. the I/O cycles 800h~8ffh from the host can be mapped to x800h~x8ffh.
208 * The linker script makes the pool 4K aligned.
209 */
210 #define IT8XXX2_ESPI_H2RAM_POOL_SIZE_MAX 0x1000
211 #define IT8XXX2_ESPI_H2RAM_OFFSET_MASK GENMASK(5, 0)
212 #define IT8XXX2_ESPI_H2RAM_BASEADDR_MASK GENMASK(19, 0)
213
214 #if defined(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION)
215 #define H2RAM_ACPI_SHM_MAX ((CONFIG_ESPI_IT8XXX2_ACPI_SHM_H2RAM_SIZE) + \
216 (CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION_PORT_NUM))
217 #if (H2RAM_ACPI_SHM_MAX > IT8XXX2_ESPI_H2RAM_POOL_SIZE_MAX)
218 #error "ACPI shared memory region out of h2ram"
219 #endif
220 #endif /* CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION */
221
222 #if defined(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD)
223 #define H2RAM_EC_HOST_CMD_MAX ((CONFIG_ESPI_IT8XXX2_HC_H2RAM_SIZE) + \
224 (CONFIG_ESPI_PERIPHERAL_HOST_CMD_PARAM_PORT_NUM))
225 #if (H2RAM_EC_HOST_CMD_MAX > IT8XXX2_ESPI_H2RAM_POOL_SIZE_MAX)
226 #error "EC host command parameters out of h2ram"
227 #endif
228 #endif /* CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD */
229
230 #if defined(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD) && \
231 defined(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION)
232 #if (MIN(H2RAM_ACPI_SHM_MAX, H2RAM_EC_HOST_CMD_MAX) > \
233 MAX(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION_PORT_NUM, \
234 CONFIG_ESPI_PERIPHERAL_HOST_CMD_PARAM_PORT_NUM))
235 #error "ACPI and HC sections of h2ram overlap"
236 #endif
237 #endif
238
239 static uint8_t h2ram_pool[MAX(H2RAM_ACPI_SHM_MAX, H2RAM_EC_HOST_CMD_MAX)]
240 __attribute__((section(".h2ram_pool")));
241
242 #define H2RAM_WINDOW_SIZE(ram_size) ((find_msb_set((ram_size) / 16) - 1) & 0x7)
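/*
 * Worked example of the window-size encoding above: a 256-byte window gives
 * find_msb_set(256 / 16) - 1 = find_msb_set(16) - 1 = 5 - 1 = 4, and the
 * minimum 16-byte window encodes as 0.
 */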
243
244 static const struct ec2i_t smfi_settings[] = {
245 /* Select logical device 0Fh (SMFI) */
246 {HOST_INDEX_LDN, LDN_SMFI},
247 /* Internal RAM base address on eSPI I/O space */
248 {HOST_INDEX_DSLDC6, 0x00},
249 /* Enable H2RAM eSPI I/O cycle */
250 {HOST_INDEX_DSLDC7, 0x01},
251 /* Enable logical device */
252 {HOST_INDEX_LDA, 0x01},
253 };
254
255 static void smfi_it8xxx2_init(const struct device *dev)
256 {
257 const struct espi_it8xxx2_config *const config = dev->config;
258 struct smfi_it8xxx2_regs *const smfi_reg =
259 (struct smfi_it8xxx2_regs *)config->base_smfi;
260 struct gctrl_it8xxx2_regs *const gctrl = ESPI_IT8XXX2_GET_GCTRL_BASE;
261 uint8_t h2ram_offset;
262
263 /* Set the host to RAM cycle address offset */
264 h2ram_offset = ((uint32_t)h2ram_pool & IT8XXX2_ESPI_H2RAM_BASEADDR_MASK) /
265 IT8XXX2_ESPI_H2RAM_POOL_SIZE_MAX;
266 gctrl->GCTRL_H2ROFSR =
267 (gctrl->GCTRL_H2ROFSR & ~IT8XXX2_ESPI_H2RAM_OFFSET_MASK) |
268 h2ram_offset;
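/*
 * Worked example with a hypothetical pool address: if h2ram_pool were linked
 * at 0x8001f000, then (0x8001f000 & 0xfffff) / 0x1000 = 0x1f would be written
 * to the offset field, selecting the 4K block the pool occupies within the
 * 1M H2RAM-addressable region.
 */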
269
270 #ifdef CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD
271 memset(&h2ram_pool[CONFIG_ESPI_PERIPHERAL_HOST_CMD_PARAM_PORT_NUM], 0,
272 CONFIG_ESPI_IT8XXX2_HC_H2RAM_SIZE);
273 /* Set host RAM window 0 base address */
274 smfi_reg->SMFI_HRAMW0BA =
275 (CONFIG_ESPI_PERIPHERAL_HOST_CMD_PARAM_PORT_NUM >> 4) & 0xff;
276 /* Set host RAM window 0 size. (allow R/W) */
277 smfi_reg->SMFI_HRAMW0AAS =
278 H2RAM_WINDOW_SIZE(CONFIG_ESPI_IT8XXX2_HC_H2RAM_SIZE);
279 /* Enable window 0, H2RAM through IO cycle */
280 smfi_reg->SMFI_HRAMWC |= (SMFI_H2RAMPS | SMFI_H2RAMW0E);
281 #endif
282
283 #ifdef CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION
284 memset(&h2ram_pool[CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION_PORT_NUM], 0,
285 CONFIG_ESPI_IT8XXX2_ACPI_SHM_H2RAM_SIZE);
286 /* Set host RAM window 1 base address */
287 smfi_reg->SMFI_HRAMW1BA =
288 (CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION_PORT_NUM >> 4) & 0xff;
289 /* Set host RAM window 1 size. (read-only) */
290 smfi_reg->SMFI_HRAMW1AAS =
291 H2RAM_WINDOW_SIZE(CONFIG_ESPI_IT8XXX2_ACPI_SHM_H2RAM_SIZE) |
292 SMFI_HRAMWXWPE_ALL;
293 /* Enable window 1, H2RAM through IO cycle */
294 smfi_reg->SMFI_HRAMWC |= (SMFI_H2RAMPS | SMFI_H2RAMW1E);
295 #endif
296 }
297 #endif /* CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD ||
298 * CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION
299 */
300
301 static void ec2i_it8xxx2_wait_status_cleared(const struct device *dev,
302 uint8_t mask)
303 {
304 const struct espi_it8xxx2_config *const config = dev->config;
305 struct ec2i_regs *const ec2i = (struct ec2i_regs *)config->base_ec2i;
306
307 while (ec2i->IBCTL & mask) {
308 ;
309 }
310 }
311
312 static void ec2i_it8xxx2_write_pnpcfg(const struct device *dev,
313 enum ec2i_access sel, uint8_t data)
314 {
315 const struct espi_it8xxx2_config *const config = dev->config;
316 struct ec2i_regs *const ec2i = (struct ec2i_regs *)config->base_ec2i;
317
318 /* bit0: EC to I-Bus access enabled. */
319 ec2i->IBCTL |= EC2I_IBCTL_CSAE;
320 /*
321 * Wait until both the CRIB and CWIB bits in the IBCTL register
322 * are cleared.
323 */
324 ec2i_it8xxx2_wait_status_cleared(dev, EC2I_IBCTL_CRWIB);
325 /* Enable EC access to the PNPCFG registers */
326 ec2i->IBMAE |= EC2I_IBMAE_CFGAE;
327 /* Set indirect host I/O offset. */
328 ec2i->IHIOA = sel;
329 /* Write the data to IHD register */
330 ec2i->IHD = data;
331 /* Wait until the CWIB bit in IBCTL is cleared. */
332 ec2i_it8xxx2_wait_status_cleared(dev, EC2I_IBCTL_CWIB);
333 /* Disable EC access to the PNPCFG registers. */
334 ec2i->IBMAE &= ~EC2I_IBMAE_CFGAE;
335 /* Disable EC to I-Bus access. */
336 ec2i->IBCTL &= ~EC2I_IBCTL_CSAE;
337 }
338
339 static void ec2i_it8xxx2_write(const struct device *dev,
340 enum host_pnpcfg_index index, uint8_t data)
341 {
342 /* Set index */
343 ec2i_it8xxx2_write_pnpcfg(dev, EC2I_ACCESS_INDEX, index);
344 /* Set data */
345 ec2i_it8xxx2_write_pnpcfg(dev, EC2I_ACCESS_DATA, data);
346 }
347
348 static void pnpcfg_it8xxx2_configure(const struct device *dev,
349 const struct ec2i_t *settings,
350 size_t entries)
351 {
352 for (size_t i = 0; i < entries; i++) {
353 ec2i_it8xxx2_write(dev, settings[i].index_port,
354 settings[i].data_port);
355 }
356 }
357
358 #define PNPCFG(_s) \
359 pnpcfg_it8xxx2_configure(dev, _s##_settings, ARRAY_SIZE(_s##_settings))
360
361 static void pnpcfg_it8xxx2_init(const struct device *dev)
362 {
363 const struct espi_it8xxx2_config *const config = dev->config;
364 struct ec2i_regs *const ec2i = (struct ec2i_regs *)config->base_ec2i;
365 struct gctrl_it8xxx2_regs *const gctrl = ESPI_IT8XXX2_GET_GCTRL_BASE;
366
367 /* The register pair to access PNPCFG is 004Eh and 004Fh */
368 gctrl->GCTRL_BADRSEL = 0x1;
369 /* Host access is disabled */
370 ec2i->LSIOHA |= 0x3;
371 /* configure pnpcfg devices */
372 if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_8042_KBC)) {
373 PNPCFG(kbc);
374 }
375 if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_HOST_IO)) {
376 PNPCFG(pmc1);
377 }
378 #ifdef CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD
379 PNPCFG(pmc2);
380 #endif
381 #if defined(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD) || \
382 defined(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION)
383 PNPCFG(smfi);
384 #endif
385 }
386
387 /* KBC (port 60h/64h) */
388 #ifdef CONFIG_ESPI_PERIPHERAL_8042_KBC
389 static void kbc_it8xxx2_ibf_isr(const struct device *dev)
390 {
391 const struct espi_it8xxx2_config *const config = dev->config;
392 struct espi_it8xxx2_data *const data = dev->data;
393 struct kbc_regs *const kbc_reg = (struct kbc_regs *)config->base_kbc;
394 struct espi_event evt = {
395 ESPI_BUS_PERIPHERAL_NOTIFICATION,
396 ESPI_PERIPHERAL_8042_KBC,
397 ESPI_PERIPHERAL_NODATA
398 };
399 struct espi_evt_data_kbc *kbc_evt =
400 (struct espi_evt_data_kbc *)&evt.evt_data;
401
402 /* KBC Input Buffer Full event */
403 kbc_evt->evt = HOST_KBC_EVT_IBF;
404 /*
405 * Indicates if the host sent a command or data.
406 * 0 = data
407 * 1 = command
408 */
409 kbc_evt->type = !!(kbc_reg->KBHISR & KBC_KBHISR_A2_ADDR);
410 /* The data in KBC Input Buffer */
411 kbc_evt->data = kbc_reg->KBHIDIR;
412
413 espi_send_callbacks(&data->callbacks, dev, evt);
414 }
415
416 static void kbc_it8xxx2_obe_isr(const struct device *dev)
417 {
418 const struct espi_it8xxx2_config *const config = dev->config;
419 struct espi_it8xxx2_data *const data = dev->data;
420 struct kbc_regs *const kbc_reg = (struct kbc_regs *)config->base_kbc;
421 struct espi_event evt = {
422 ESPI_BUS_PERIPHERAL_NOTIFICATION,
423 ESPI_PERIPHERAL_8042_KBC,
424 ESPI_PERIPHERAL_NODATA
425 };
426 struct espi_evt_data_kbc *kbc_evt =
427 (struct espi_evt_data_kbc *)&evt.evt_data;
428
429 /* Disable KBC OBE interrupt first */
430 kbc_reg->KBHICR &= ~KBC_KBHICR_OBECIE;
431
432 /* Notify application that host already read out data. */
433 kbc_evt->evt = HOST_KBC_EVT_OBE;
434 kbc_evt->data = 0;
435 kbc_evt->type = 0;
436 espi_send_callbacks(&data->callbacks, dev, evt);
437 }
438
439 static void kbc_it8xxx2_init(const struct device *dev)
440 {
441 const struct espi_it8xxx2_config *const config = dev->config;
442 struct kbc_regs *const kbc_reg = (struct kbc_regs *)config->base_kbc;
443
444 /* Disable KBC serirq IRQ */
445 kbc_reg->KBIRQR = 0;
446
447 /*
448 * bit3: Input Buffer Full CPU Interrupt Enable.
449 * bit1: Enable the interrupt to mouse driver in the host processor via
450 * SERIRQ when the output buffer is full.
451 * bit0: Enable the interrupt to keyboard driver in the host processor
452 * via SERIRQ when the output buffer is full.
453 */
454 kbc_reg->KBHICR |=
455 (KBC_KBHICR_IBFCIE | KBC_KBHICR_OBFKIE | KBC_KBHICR_OBFMIE);
456
457 /* Input Buffer Full CPU Interrupt Enable. */
458 IRQ_CONNECT(IT8XXX2_KBC_IBF_IRQ, 0, kbc_it8xxx2_ibf_isr,
459 DEVICE_DT_INST_GET(0), 0);
460 irq_enable(IT8XXX2_KBC_IBF_IRQ);
461
462 /* Output Buffer Empty CPU Interrupt Enable */
463 IRQ_CONNECT(IT8XXX2_KBC_OBE_IRQ, 0, kbc_it8xxx2_obe_isr,
464 DEVICE_DT_INST_GET(0), 0);
465 irq_enable(IT8XXX2_KBC_OBE_IRQ);
466 }
467 #endif
468
469 /* PMC 1 (ACPI port 62h/66h) */
470 #ifdef CONFIG_ESPI_PERIPHERAL_HOST_IO
471 static void pmc1_it8xxx2_ibf_isr(const struct device *dev)
472 {
473 const struct espi_it8xxx2_config *const config = dev->config;
474 struct espi_it8xxx2_data *const data = dev->data;
475 struct pmc_regs *const pmc_reg = (struct pmc_regs *)config->base_pmc;
476 struct espi_event evt = {
477 ESPI_BUS_PERIPHERAL_NOTIFICATION,
478 ESPI_PERIPHERAL_HOST_IO,
479 ESPI_PERIPHERAL_NODATA
480 };
481 struct espi_evt_data_acpi *acpi_evt =
482 (struct espi_evt_data_acpi *)&evt.evt_data;
483
484 /*
485 * Indicates if the host sent a command or data.
486 * 0 = data
487 * 1 = command
488 */
489 acpi_evt->type = !!(pmc_reg->PM1STS & PMC_PM1STS_A2_ADDR);
490 /* Set processing flag before reading command byte */
491 pmc_reg->PM1STS |= PMC_PM1STS_GPF;
492 acpi_evt->data = pmc_reg->PM1DI;
493
494 espi_send_callbacks(&data->callbacks, dev, evt);
495 }
496
497 static void pmc1_it8xxx2_init(const struct device *dev)
498 {
499 const struct espi_it8xxx2_config *const config = dev->config;
500 struct pmc_regs *const pmc_reg = (struct pmc_regs *)config->base_pmc;
501
502 /* Enable pmc1 input buffer full interrupt */
503 pmc_reg->PM1CTL |= PMC_PM1CTL_IBFIE;
504 IRQ_CONNECT(IT8XXX2_PMC1_IBF_IRQ, 0, pmc1_it8xxx2_ibf_isr,
505 DEVICE_DT_INST_GET(0), 0);
506 if (!IS_ENABLED(CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE)) {
507 irq_enable(IT8XXX2_PMC1_IBF_IRQ);
508 }
509 }
510 #endif
511
512 /* Port 80 */
513 #ifdef CONFIG_ESPI_PERIPHERAL_DEBUG_PORT_80
514 static void port80_it8xxx2_isr(const struct device *dev)
515 {
516 struct espi_it8xxx2_data *const data = dev->data;
517 struct gctrl_it8xxx2_regs *const gctrl = ESPI_IT8XXX2_GET_GCTRL_BASE;
518 struct espi_event evt = {
519 ESPI_BUS_PERIPHERAL_NOTIFICATION,
520 (ESPI_PERIPHERAL_INDEX_0 << 16) | ESPI_PERIPHERAL_DEBUG_PORT80,
521 ESPI_PERIPHERAL_NODATA
522 };
523
524 if (IS_ENABLED(CONFIG_ESPI_IT8XXX2_PORT_81_CYCLE)) {
525 evt.evt_data = gctrl->GCTRL_P80HDR | (gctrl->GCTRL_P81HDR << 8);
526 } else {
527 evt.evt_data = gctrl->GCTRL_P80HDR;
528 }
529 /* Write 1 to clear this bit */
530 gctrl->GCTRL_P80H81HSR |= BIT(0);
531
532 espi_send_callbacks(&data->callbacks, dev, evt);
533 }
534
535 static void port80_it8xxx2_init(const struct device *dev)
536 {
537 ARG_UNUSED(dev);
538 struct gctrl_it8xxx2_regs *const gctrl = ESPI_IT8XXX2_GET_GCTRL_BASE;
539
540 /* Accept Port 80h (and 81h) Cycle */
541 if (IS_ENABLED(CONFIG_ESPI_IT8XXX2_PORT_81_CYCLE)) {
542 gctrl->GCTRL_SPCTRL1 |=
543 (IT8XXX2_GCTRL_ACP80 | IT8XXX2_GCTRL_ACP81);
544 } else {
545 gctrl->GCTRL_SPCTRL1 |= IT8XXX2_GCTRL_ACP80;
546 }
547 IRQ_CONNECT(IT8XXX2_PORT_80_IRQ, 0, port80_it8xxx2_isr,
548 DEVICE_DT_INST_GET(0), 0);
549 irq_enable(IT8XXX2_PORT_80_IRQ);
550 }
551 #endif
552
553 #ifdef CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD
554 /* PMC 2 (Host command port CONFIG_ESPI_PERIPHERAL_HOST_CMD_DATA_PORT_NUM) */
555 static void pmc2_it8xxx2_ibf_isr(const struct device *dev)
556 {
557 const struct espi_it8xxx2_config *const config = dev->config;
558 struct espi_it8xxx2_data *const data = dev->data;
559 struct pmc_regs *const pmc_reg = (struct pmc_regs *)config->base_pmc;
560 struct espi_event evt = {
561 ESPI_BUS_PERIPHERAL_NOTIFICATION,
562 ESPI_PERIPHERAL_EC_HOST_CMD,
563 ESPI_PERIPHERAL_NODATA
564 };
565
566 /* Set processing flag before reading command byte */
567 pmc_reg->PM2STS |= PMC_PM2STS_GPF;
568 evt.evt_data = pmc_reg->PM2DI;
569
570 espi_send_callbacks(&data->callbacks, dev, evt);
571 }
572
573 static void pmc2_it8xxx2_init(const struct device *dev)
574 {
575 const struct espi_it8xxx2_config *const config = dev->config;
576 struct pmc_regs *const pmc_reg = (struct pmc_regs *)config->base_pmc;
577
578 /* Dedicated interrupt for PMC2 */
579 pmc_reg->MBXCTRL |= PMC_MBXCTRL_DINT;
580 /* Enable pmc2 input buffer full interrupt */
581 pmc_reg->PM2CTL |= PMC_PM2CTL_IBFIE;
582 IRQ_CONNECT(IT8XXX2_PMC2_IBF_IRQ, 0, pmc2_it8xxx2_ibf_isr,
583 DEVICE_DT_INST_GET(0), 0);
584 if (!IS_ENABLED(CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE)) {
585 irq_enable(IT8XXX2_PMC2_IBF_IRQ);
586 }
587 }
588 #endif
589
590 /* eSPI api functions */
591 #define VW_CHAN(signal, index, level, valid) \
592 [signal] = {.vw_index = index, .level_mask = level, .valid_mask = valid}
593
594 /* VW signals used in eSPI */
595 static const struct vw_channel_t vw_channel_list[] = {
596 VW_CHAN(ESPI_VWIRE_SIGNAL_SLP_S3, 0x02, BIT(0), BIT(4)),
597 VW_CHAN(ESPI_VWIRE_SIGNAL_SLP_S4, 0x02, BIT(1), BIT(5)),
598 VW_CHAN(ESPI_VWIRE_SIGNAL_SLP_S5, 0x02, BIT(2), BIT(6)),
599 VW_CHAN(ESPI_VWIRE_SIGNAL_OOB_RST_WARN, 0x03, BIT(2), BIT(6)),
600 VW_CHAN(ESPI_VWIRE_SIGNAL_PLTRST, 0x03, BIT(1), BIT(5)),
601 VW_CHAN(ESPI_VWIRE_SIGNAL_SUS_STAT, 0x03, BIT(0), BIT(4)),
602 VW_CHAN(ESPI_VWIRE_SIGNAL_NMIOUT, 0x07, BIT(2), BIT(6)),
603 VW_CHAN(ESPI_VWIRE_SIGNAL_SMIOUT, 0x07, BIT(1), BIT(5)),
604 VW_CHAN(ESPI_VWIRE_SIGNAL_HOST_RST_WARN, 0x07, BIT(0), BIT(4)),
605 VW_CHAN(ESPI_VWIRE_SIGNAL_SLP_A, 0x41, BIT(3), BIT(7)),
606 VW_CHAN(ESPI_VWIRE_SIGNAL_SUS_PWRDN_ACK, 0x41, BIT(1), BIT(5)),
607 VW_CHAN(ESPI_VWIRE_SIGNAL_SUS_WARN, 0x41, BIT(0), BIT(4)),
608 VW_CHAN(ESPI_VWIRE_SIGNAL_SLP_WLAN, 0x42, BIT(1), BIT(5)),
609 VW_CHAN(ESPI_VWIRE_SIGNAL_SLP_LAN, 0x42, BIT(0), BIT(4)),
610 VW_CHAN(ESPI_VWIRE_SIGNAL_HOST_C10, 0x47, BIT(0), BIT(4)),
611 VW_CHAN(ESPI_VWIRE_SIGNAL_DNX_WARN, 0x4a, BIT(1), BIT(5)),
612 VW_CHAN(ESPI_VWIRE_SIGNAL_PME, 0x04, BIT(3), BIT(7)),
613 VW_CHAN(ESPI_VWIRE_SIGNAL_WAKE, 0x04, BIT(2), BIT(6)),
614 VW_CHAN(ESPI_VWIRE_SIGNAL_OOB_RST_ACK, 0x04, BIT(0), BIT(4)),
615 VW_CHAN(ESPI_VWIRE_SIGNAL_TARGET_BOOT_STS, 0x05, BIT(3), BIT(7)),
616 VW_CHAN(ESPI_VWIRE_SIGNAL_ERR_NON_FATAL, 0x05, BIT(2), BIT(6)),
617 VW_CHAN(ESPI_VWIRE_SIGNAL_ERR_FATAL, 0x05, BIT(1), BIT(5)),
618 VW_CHAN(ESPI_VWIRE_SIGNAL_TARGET_BOOT_DONE, 0x05, BIT(0), BIT(4)),
619 VW_CHAN(ESPI_VWIRE_SIGNAL_HOST_RST_ACK, 0x06, BIT(3), BIT(7)),
620 VW_CHAN(ESPI_VWIRE_SIGNAL_RST_CPU_INIT, 0x06, BIT(2), BIT(6)),
621 VW_CHAN(ESPI_VWIRE_SIGNAL_SMI, 0x06, BIT(1), BIT(5)),
622 VW_CHAN(ESPI_VWIRE_SIGNAL_SCI, 0x06, BIT(0), BIT(4)),
623 VW_CHAN(ESPI_VWIRE_SIGNAL_DNX_ACK, 0x40, BIT(1), BIT(5)),
624 VW_CHAN(ESPI_VWIRE_SIGNAL_SUS_ACK, 0x40, BIT(0), BIT(4)),
625 };
626
627 static int espi_it8xxx2_configure(const struct device *dev,
628 struct espi_cfg *cfg)
629 {
630 const struct espi_it8xxx2_config *const config = dev->config;
631 struct espi_slave_regs *const slave_reg =
632 (struct espi_slave_regs *)config->base_espi_slave;
633 uint8_t capcfg1 = 0;
634
635 /* Set frequency */
636 switch (cfg->max_freq) {
637 case 20:
638 capcfg1 = IT8XXX2_ESPI_CAPCFG1_MAX_FREQ_20;
639 break;
640 case 25:
641 capcfg1 = IT8XXX2_ESPI_CAPCFG1_MAX_FREQ_25;
642 break;
643 case 33:
644 capcfg1 = IT8XXX2_ESPI_CAPCFG1_MAX_FREQ_33;
645 break;
646 case 50:
647 capcfg1 = IT8XXX2_ESPI_CAPCFG1_MAX_FREQ_50;
648 break;
649 case 66:
650 capcfg1 = IT8XXX2_ESPI_CAPCFG1_MAX_FREQ_66;
651 break;
652 default:
653 return -EINVAL;
654 }
655 slave_reg->GCAPCFG1 =
656 (slave_reg->GCAPCFG1 & ~IT8XXX2_ESPI_MAX_FREQ_MASK) | capcfg1;
657
658 /*
659 * Configure eSPI I/O mode. (Register is read-only.)
660 * Supported I/O modes: single, dual and quad.
661 */
662
663 /* Configure eSPI supported channels. (Register is read-only.)
664 * Supported channels: peripheral, virtual wire, OOB, and flash access.
665 */
666
667 return 0;
668 }
669
670 static bool espi_it8xxx2_channel_ready(const struct device *dev,
671 enum espi_channel ch)
672 {
673 const struct espi_it8xxx2_config *const config = dev->config;
674 struct espi_slave_regs *const slave_reg =
675 (struct espi_slave_regs *)config->base_espi_slave;
676 bool sts = false;
677
678 switch (ch) {
679 case ESPI_CHANNEL_PERIPHERAL:
680 sts = slave_reg->CH_PC_CAPCFG3 & IT8XXX2_ESPI_PC_READY_MASK;
681 break;
682 case ESPI_CHANNEL_VWIRE:
683 sts = slave_reg->CH_VW_CAPCFG3 & IT8XXX2_ESPI_VW_READY_MASK;
684 break;
685 case ESPI_CHANNEL_OOB:
686 sts = slave_reg->CH_OOB_CAPCFG3 & IT8XXX2_ESPI_OOB_READY_MASK;
687 break;
688 case ESPI_CHANNEL_FLASH:
689 sts = slave_reg->CH_FLASH_CAPCFG3 & IT8XXX2_ESPI_FC_READY_MASK;
690 break;
691 default:
692 break;
693 }
694
695 return sts;
696 }
697
698 static int espi_it8xxx2_send_vwire(const struct device *dev,
699 enum espi_vwire_signal signal, uint8_t level)
700 {
701 const struct espi_it8xxx2_config *const config = dev->config;
702 struct espi_vw_regs *const vw_reg =
703 (struct espi_vw_regs *)config->base_espi_vw;
704 if (signal >= ARRAY_SIZE(vw_channel_list)) {
705 return -EIO;
706 }
707
708 uint8_t vw_index = vw_channel_list[signal].vw_index;
709 uint8_t level_mask = vw_channel_list[signal].level_mask;
710 uint8_t valid_mask = vw_channel_list[signal].valid_mask;
711
712 if (level) {
713 vw_reg->VW_INDEX[vw_index] |= level_mask;
714 } else {
715 vw_reg->VW_INDEX[vw_index] &= ~level_mask;
716 }
717
718 vw_reg->VW_INDEX[vw_index] |= valid_mask;
719
720 return 0;
721 }
722
723 static int espi_it8xxx2_receive_vwire(const struct device *dev,
724 enum espi_vwire_signal signal, uint8_t *level)
725 {
726 const struct espi_it8xxx2_config *const config = dev->config;
727 struct espi_vw_regs *const vw_reg =
728 (struct espi_vw_regs *)config->base_espi_vw;
729 if (signal >= ARRAY_SIZE(vw_channel_list)) {
730 return -EIO;
731 }
732
733 uint8_t vw_index = vw_channel_list[signal].vw_index;
734 uint8_t level_mask = vw_channel_list[signal].level_mask;
735 uint8_t valid_mask = vw_channel_list[signal].valid_mask;
736
737 if (IS_ENABLED(CONFIG_ESPI_VWIRE_VALID_BIT_CHECK)) {
738 if (vw_reg->VW_INDEX[vw_index] & valid_mask) {
739 *level = !!(vw_reg->VW_INDEX[vw_index] & level_mask);
740 } else {
741 /* Not valid */
742 *level = 0;
743 }
744 } else {
745 *level = !!(vw_reg->VW_INDEX[vw_index] & level_mask);
746 }
747
748 return 0;
749 }
750
751 #ifdef CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE
752 static void host_custom_opcode_enable_interrupts(void)
753 {
754 if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_HOST_IO)) {
755 irq_enable(IT8XXX2_PMC1_IBF_IRQ);
756 }
757 if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD)) {
758 irq_enable(IT8XXX2_PMC2_IBF_IRQ);
759 }
760 }
761
762 static void host_custom_opcode_disable_interrupts(void)
763 {
764 if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_HOST_IO)) {
765 irq_disable(IT8XXX2_PMC1_IBF_IRQ);
766 }
767 if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD)) {
768 irq_disable(IT8XXX2_PMC2_IBF_IRQ);
769 }
770 }
771 #endif /* CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE */
772
773 static int espi_it8xxx2_manage_callback(const struct device *dev,
774 struct espi_callback *callback, bool set)
775 {
776 struct espi_it8xxx2_data *const data = dev->data;
777
778 return espi_manage_callback(&data->callbacks, callback, set);
779 }
780
781 static int espi_it8xxx2_read_lpc_request(const struct device *dev,
782 enum lpc_peripheral_opcode op,
783 uint32_t *data)
784 {
785 const struct espi_it8xxx2_config *const config = dev->config;
786
787 if (op >= E8042_START_OPCODE && op <= E8042_MAX_OPCODE) {
788 struct kbc_regs *const kbc_reg =
789 (struct kbc_regs *)config->base_kbc;
790
791 switch (op) {
792 case E8042_OBF_HAS_CHAR:
793 /* EC has written data back to host. OBF is
794 * automatically cleared after host reads
795 * the data
796 */
797 *data = !!(kbc_reg->KBHISR & KBC_KBHISR_OBF);
798 break;
799 case E8042_IBF_HAS_CHAR:
800 *data = !!(kbc_reg->KBHISR & KBC_KBHISR_IBF);
801 break;
802 case E8042_READ_KB_STS:
803 *data = kbc_reg->KBHISR;
804 break;
805 default:
806 return -EINVAL;
807 }
808 } else if (op >= EACPI_START_OPCODE && op <= EACPI_MAX_OPCODE) {
809 struct pmc_regs *const pmc_reg =
810 (struct pmc_regs *)config->base_pmc;
811
812 switch (op) {
813 case EACPI_OBF_HAS_CHAR:
814 /* EC has written data back to host. OBF is
815 * automatically cleared after host reads
816 * the data
817 */
818 *data = !!(pmc_reg->PM1STS & PMC_PM1STS_OBF);
819 break;
820 case EACPI_IBF_HAS_CHAR:
821 *data = !!(pmc_reg->PM1STS & PMC_PM1STS_IBF);
822 break;
823 case EACPI_READ_STS:
824 *data = pmc_reg->PM1STS;
825 break;
826 #ifdef CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION
827 case EACPI_GET_SHARED_MEMORY:
828 *data = (uint32_t)&h2ram_pool[
829 CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION_PORT_NUM];
830 break;
831 #endif /* CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION */
832 default:
833 return -EINVAL;
834 }
835 }
836 #ifdef CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE
837 else if (op >= ECUSTOM_START_OPCODE && op <= ECUSTOM_MAX_OPCODE) {
838
839 switch (op) {
840 case ECUSTOM_HOST_CMD_GET_PARAM_MEMORY:
841 *data = (uint32_t)&h2ram_pool[
842 CONFIG_ESPI_PERIPHERAL_HOST_CMD_PARAM_PORT_NUM];
843 break;
844 case ECUSTOM_HOST_CMD_GET_PARAM_MEMORY_SIZE:
845 *data = CONFIG_ESPI_IT8XXX2_HC_H2RAM_SIZE;
846 break;
847 default:
848 return -EINVAL;
849 }
850 }
851 #endif /* CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE */
852 else {
853 return -ENOTSUP;
854 }
855
856 return 0;
857 }
858
859 static int espi_it8xxx2_write_lpc_request(const struct device *dev,
860 enum lpc_peripheral_opcode op,
861 uint32_t *data)
862 {
863 const struct espi_it8xxx2_config *const config = dev->config;
864
865 if (op >= E8042_START_OPCODE && op <= E8042_MAX_OPCODE) {
866 struct kbc_regs *const kbc_reg =
867 (struct kbc_regs *)config->base_kbc;
868
869 switch (op) {
870 case E8042_WRITE_KB_CHAR:
871 kbc_reg->KBHIKDOR = (*data & 0xff);
872 /*
873 * Enable OBE interrupt after putting data in
874 * data register.
875 */
876 kbc_reg->KBHICR |= KBC_KBHICR_OBECIE;
877 break;
878 case E8042_WRITE_MB_CHAR:
879 kbc_reg->KBHIMDOR = (*data & 0xff);
880 /*
881 * Enable OBE interrupt after putting data in
882 * data register.
883 */
884 kbc_reg->KBHICR |= KBC_KBHICR_OBECIE;
885 break;
886 case E8042_RESUME_IRQ:
887 /* Enable KBC IBF interrupt */
888 irq_enable(IT8XXX2_KBC_IBF_IRQ);
889 break;
890 case E8042_PAUSE_IRQ:
891 /* Disable KBC IBF interrupt */
892 irq_disable(IT8XXX2_KBC_IBF_IRQ);
893 break;
894 case E8042_CLEAR_OBF:
895 volatile uint8_t _kbhicr __unused;
896 /*
897 * After enabling IBF/OBF clear mode, we have to make
898 * sure that IBF interrupt is not triggered before
899 * disabling the clear mode; otherwise the interrupt will keep
900 * triggering until the watchdog is reset.
901 */
902 unsigned int key = irq_lock();
903 /*
904 * When IBFOBFCME is enabled, write 1 to COBF bit to
905 * clear KBC OBF.
906 */
907 kbc_reg->KBHICR |= KBC_KBHICR_IBFOBFCME;
908 kbc_reg->KBHICR |= KBC_KBHICR_COBF;
909 kbc_reg->KBHICR &= ~KBC_KBHICR_COBF;
910 /* Disable clear mode */
911 kbc_reg->KBHICR &= ~KBC_KBHICR_IBFOBFCME;
912 /*
913 * I/O access synchronization, this load operation will
914 * guarantee the above modification of SOC's register
915 * can be seen by any following instructions.
916 */
917 _kbhicr = kbc_reg->KBHICR;
918 irq_unlock(key);
919 break;
920 case E8042_SET_FLAG:
921 kbc_reg->KBHISR |= (*data & 0xff);
922 break;
923 case E8042_CLEAR_FLAG:
924 kbc_reg->KBHISR &= ~(*data & 0xff);
925 break;
926 default:
927 return -EINVAL;
928 }
929 } else if (op >= EACPI_START_OPCODE && op <= EACPI_MAX_OPCODE) {
930 struct pmc_regs *const pmc_reg =
931 (struct pmc_regs *)config->base_pmc;
932
933 switch (op) {
934 case EACPI_WRITE_CHAR:
935 pmc_reg->PM1DO = (*data & 0xff);
936 break;
937 case EACPI_WRITE_STS:
938 pmc_reg->PM1STS = (*data & 0xff);
939 break;
940 default:
941 return -EINVAL;
942 }
943 }
944 #ifdef CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE
945 else if (op >= ECUSTOM_START_OPCODE && op <= ECUSTOM_MAX_OPCODE) {
946 struct pmc_regs *const pmc_reg =
947 (struct pmc_regs *)config->base_pmc;
948
949 switch (op) {
950 /* Enable/Disable PMCx interrupt */
951 case ECUSTOM_HOST_SUBS_INTERRUPT_EN:
952 if (*data) {
953 host_custom_opcode_enable_interrupts();
954 } else {
955 host_custom_opcode_disable_interrupts();
956 }
957 break;
958 case ECUSTOM_HOST_CMD_SEND_RESULT:
959 /* Write result to data output port (set OBF status) */
960 pmc_reg->PM2DO = (*data & 0xff);
961 /* Clear processing flag */
962 pmc_reg->PM2STS &= ~PMC_PM2STS_GPF;
963 break;
964 default:
965 return -EINVAL;
966 }
967 }
968 #endif /* CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE */
969 else {
970 return -ENOTSUP;
971 }
972
973 return 0;
974 }
975
976 #ifdef CONFIG_ESPI_OOB_CHANNEL
977 /* eSPI cycle type field */
978 #define ESPI_OOB_CYCLE_TYPE 0x21
979 #define ESPI_OOB_TAG 0x00
980 #define ESPI_OOB_TIMEOUT_MS 200
981
982 /* eSPI tag + len[11:8] field */
983 #define ESPI_TAG_LEN_FIELD(tag, len) \
984 ((((tag) & 0xF) << 4) | (((len) >> 8) & 0xF))
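/*
 * E.g. ESPI_TAG_LEN_FIELD(0x1, 0x234) = 0x12: the tag (1h) goes in the upper
 * nibble and length bits [11:8] (2h) in the lower nibble.
 */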
985
986 struct espi_oob_msg_packet {
987 FLEXIBLE_ARRAY_DECLARE(uint8_t, data_byte);
988 };
989
990 static int espi_it8xxx2_send_oob(const struct device *dev,
991 struct espi_oob_packet *pckt)
992 {
993 const struct espi_it8xxx2_config *const config = dev->config;
994 struct espi_slave_regs *const slave_reg =
995 (struct espi_slave_regs *)config->base_espi_slave;
996 struct espi_queue1_regs *const queue1_reg =
997 (struct espi_queue1_regs *)config->base_espi_queue1;
998 struct espi_oob_msg_packet *oob_pckt =
999 (struct espi_oob_msg_packet *)pckt->buf;
1000
1001 if (!(slave_reg->CH_OOB_CAPCFG3 & IT8XXX2_ESPI_OOB_READY_MASK)) {
1002 LOG_ERR("%s: OOB channel isn't ready", __func__);
1003 return -EIO;
1004 }
1005
1006 if (slave_reg->ESUCTRL0 & IT8XXX2_ESPI_UPSTREAM_BUSY) {
1007 LOG_ERR("%s: OOB upstream busy", __func__);
1008 return -EIO;
1009 }
1010
1011 if (pckt->len > ESPI_IT8XXX2_OOB_MAX_PAYLOAD_SIZE) {
1012 LOG_ERR("%s: Out of OOB queue space", __func__);
1013 return -EINVAL;
1014 }
1015
1016 /* Set cycle type */
1017 slave_reg->ESUCTRL1 = IT8XXX2_ESPI_CYCLE_TYPE_OOB;
1018 /* Set tag and length[11:8] */
1019 slave_reg->ESUCTRL2 = ESPI_TAG_LEN_FIELD(0, pckt->len);
1020 /* Set length [7:0] */
1021 slave_reg->ESUCTRL3 = pckt->len & 0xff;
1022
1023 /* Set data byte */
1024 for (int i = 0; i < pckt->len; i++) {
1025 queue1_reg->UPSTREAM_DATA[i] = oob_pckt->data_byte[i];
1026 }
1027
1028 /* Set upstream enable */
1029 slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_ENABLE;
1030 /* Set upstream go */
1031 slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_GO;
1032
1033 return 0;
1034 }
1035
1036 static int espi_it8xxx2_receive_oob(const struct device *dev,
1037 struct espi_oob_packet *pckt)
1038 {
1039 const struct espi_it8xxx2_config *const config = dev->config;
1040 struct espi_slave_regs *const slave_reg =
1041 (struct espi_slave_regs *)config->base_espi_slave;
1042 struct espi_queue0_regs *const queue0_reg =
1043 (struct espi_queue0_regs *)config->base_espi_queue0;
1044 struct espi_oob_msg_packet *oob_pckt =
1045 (struct espi_oob_msg_packet *)pckt->buf;
1046 uint8_t oob_len;
1047
1048 if (!(slave_reg->CH_OOB_CAPCFG3 & IT8XXX2_ESPI_OOB_READY_MASK)) {
1049 LOG_ERR("%s: OOB channel isn't ready", __func__);
1050 return -EIO;
1051 }
1052
1053 #ifndef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC
1054 struct espi_it8xxx2_data *const data = dev->data;
1055 int ret;
1056
1057 /* Wait until receive OOB message or timeout */
1058 ret = k_sem_take(&data->oob_upstream_go, K_MSEC(ESPI_OOB_TIMEOUT_MS));
1059 if (ret == -EAGAIN) {
1060 LOG_ERR("%s: Timeout", __func__);
1061 return -ETIMEDOUT;
1062 }
1063 #endif
1064
1065 /* Get length */
1066 oob_len = (slave_reg->ESOCTRL4 & IT8XXX2_ESPI_PUT_OOB_LEN_MASK);
1067 /*
1068 * The buffer passed to the driver is too small.
1069 * The first three bytes of the buffer are cycle type, tag, and length.
1070 */
1071 if (oob_len > pckt->len) {
1072 LOG_ERR("%s: Out of rx buf %d vs %d", __func__,
1073 oob_len, pckt->len);
1074 return -EINVAL;
1075 }
1076
1077 pckt->len = oob_len;
1078 /* Get data byte */
1079 for (int i = 0; i < oob_len; i++) {
1080 oob_pckt->data_byte[i] = queue0_reg->PUT_OOB_DATA[i];
1081 }
1082
1083 return 0;
1084 }
1085
1086 static void espi_it8xxx2_oob_init(const struct device *dev)
1087 {
1088 const struct espi_it8xxx2_config *const config = dev->config;
1089 struct espi_slave_regs *const slave_reg =
1090 (struct espi_slave_regs *)config->base_espi_slave;
1091
1092 #ifndef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC
1093 struct espi_it8xxx2_data *const data = dev->data;
1094
1095 k_sem_init(&data->oob_upstream_go, 0, 1);
1096 #endif
1097
1098 /* Upstream interrupt enable */
1099 slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_INTERRUPT_ENABLE;
1100
1101 /* PUT_OOB interrupt enable */
1102 slave_reg->ESOCTRL1 |= IT8XXX2_ESPI_PUT_OOB_INTERRUPT_ENABLE;
1103 }
1104 #endif
1105
1106 #ifdef CONFIG_ESPI_FLASH_CHANNEL
1107 #define ESPI_FLASH_TAG 0x01
1108 #define ESPI_FLASH_READ_TIMEOUT_MS 200
1109 #define ESPI_FLASH_WRITE_TIMEOUT_MS 500
1110 #define ESPI_FLASH_ERASE_TIMEOUT_MS 1000
1111
1112 /* Successful completion without data */
1113 #define ESPI_IT8XXX2_PUT_FLASH_C_SCWOD 0
1114 /* Successful completion with data */
1115 #define ESPI_IT8XXX2_PUT_FLASH_C_SCWD 4
1116
1117 enum espi_flash_cycle_type {
1118 IT8XXX2_ESPI_CYCLE_TYPE_FLASH_READ = 0x08,
1119 IT8XXX2_ESPI_CYCLE_TYPE_FLASH_WRITE = 0x09,
1120 IT8XXX2_ESPI_CYCLE_TYPE_FLASH_ERASE = 0x0A,
1121 };
1122
1123 static int espi_it8xxx2_flash_trans(const struct device *dev,
1124 struct espi_flash_packet *pckt,
1125 enum espi_flash_cycle_type tran)
1126 {
1127 const struct espi_it8xxx2_config *const config = dev->config;
1128 struct espi_slave_regs *const slave_reg =
1129 (struct espi_slave_regs *)config->base_espi_slave;
1130 struct espi_queue1_regs *const queue1_reg =
1131 (struct espi_queue1_regs *)config->base_espi_queue1;
1132
1133 if (!(slave_reg->CH_FLASH_CAPCFG3 & IT8XXX2_ESPI_FC_READY_MASK)) {
1134 LOG_ERR("%s: Flash channel isn't ready (tran:%d)",
1135 __func__, tran);
1136 return -EIO;
1137 }
1138
1139 if (slave_reg->ESUCTRL0 & IT8XXX2_ESPI_UPSTREAM_BUSY) {
1140 LOG_ERR("%s: Upstream busy (tran:%d)", __func__, tran);
1141 return -EIO;
1142 }
1143
1144 if (pckt->len > IT8XXX2_ESPI_FLASH_MAX_PAYLOAD_SIZE) {
1145 LOG_ERR("%s: Invalid size request (tran:%d)", __func__, tran);
1146 return -EINVAL;
1147 }
1148
1149 /* Set cycle type */
1150 slave_reg->ESUCTRL1 = tran;
1151 /* Set tag and length[11:8] */
1152 slave_reg->ESUCTRL2 = (ESPI_FLASH_TAG << 4);
1153 /*
1154 * Set length [7:0]
1155 * Note: for erasing, the least significant 3 bits of the length field
1156 * specify the size of the block to be erased:
1157 * 001b: 4 Kbytes
1158 * 010b: 64 Kbytes
1159 * 100b: 128 Kbytes
1160 * 101b: 256 Kbytes
1161 */
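/*
 * E.g. a caller requesting a 64 Kbyte erase passes pckt->len with the low
 * three bits set to 010b (0x02), per the table above.
 */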
1162 slave_reg->ESUCTRL3 = pckt->len;
1163 /* Set flash address */
1164 queue1_reg->UPSTREAM_DATA[0] = (pckt->flash_addr >> 24) & 0xff;
1165 queue1_reg->UPSTREAM_DATA[1] = (pckt->flash_addr >> 16) & 0xff;
1166 queue1_reg->UPSTREAM_DATA[2] = (pckt->flash_addr >> 8) & 0xff;
1167 queue1_reg->UPSTREAM_DATA[3] = pckt->flash_addr & 0xff;
1168
1169 return 0;
1170 }
1171
1172 static int espi_it8xxx2_flash_read(const struct device *dev,
1173 struct espi_flash_packet *pckt)
1174 {
1175 const struct espi_it8xxx2_config *const config = dev->config;
1176 struct espi_it8xxx2_data *const data = dev->data;
1177 struct espi_slave_regs *const slave_reg =
1178 (struct espi_slave_regs *)config->base_espi_slave;
1179 int ret;
1180
1181 ret = espi_it8xxx2_flash_trans(dev, pckt,
1182 IT8XXX2_ESPI_CYCLE_TYPE_FLASH_READ);
1183 if (ret) {
1184 return ret;
1185 }
1186
1187 /* Set upstream enable */
1188 slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_ENABLE;
1189 /* Set upstream go */
1190 slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_GO;
1191
1192 /* Wait until upstream done or timeout */
1193 ret = k_sem_take(&data->flash_upstream_go,
1194 K_MSEC(ESPI_FLASH_READ_TIMEOUT_MS));
1195 if (ret == -EAGAIN) {
1196 LOG_ERR("%s: Timeout", __func__);
1197 return -ETIMEDOUT;
1198 }
1199
1200 if (data->put_flash_cycle_type != ESPI_IT8XXX2_PUT_FLASH_C_SCWD) {
1201 LOG_ERR("%s: Unsuccessful completion", __func__);
1202 return -EIO;
1203 }
1204
1205 memcpy(pckt->buf, data->flash_buf, pckt->len);
1206
1207 LOG_INF("%s: read (%d) bytes from flash over espi", __func__,
1208 data->put_flash_len);
1209
1210 return 0;
1211 }
1212
1213 static int espi_it8xxx2_flash_write(const struct device *dev,
1214 struct espi_flash_packet *pckt)
1215 {
1216 const struct espi_it8xxx2_config *const config = dev->config;
1217 struct espi_it8xxx2_data *const data = dev->data;
1218 struct espi_slave_regs *const slave_reg =
1219 (struct espi_slave_regs *)config->base_espi_slave;
1220 struct espi_queue1_regs *const queue1_reg =
1221 (struct espi_queue1_regs *)config->base_espi_queue1;
1222 int ret;
1223
1224 ret = espi_it8xxx2_flash_trans(dev, pckt,
1225 IT8XXX2_ESPI_CYCLE_TYPE_FLASH_WRITE);
1226 if (ret) {
1227 return ret;
1228 }
1229
1230 /* Set data byte */
1231 for (int i = 0; i < pckt->len; i++) {
1232 queue1_reg->UPSTREAM_DATA[4 + i] = pckt->buf[i];
1233 }
1234
1235 /* Set upstream enable */
1236 slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_ENABLE;
1237 /* Set upstream go */
1238 slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_GO;
1239
1240 /* Wait until upstream done or timeout */
1241 ret = k_sem_take(&data->flash_upstream_go,
1242 K_MSEC(ESPI_FLASH_WRITE_TIMEOUT_MS));
1243 if (ret == -EAGAIN) {
1244 LOG_ERR("%s: Timeout", __func__);
1245 return -ETIMEDOUT;
1246 }
1247
1248 if (data->put_flash_cycle_type != ESPI_IT8XXX2_PUT_FLASH_C_SCWOD) {
1249 LOG_ERR("%s: Unsuccessful completion", __func__);
1250 return -EIO;
1251 }
1252
1253 return 0;
1254 }
1255
1256 static int espi_it8xxx2_flash_erase(const struct device *dev,
1257 struct espi_flash_packet *pckt)
1258 {
1259 const struct espi_it8xxx2_config *const config = dev->config;
1260 struct espi_it8xxx2_data *const data = dev->data;
1261 struct espi_slave_regs *const slave_reg =
1262 (struct espi_slave_regs *)config->base_espi_slave;
1263 int ret;
1264
1265 ret = espi_it8xxx2_flash_trans(dev, pckt,
1266 IT8XXX2_ESPI_CYCLE_TYPE_FLASH_ERASE);
1267 if (ret) {
1268 return ret;
1269 }
1270
1271 /* Set upstream enable */
1272 slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_ENABLE;
1273 /* Set upstream go */
1274 slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_GO;
1275
1276 /* Wait until upstream done or timeout */
1277 ret = k_sem_take(&data->flash_upstream_go,
1278 K_MSEC(ESPI_FLASH_ERASE_TIMEOUT_MS));
1279 if (ret == -EAGAIN) {
1280 LOG_ERR("%s: Timeout", __func__);
1281 return -ETIMEDOUT;
1282 }
1283
1284 if (data->put_flash_cycle_type != ESPI_IT8XXX2_PUT_FLASH_C_SCWOD) {
1285 LOG_ERR("%s: Unsuccessful completion", __func__);
1286 return -EIO;
1287 }
1288
1289 return 0;
1290 }
1291
1292 static void espi_it8xxx2_flash_upstream_done_isr(const struct device *dev)
1293 {
1294 const struct espi_it8xxx2_config *const config = dev->config;
1295 struct espi_it8xxx2_data *const data = dev->data;
1296 struct espi_slave_regs *const slave_reg =
1297 (struct espi_slave_regs *)config->base_espi_slave;
1298 struct espi_queue1_regs *const queue1_reg =
1299 (struct espi_queue1_regs *)config->base_espi_queue1;
1300
1301 data->put_flash_cycle_type = slave_reg->ESUCTRL6;
1302 data->put_flash_tag = slave_reg->ESUCTRL7 &
1303 IT8XXX2_ESPI_PUT_FLASH_TAG_MASK;
1304 data->put_flash_len = slave_reg->ESUCTRL8 &
1305 IT8XXX2_ESPI_PUT_FLASH_LEN_MASK;
1306
1307 if (slave_reg->ESUCTRL1 == IT8XXX2_ESPI_CYCLE_TYPE_FLASH_READ) {
1308 if (data->put_flash_len > IT8XXX2_ESPI_FLASH_MAX_PAYLOAD_SIZE) {
1309 LOG_ERR("%s: Invalid size (%d)", __func__,
1310 data->put_flash_len);
1311 } else {
1312 for (int i = 0; i < data->put_flash_len; i++) {
1313 data->flash_buf[i] =
1314 queue1_reg->UPSTREAM_DATA[i];
1315 }
1316 }
1317 }
1318
1319 k_sem_give(&data->flash_upstream_go);
1320 }
1321
1322 static void espi_it8xxx2_flash_init(const struct device *dev)
1323 {
1324 const struct espi_it8xxx2_config *const config = dev->config;
1325 struct espi_it8xxx2_data *const data = dev->data;
1326 struct espi_slave_regs *const slave_reg =
1327 (struct espi_slave_regs *)config->base_espi_slave;
1328
1329 k_sem_init(&data->flash_upstream_go, 0, 1);
1330
1331 /* Upstream interrupt enable */
1332 slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_INTERRUPT_ENABLE;
1333 }
1334 #endif /* CONFIG_ESPI_FLASH_CHANNEL */
1335
1336 /* eSPI driver registration */
1337 static int espi_it8xxx2_init(const struct device *dev);
1338
1339 static DEVICE_API(espi, espi_it8xxx2_driver_api) = {
1340 .config = espi_it8xxx2_configure,
1341 .get_channel_status = espi_it8xxx2_channel_ready,
1342 .send_vwire = espi_it8xxx2_send_vwire,
1343 .receive_vwire = espi_it8xxx2_receive_vwire,
1344 .manage_callback = espi_it8xxx2_manage_callback,
1345 .read_lpc_request = espi_it8xxx2_read_lpc_request,
1346 .write_lpc_request = espi_it8xxx2_write_lpc_request,
1347 #ifdef CONFIG_ESPI_OOB_CHANNEL
1348 .send_oob = espi_it8xxx2_send_oob,
1349 .receive_oob = espi_it8xxx2_receive_oob,
1350 #endif
1351 #ifdef CONFIG_ESPI_FLASH_CHANNEL
1352 .flash_read = espi_it8xxx2_flash_read,
1353 .flash_write = espi_it8xxx2_flash_write,
1354 .flash_erase = espi_it8xxx2_flash_erase,
1355 #endif
1356 };
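/*
 * Minimal application-side usage sketch (illustrative only; it assumes the
 * generic Zephyr eSPI API and a devicetree node labelled espi0, and the
 * handler name is hypothetical):
 *
 *	static struct espi_callback espi_cb;
 *
 *	static void app_espi_handler(const struct device *dev,
 *				     struct espi_callback *cb,
 *				     struct espi_event event)
 *	{
 *		// event.evt_details is the VW signal, event.evt_data its level
 *	}
 *
 *	const struct device *espi_dev = DEVICE_DT_GET(DT_NODELABEL(espi0));
 *	struct espi_cfg cfg = { .max_freq = 66 };
 *
 *	espi_config(espi_dev, &cfg);
 *	espi_init_callback(&espi_cb, app_espi_handler,
 *			   ESPI_BUS_EVENT_VWIRE_RECEIVED);
 *	espi_add_callback(espi_dev, &espi_cb);
 */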
1357
1358 static void espi_it8xxx2_vw_notify_system_state(const struct device *dev,
1359 enum espi_vwire_signal signal)
1360 {
1361 struct espi_it8xxx2_data *const data = dev->data;
1362 struct espi_event evt = {ESPI_BUS_EVENT_VWIRE_RECEIVED, 0, 0};
1363 uint8_t level = 0;
1364
1365 espi_it8xxx2_receive_vwire(dev, signal, &level);
1366
1367 evt.evt_details = signal;
1368 evt.evt_data = level;
1369 espi_send_callbacks(&data->callbacks, dev, evt);
1370 }
1371
1372 static void espi_vw_signal_no_isr(const struct device *dev)
1373 {
1374 ARG_UNUSED(dev);
1375 }
1376
1377 static const struct espi_vw_signal_t vwidx2_signals[] = {
1378 {ESPI_VWIRE_SIGNAL_SLP_S3, NULL},
1379 {ESPI_VWIRE_SIGNAL_SLP_S4, NULL},
1380 {ESPI_VWIRE_SIGNAL_SLP_S5, NULL},
1381 };
1382
1383 static void espi_it8xxx2_vwidx2_isr(const struct device *dev,
1384 uint8_t updated_flag)
1385 {
1386 for (int i = 0; i < ARRAY_SIZE(vwidx2_signals); i++) {
1387 enum espi_vwire_signal vw_signal = vwidx2_signals[i].signal;
1388
1389 if (updated_flag & vw_channel_list[vw_signal].level_mask) {
1390 espi_it8xxx2_vw_notify_system_state(dev, vw_signal);
1391 }
1392 }
1393 }
1394
1395 static void espi_vw_oob_rst_warn_isr(const struct device *dev)
1396 {
1397 uint8_t level = 0;
1398
1399 espi_it8xxx2_receive_vwire(dev, ESPI_VWIRE_SIGNAL_OOB_RST_WARN, &level);
1400 espi_it8xxx2_send_vwire(dev, ESPI_VWIRE_SIGNAL_OOB_RST_ACK, level);
1401 }
1402
1403 static void espi_vw_pltrst_isr(const struct device *dev)
1404 {
1405 uint8_t pltrst = 0;
1406
1407 espi_it8xxx2_receive_vwire(dev, ESPI_VWIRE_SIGNAL_PLTRST, &pltrst);
1408
1409 if (pltrst) {
1410 espi_it8xxx2_send_vwire(dev, ESPI_VWIRE_SIGNAL_SMI, 1);
1411 espi_it8xxx2_send_vwire(dev, ESPI_VWIRE_SIGNAL_SCI, 1);
1412 espi_it8xxx2_send_vwire(dev, ESPI_VWIRE_SIGNAL_HOST_RST_ACK, 1);
1413 espi_it8xxx2_send_vwire(dev, ESPI_VWIRE_SIGNAL_RST_CPU_INIT, 1);
1414 }
1415
1416 LOG_INF("VW PLTRST_L %sasserted", pltrst ? "de" : "");
1417 }
1418
1419 static const struct espi_vw_signal_t vwidx3_signals[] = {
1420 {ESPI_VWIRE_SIGNAL_OOB_RST_WARN, espi_vw_oob_rst_warn_isr},
1421 {ESPI_VWIRE_SIGNAL_PLTRST, espi_vw_pltrst_isr},
1422 };
1423
1424 static void espi_it8xxx2_vwidx3_isr(const struct device *dev,
1425 uint8_t updated_flag)
1426 {
1427 for (int i = 0; i < ARRAY_SIZE(vwidx3_signals); i++) {
1428 enum espi_vwire_signal vw_signal = vwidx3_signals[i].signal;
1429
1430 if (updated_flag & vw_channel_list[vw_signal].level_mask) {
1431 vwidx3_signals[i].vw_signal_isr(dev);
1432 espi_it8xxx2_vw_notify_system_state(dev, vw_signal);
1433 }
1434 }
1435 }
1436
1437 static void espi_vw_host_rst_warn_isr(const struct device *dev)
1438 {
1439 uint8_t level = 0;
1440
1441 espi_it8xxx2_receive_vwire(dev,
1442 ESPI_VWIRE_SIGNAL_HOST_RST_WARN, &level);
1443 espi_it8xxx2_send_vwire(dev, ESPI_VWIRE_SIGNAL_HOST_RST_ACK, level);
1444 }
1445
1446 static const struct espi_vw_signal_t vwidx7_signals[] = {
1447 {ESPI_VWIRE_SIGNAL_HOST_RST_WARN, espi_vw_host_rst_warn_isr},
1448 };
1449
1450 static void espi_it8xxx2_vwidx7_isr(const struct device *dev,
1451 uint8_t updated_flag)
1452 {
1453 for (int i = 0; i < ARRAY_SIZE(vwidx7_signals); i++) {
1454 enum espi_vwire_signal vw_signal = vwidx7_signals[i].signal;
1455
1456 if (updated_flag & vw_channel_list[vw_signal].level_mask) {
1457 vwidx7_signals[i].vw_signal_isr(dev);
1458 espi_it8xxx2_vw_notify_system_state(dev, vw_signal);
1459 }
1460 }
1461 }
1462
1463 static void espi_vw_sus_warn_isr(const struct device *dev)
1464 {
1465 uint8_t level = 0;
1466
1467 espi_it8xxx2_receive_vwire(dev, ESPI_VWIRE_SIGNAL_SUS_WARN, &level);
1468 espi_it8xxx2_send_vwire(dev, ESPI_VWIRE_SIGNAL_SUS_ACK, level);
1469 }
1470
1471 static const struct espi_vw_signal_t vwidx41_signals[] = {
1472 {ESPI_VWIRE_SIGNAL_SUS_WARN, espi_vw_sus_warn_isr},
1473 {ESPI_VWIRE_SIGNAL_SUS_PWRDN_ACK, espi_vw_signal_no_isr},
1474 {ESPI_VWIRE_SIGNAL_SLP_A, espi_vw_signal_no_isr},
1475 };
1476
1477 static void espi_it8xxx2_vwidx41_isr(const struct device *dev,
1478 uint8_t updated_flag)
1479 {
1480 for (int i = 0; i < ARRAY_SIZE(vwidx41_signals); i++) {
1481 enum espi_vwire_signal vw_signal = vwidx41_signals[i].signal;
1482
1483 if (updated_flag & vw_channel_list[vw_signal].level_mask) {
1484 vwidx41_signals[i].vw_signal_isr(dev);
1485 espi_it8xxx2_vw_notify_system_state(dev, vw_signal);
1486 }
1487 }
1488 }
1489
1490 static const struct espi_vw_signal_t vwidx42_signals[] = {
1491 {ESPI_VWIRE_SIGNAL_SLP_LAN, NULL},
1492 {ESPI_VWIRE_SIGNAL_SLP_WLAN, NULL},
1493 };
1494
1495 static void espi_it8xxx2_vwidx42_isr(const struct device *dev,
1496 uint8_t updated_flag)
1497 {
1498 for (int i = 0; i < ARRAY_SIZE(vwidx42_signals); i++) {
1499 enum espi_vwire_signal vw_signal = vwidx42_signals[i].signal;
1500
1501 if (updated_flag & vw_channel_list[vw_signal].level_mask) {
1502 espi_it8xxx2_vw_notify_system_state(dev, vw_signal);
1503 }
1504 }
1505 }
1506
1507 static void espi_it8xxx2_vwidx43_isr(const struct device *dev,
1508 uint8_t updated_flag)
1509 {
1510 ARG_UNUSED(dev);
1511 /*
1512 * No callback is sent to the system because no index 43
1513 * virtual wire signal is listed in enum espi_vwire_signal.
1514 */
1515 LOG_INF("vw isr %s is ignored!", __func__);
1516 }
1517
1518 static void espi_it8xxx2_vwidx44_isr(const struct device *dev,
1519 uint8_t updated_flag)
1520 {
1521 ARG_UNUSED(dev);
1522 /*
1523 * No callback is sent to the system because no index 44
1524 * virtual wire signal is listed in enum espi_vwire_signal.
1525 */
1526 LOG_INF("vw isr %s is ignored!", __func__);
1527 }
1528
1529 static const struct espi_vw_signal_t vwidx47_signals[] = {
1530 {ESPI_VWIRE_SIGNAL_HOST_C10, NULL},
1531 };
1532 static void espi_it8xxx2_vwidx47_isr(const struct device *dev,
1533 uint8_t updated_flag)
1534 {
1535 for (int i = 0; i < ARRAY_SIZE(vwidx47_signals); i++) {
1536 enum espi_vwire_signal vw_signal = vwidx47_signals[i].signal;
1537
1538 if (updated_flag & vw_channel_list[vw_signal].level_mask) {
1539 espi_it8xxx2_vw_notify_system_state(dev, vw_signal);
1540 }
1541 }
1542 }
1543
1544 /*
1545 * The order of the eSPI VW ISRs in this array must match the bit order
1546 * in the eSPI VW VWCTRL1 register.
1547 */
1548 static const struct vwidx_isr_t vwidx_isr_list[] = {
1549 [0] = {espi_it8xxx2_vwidx2_isr, 0x02},
1550 [1] = {espi_it8xxx2_vwidx3_isr, 0x03},
1551 [2] = {espi_it8xxx2_vwidx7_isr, 0x07},
1552 [3] = {espi_it8xxx2_vwidx41_isr, 0x41},
1553 [4] = {espi_it8xxx2_vwidx42_isr, 0x42},
1554 [5] = {espi_it8xxx2_vwidx43_isr, 0x43},
1555 [6] = {espi_it8xxx2_vwidx44_isr, 0x44},
1556 [7] = {espi_it8xxx2_vwidx47_isr, 0x47},
1557 };
1558
1559 /*
1560 * This records the previous VW valid/level field state so that changes can
1561 * be detected. The following sequence is run only when the state has changed.
1562 */
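/*
 * E.g. if VW index 02h was cached as 0x31 and the controller updates it to
 * 0x33, the ISR below passes 0x31 ^ 0x33 = 0x02 as updated_flag, so only the
 * signal whose level bit is BIT(1) (SLP_S4) is reported.
 */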
1563 static uint8_t vwidx_cached_flag[ARRAY_SIZE(vwidx_isr_list)];
1564
1565 static void espi_it8xxx2_reset_vwidx_cache(const struct device *dev)
1566 {
1567 const struct espi_it8xxx2_config *const config = dev->config;
1568 struct espi_vw_regs *const vw_reg =
1569 (struct espi_vw_regs *)config->base_espi_vw;
1570
1571 /* reset vwidx_cached_flag */
1572 for (int i = 0; i < ARRAY_SIZE(vwidx_isr_list); i++) {
1573 vwidx_cached_flag[i] =
1574 vw_reg->VW_INDEX[vwidx_isr_list[i].vw_index];
1575 }
1576 }
1577
1578 static void espi_it8xxx2_vw_isr(const struct device *dev)
1579 {
1580 const struct espi_it8xxx2_config *const config = dev->config;
1581 struct espi_vw_regs *const vw_reg =
1582 (struct espi_vw_regs *)config->base_espi_vw;
1583 uint8_t vwidx_updated = vw_reg->VWCTRL1;
1584
1585 /* write-1 to clear */
1586 vw_reg->VWCTRL1 = vwidx_updated;
1587
1588 for (int i = 0; i < ARRAY_SIZE(vwidx_isr_list); i++) {
1589 if (vwidx_updated & BIT(i)) {
1590 uint8_t vw_flag;
1591
1592 vw_flag = vw_reg->VW_INDEX[vwidx_isr_list[i].vw_index];
1593 vwidx_isr_list[i].vwidx_isr(dev,
1594 vwidx_cached_flag[i] ^ vw_flag);
1595 vwidx_cached_flag[i] = vw_flag;
1596 }
1597 }
1598 }
1599
1600 static void espi_it8xxx2_ch_notify_system_state(const struct device *dev,
1601 enum espi_channel ch, bool en)
1602 {
1603 struct espi_it8xxx2_data *const data = dev->data;
1604 struct espi_event evt = {
1605 .evt_type = ESPI_BUS_EVENT_CHANNEL_READY,
1606 .evt_details = ch,
1607 .evt_data = en,
1608 };
1609
1610 espi_send_callbacks(&data->callbacks, dev, evt);
1611 }
1612
1613 /*
1614 * Peripheral channel enable asserted flag.
1615 * A 0-to-1 or 1-to-0 transition on "Peripheral Channel Enable" bit.
1616 */
1617 static void espi_it8xxx2_peripheral_ch_en_isr(const struct device *dev,
1618 bool enable)
1619 {
1620 espi_it8xxx2_ch_notify_system_state(dev,
1621 ESPI_CHANNEL_PERIPHERAL, enable);
1622 }
1623
1624 /*
1625 * VW channel enable asserted flag.
1626 * A 0-to-1 or 1-to-0 transition on "Virtual Wire Channel Enable" bit.
1627 */
espi_it8xxx2_vw_ch_en_isr(const struct device * dev,bool enable)1628 static void espi_it8xxx2_vw_ch_en_isr(const struct device *dev, bool enable)
1629 {
1630 espi_it8xxx2_ch_notify_system_state(dev, ESPI_CHANNEL_VWIRE, enable);
1631 }
1632
1633 /*
1634 * OOB message channel enable asserted flag.
1635 * A 0-to-1 or 1-to-0 transition on "OOB Message Channel Enable" bit.
1636 */
espi_it8xxx2_oob_ch_en_isr(const struct device * dev,bool enable)1637 static void espi_it8xxx2_oob_ch_en_isr(const struct device *dev, bool enable)
1638 {
1639 espi_it8xxx2_ch_notify_system_state(dev, ESPI_CHANNEL_OOB, enable);
1640 }
1641
1642 /*
1643 * Flash channel enable asserted flag.
1644 * A 0-to-1 or 1-to-0 transition on "Flash Access Channel Enable" bit.
1645 */
espi_it8xxx2_flash_ch_en_isr(const struct device * dev,bool enable)1646 static void espi_it8xxx2_flash_ch_en_isr(const struct device *dev, bool enable)
1647 {
1648 if (enable) {
1649 espi_it8xxx2_send_vwire(dev, ESPI_VWIRE_SIGNAL_TARGET_BOOT_STS, 1);
1650 espi_it8xxx2_send_vwire(dev,
1651 ESPI_VWIRE_SIGNAL_TARGET_BOOT_DONE, 1);
1652 }
1653
1654 espi_it8xxx2_ch_notify_system_state(dev, ESPI_CHANNEL_FLASH, enable);
1655 }
1656
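/*
 * A peripheral channel posted/completion packet (PUT_PC) has been received.
 * The packet itself is not processed here; only the status is cleared so
 * that PC_FREE is released.
 */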
static void espi_it8xxx2_put_pc_status_isr(const struct device *dev)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;

	/*
	 * TODO: Check the cycle type (bits [3:0] of ESPCTRL0) and make the
	 * corresponding modification if needed.
	 */
	LOG_INF("isr %s is ignored!", __func__);

	/* write-1-clear to release PC_FREE */
	slave_reg->ESPCTRL0 = IT8XXX2_ESPI_INTERRUPT_PUT_PC;
}

#ifdef CONFIG_ESPI_OOB_CHANNEL
static void espi_it8xxx2_upstream_channel_disable_isr(const struct device *dev)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;

	LOG_INF("isr %s is ignored!", __func__);

	/* write-1 to clear this bit */
	slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_CHANNEL_DISABLE;
}

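/*
 * A PUT_OOB message has been received. Clear the status for the next
 * transaction, then either wake the thread waiting in the synchronous
 * receive path or, with CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC, notify the
 * application with the received length.
 */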
static void espi_it8xxx2_put_oob_status_isr(const struct device *dev)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_it8xxx2_data *const data = dev->data;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;
#ifdef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC
	struct espi_event evt = { .evt_type = ESPI_BUS_EVENT_OOB_RECEIVED,
				  .evt_details = 0,
				  .evt_data = 0 };
#endif

	/* Write-1 to clear this bit for the next incoming posted transaction. */
	slave_reg->ESOCTRL0 |= IT8XXX2_ESPI_PUT_OOB_STATUS;

#ifndef CONFIG_ESPI_OOB_CHANNEL_RX_ASYNC
	k_sem_give(&data->oob_upstream_go);
#else
	/* The additional detail is the length field of the PUT_OOB message packet. */
	evt.evt_details = (slave_reg->ESOCTRL4 & IT8XXX2_ESPI_PUT_OOB_LEN_MASK);
	espi_send_callbacks(&data->callbacks, dev, evt);
#endif
}
#endif

#if defined(CONFIG_ESPI_OOB_CHANNEL) || defined(CONFIG_ESPI_FLASH_CHANNEL)
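/*
 * An upstream transaction has completed. For flash cycles the result is
 * handed to the flash completion handler; in all cases the done status is
 * cleared and the upstream enable bit is cleared until the next request.
 */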
static void espi_it8xxx2_upstream_done_isr(const struct device *dev)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;

#ifdef CONFIG_ESPI_FLASH_CHANNEL
	/* cycle type is flash read, write, or erase */
	if (slave_reg->ESUCTRL1 != IT8XXX2_ESPI_CYCLE_TYPE_OOB) {
		espi_it8xxx2_flash_upstream_done_isr(dev);
	}
#endif

	/* write-1 to clear this bit */
	slave_reg->ESUCTRL0 |= IT8XXX2_ESPI_UPSTREAM_DONE;
	/* upstream disable */
	slave_reg->ESUCTRL0 &= ~IT8XXX2_ESPI_UPSTREAM_ENABLE;
}
#endif

/*
 * The order of the eSPI interrupt event ISRs in this array must match the
 * bit order in the IT8XXX2 eSPI ESGCTRL0 register.
 */
static const struct espi_isr_t espi_isr_list[] = {
	[0] = {espi_it8xxx2_peripheral_ch_en_isr, ASSERTED_FLAG},
	[1] = {espi_it8xxx2_vw_ch_en_isr, ASSERTED_FLAG},
	[2] = {espi_it8xxx2_oob_ch_en_isr, ASSERTED_FLAG},
	[3] = {espi_it8xxx2_flash_ch_en_isr, ASSERTED_FLAG},
	[4] = {espi_it8xxx2_peripheral_ch_en_isr, DEASSERTED_FLAG},
	[5] = {espi_it8xxx2_vw_ch_en_isr, DEASSERTED_FLAG},
	[6] = {espi_it8xxx2_oob_ch_en_isr, DEASSERTED_FLAG},
	[7] = {espi_it8xxx2_flash_ch_en_isr, DEASSERTED_FLAG},
};

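/*
 * Main eSPI interrupt service routine: dispatch the channel enable/disable
 * events flagged in ESGCTRL0 and handle the peripheral, OOB, and upstream
 * status bits.
 */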
static void espi_it8xxx2_isr(const struct device *dev)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;
	/* get espi interrupt events */
	uint8_t espi_event = slave_reg->ESGCTRL0;
#if defined(CONFIG_ESPI_OOB_CHANNEL) || defined(CONFIG_ESPI_FLASH_CHANNEL)
	uint8_t espi_upstream = slave_reg->ESUCTRL0;
#endif

	/* write-1 to clear */
	slave_reg->ESGCTRL0 = espi_event;

	/* process espi interrupt events */
	for (int i = 0; i < ARRAY_SIZE(espi_isr_list); i++) {
		if (espi_event & BIT(i)) {
			espi_isr_list[i].espi_isr(dev,
				espi_isr_list[i].isr_type);
		}
	}

	/*
	 * bit7: the peripheral has received a peripheral posted/completion.
	 * This bit indicates that the peripheral has received a packet from
	 * the eSPI peripheral channel.
	 */
	if (slave_reg->ESPCTRL0 & IT8XXX2_ESPI_INTERRUPT_PUT_PC) {
		espi_it8xxx2_put_pc_status_isr(dev);
	}

#ifdef CONFIG_ESPI_OOB_CHANNEL
	/*
	 * The corresponding channel of the eSPI upstream transaction is
	 * disabled.
	 */
	if (espi_upstream & IT8XXX2_ESPI_UPSTREAM_CHANNEL_DISABLE) {
		espi_it8xxx2_upstream_channel_disable_isr(dev);
	}

	/* The eSPI slave has received a PUT_OOB message. */
	if (slave_reg->ESOCTRL0 & IT8XXX2_ESPI_PUT_OOB_STATUS) {
		espi_it8xxx2_put_oob_status_isr(dev);
	}
#endif

	/* The eSPI OOB and flash channels share the same upstream interrupt. */
#if defined(CONFIG_ESPI_OOB_CHANNEL) || defined(CONFIG_ESPI_FLASH_CHANNEL)
	/* The eSPI upstream transaction is done. */
	if (espi_upstream & IT8XXX2_ESPI_UPSTREAM_DONE) {
		espi_it8xxx2_upstream_done_isr(dev);
	}
#endif
}

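/* Ungate (enable) or gate (disable) the eSPI input pads. */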
void espi_it8xxx2_enable_pad_ctrl(const struct device *dev, bool enable)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;

	if (enable) {
		/* Enable eSPI pad. */
		slave_reg->ESGCTRL2 &= ~IT8XXX2_ESPI_INPUT_PAD_GATING;
	} else {
		/* Disable eSPI pad. */
		slave_reg->ESGCTRL2 |= IT8XXX2_ESPI_INPUT_PAD_GATING;
	}
}

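/*
 * Enable or disable the eSPI transaction (WU42) wake-up interrupt; any
 * pending status is cleared when the interrupt is disabled.
 */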
void espi_it8xxx2_enable_trans_irq(const struct device *dev, bool enable)
{
	const struct espi_it8xxx2_config *const config = dev->config;

	if (enable) {
		irq_enable(IT8XXX2_TRANS_IRQ);
	} else {
		irq_disable(IT8XXX2_TRANS_IRQ);
		/* Clear pending interrupt */
		it8xxx2_wuc_clear_status(config->wuc.wucs, config->wuc.mask);
	}
}

static void espi_it8xxx2_trans_isr(const struct device *dev)
{
	/*
	 * This interrupt is only used to wake up the CPU; nothing needs to be
	 * done in the ISR other than disabling the interrupt.
	 */
	espi_it8xxx2_enable_trans_irq(dev, false);
}

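/*
 * GPIO callback for the eSPI_RESET# pin: reset the cached virtual wire state
 * when the signal is asserted and notify registered callbacks with an
 * ESPI_BUS_RESET event carrying the current pin level.
 */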
void espi_it8xxx2_espi_reset_isr(const struct device *port,
				 struct gpio_callback *cb, uint32_t pins)
{
	struct espi_it8xxx2_data *const data = ESPI_IT8XXX2_SOC_DEV->data;
	struct espi_event evt = {ESPI_BUS_RESET, 0, 0};
	bool espi_reset = gpio_pin_get(port, (find_msb_set(pins) - 1));

	if (!(espi_reset)) {
		/* Reset vwidx_cached_flag[] when espi_reset# asserted. */
		espi_it8xxx2_reset_vwidx_cache(ESPI_IT8XXX2_SOC_DEV);
	}

	evt.evt_data = espi_reset;
	espi_send_callbacks(&data->callbacks, ESPI_IT8XXX2_SOC_DEV, evt);

	LOG_INF("eSPI reset %sasserted", espi_reset ? "de" : "");
}

/* eSPI reset# is enabled on GPD2 */
#define ESPI_IT8XXX2_ESPI_RESET_PORT DEVICE_DT_GET(DT_NODELABEL(gpiod))
#define ESPI_IT8XXX2_ESPI_RESET_PIN 2
static void espi_it8xxx2_enable_reset(void)
{
	struct gpio_it8xxx2_regs *const gpio_regs = GPIO_IT8XXX2_REG_BASE;
	static struct gpio_callback espi_reset_cb;

	/* eSPI reset is enabled on GPD2 */
	gpio_regs->GPIO_GCR =
		(gpio_regs->GPIO_GCR & ~IT8XXX2_GPIO_GCR_ESPI_RST_EN_MASK) |
		(IT8XXX2_GPIO_GCR_ESPI_RST_D2 << IT8XXX2_GPIO_GCR_ESPI_RST_POS);
	/* enable eSPI reset isr */
	gpio_init_callback(&espi_reset_cb, espi_it8xxx2_espi_reset_isr,
			   BIT(ESPI_IT8XXX2_ESPI_RESET_PIN));
	gpio_add_callback(ESPI_IT8XXX2_ESPI_RESET_PORT, &espi_reset_cb);
	gpio_pin_interrupt_configure(ESPI_IT8XXX2_ESPI_RESET_PORT,
				     ESPI_IT8XXX2_ESPI_RESET_PIN,
				     GPIO_INT_MODE_EDGE | GPIO_INT_TRIG_BOTH);
}

static struct espi_it8xxx2_data espi_it8xxx2_data_0;
static const struct espi_it8xxx2_config espi_it8xxx2_config_0 = {
	.base_espi_slave = DT_INST_REG_ADDR_BY_IDX(0, 0),
	.base_espi_vw = DT_INST_REG_ADDR_BY_IDX(0, 1),
	.base_espi_queue0 = DT_INST_REG_ADDR_BY_IDX(0, 2),
	.base_espi_queue1 = DT_INST_REG_ADDR_BY_IDX(0, 3),
	.base_ec2i = DT_INST_REG_ADDR_BY_IDX(0, 4),
	.base_kbc = DT_INST_REG_ADDR_BY_IDX(0, 5),
	.base_pmc = DT_INST_REG_ADDR_BY_IDX(0, 6),
	.base_smfi = DT_INST_REG_ADDR_BY_IDX(0, 7),
	.wuc = IT8XXX2_DT_WUC_ITEMS_FUNC(0, 0),
};
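
/*
 * espi_it8xxx2_init() is defined after the device definition below; declare
 * it here (assuming no earlier declaration exists in this file) so the
 * DEVICE_DT_INST_DEFINE() reference resolves.
 */
static int espi_it8xxx2_init(const struct device *dev);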

DEVICE_DT_INST_DEFINE(0, &espi_it8xxx2_init, NULL,
		      &espi_it8xxx2_data_0, &espi_it8xxx2_config_0,
		      PRE_KERNEL_2, CONFIG_ESPI_INIT_PRIORITY,
		      &espi_it8xxx2_driver_api);

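/*
 * Driver initialization: configure the VCC detector, enable the selected
 * host peripheral interfaces (KBC, PMC, port 80h, SMFI), set up the virtual
 * wire and global eSPI interrupts, initialize the optional OOB and flash
 * channels, and enable the eSPI reset and wake-up sources.
 */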
static int espi_it8xxx2_init(const struct device *dev)
{
	const struct espi_it8xxx2_config *const config = dev->config;
	struct espi_vw_regs *const vw_reg =
		(struct espi_vw_regs *)config->base_espi_vw;
	struct espi_slave_regs *const slave_reg =
		(struct espi_slave_regs *)config->base_espi_slave;
	struct gctrl_it8xxx2_regs *const gctrl = ESPI_IT8XXX2_GET_GCTRL_BASE;

	/* configure VCC detector */
	gctrl->GCTRL_RSTS = (gctrl->GCTRL_RSTS &
			~(IT8XXX2_GCTRL_VCCDO_MASK | IT8XXX2_GCTRL_HGRST)) |
			(IT8XXX2_GCTRL_VCCDO_VCC_ON | IT8XXX2_GCTRL_GRST);

	/* enable PNPCFG devices */
	pnpcfg_it8xxx2_init(dev);

#ifdef CONFIG_ESPI_PERIPHERAL_8042_KBC
	/* enable kbc port (60h/64h) */
	kbc_it8xxx2_init(dev);
#endif
#ifdef CONFIG_ESPI_PERIPHERAL_HOST_IO
	/* enable pmc1 for ACPI port (62h/66h) */
	pmc1_it8xxx2_init(dev);
#endif
#ifdef CONFIG_ESPI_PERIPHERAL_DEBUG_PORT_80
	/* accept Port 80h cycles */
	port80_it8xxx2_init(dev);
#endif
#if defined(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD) || \
	defined(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION)
	smfi_it8xxx2_init(dev);
#endif
#ifdef CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD
	/* enable pmc2 for host command port */
	pmc2_it8xxx2_init(dev);
#endif

	/* Reset vwidx_cached_flag[] at initialization */
	espi_it8xxx2_reset_vwidx_cache(dev);

	/* Enable espi vw interrupt */
	vw_reg->VWCTRL0 |= IT8XXX2_ESPI_VW_INTERRUPT_ENABLE;
	IRQ_CONNECT(IT8XXX2_ESPI_VW_IRQ, 0, espi_it8xxx2_vw_isr,
		    DEVICE_DT_INST_GET(0), 0);
	irq_enable(IT8XXX2_ESPI_VW_IRQ);

	/* Reset PLTRST# virtual wire signal during eSPI reset */
	vw_reg->VWCTRL2 |= IT8XXX2_ESPI_VW_RESET_PLTRST;

#ifdef CONFIG_ESPI_OOB_CHANNEL
	espi_it8xxx2_oob_init(dev);
#endif

#ifdef CONFIG_ESPI_FLASH_CHANNEL
	espi_it8xxx2_flash_init(dev);
#endif

	/* Enable espi interrupt */
	slave_reg->ESGCTRL1 |= IT8XXX2_ESPI_INTERRUPT_ENABLE;
	IRQ_CONNECT(IT8XXX2_ESPI_IRQ, 0, espi_it8xxx2_isr,
		    DEVICE_DT_INST_GET(0), 0);
	irq_enable(IT8XXX2_ESPI_IRQ);

	/* enable interrupt and reset from eSPI_reset# */
	espi_it8xxx2_enable_reset();

	/*
	 * Enable eSPI to WUC.
	 * If an eSPI transaction is accepted, the WU42 interrupt will be
	 * asserted.
	 */
	slave_reg->ESGCTRL2 |= IT8XXX2_ESPI_TO_WUC_ENABLE;

	/* Enable WU42 of WUI */
	it8xxx2_wuc_clear_status(config->wuc.wucs, config->wuc.mask);
	it8xxx2_wuc_enable(config->wuc.wucs, config->wuc.mask);
	/*
	 * Only register the ISR here; the interrupt only needs to be enabled
	 * before the CPU and RAM clocks are gated in the idle function.
	 */
	IRQ_CONNECT(IT8XXX2_TRANS_IRQ, 0, espi_it8xxx2_trans_isr,
		    DEVICE_DT_INST_GET(0), 0);

	return 0;
}