/*
 * Copyright (c) 2019 Intel Corporation
 * Copyright (c) 2021 Microchip Technology Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT microchip_xec_espi_host_dev

#include <zephyr/kernel.h>
#include <soc.h>
#include <errno.h>
#include <zephyr/drivers/espi.h>
#include <zephyr/drivers/clock_control/mchp_xec_clock_control.h>
#include <zephyr/drivers/interrupt_controller/intc_mchp_xec_ecia.h>
#include <zephyr/dt-bindings/interrupt-controller/mchp-xec-ecia.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/sys_io.h>
#include <zephyr/sys/util.h>
#include <zephyr/irq.h>
#include "espi_utils.h"
#include "espi_mchp_xec_v2.h"

#define CONNECT_IRQ_MBOX0 NULL
#define CONNECT_IRQ_KBC0 NULL
#define CONNECT_IRQ_ACPI_EC0 NULL
#define CONNECT_IRQ_ACPI_EC1 NULL
#define CONNECT_IRQ_ACPI_EC2 NULL
#define CONNECT_IRQ_ACPI_EC3 NULL
#define CONNECT_IRQ_ACPI_EC4 NULL
#define CONNECT_IRQ_ACPI_PM1 NULL
#define CONNECT_IRQ_EMI0 NULL
#define CONNECT_IRQ_EMI1 NULL
#define CONNECT_IRQ_EMI2 NULL
#define CONNECT_IRQ_RTC0 NULL
#define CONNECT_IRQ_P80BD0 NULL

#define INIT_MBOX0 NULL
#define INIT_KBC0 NULL
#define INIT_ACPI_EC0 NULL
#define INIT_ACPI_EC1 NULL
#define INIT_ACPI_EC2 NULL
#define INIT_ACPI_EC3 NULL
#define INIT_ACPI_EC4 NULL
#define INIT_ACPI_PM1 NULL
#define INIT_EMI0 NULL
#define INIT_EMI1 NULL
#define INIT_EMI2 NULL
#define INIT_RTC0 NULL
#define INIT_P80BD0 NULL
#define INIT_UART0 NULL
#define INIT_UART1 NULL
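
/*
 * Each CONNECT_IRQ_xxx/INIT_xxx symbol defaults to NULL here and is
 * re-defined to a real handler by the peripheral-specific sections below
 * when the corresponding CONFIG_ESPI_PERIPHERAL_* option is enabled. The
 * hdic_tbl[] and hd_init_tbl[] tables at the end of this file are built
 * from these symbols.
 */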

/* BARs as defined in LPC spec chapter 11 */
#define ESPI_XEC_KBC_BAR_ADDRESS 0x00600000
#define ESPI_XEC_UART0_BAR_ADDRESS 0x03F80000
#define ESPI_XEC_MBOX_BAR_ADDRESS 0x03600000
#define ESPI_XEC_PORT80_BAR_ADDRESS 0x00800000
#define ESPI_XEC_PORT81_BAR_ADDRESS 0x00810000
#define ESPI_XEC_ACPI_EC0_BAR_ADDRESS 0x00620000
/* eSPI peripheral UART ports */
#define ESPI_PERIPHERAL_UART_PORT0 0
#define ESPI_PERIPHERAL_UART_PORT1 1

#define UART_DEFAULT_IRQ_POS 2u
#define UART_DEFAULT_IRQ BIT(UART_DEFAULT_IRQ_POS)

/* PCR */
#define XEC_PCR_REG_BASE	\
	((struct pcr_regs *)(DT_REG_ADDR(DT_NODELABEL(pcr))))

struct xec_espi_host_sram_config {
	uint32_t host_sram1_base;
	uint32_t host_sram2_base;
	uint16_t ec_sram1_ofs;
	uint16_t ec_sram2_ofs;
	uint8_t sram1_acc_size;
	uint8_t sram2_acc_size;
};

struct xec_espi_host_dev_config {
	const struct device *parent;
	uint32_t reg_base; /* logical device registers */
	uint32_t host_mem_base; /* 32-bit host memory address */
	uint16_t host_io_base; /* 16-bit host I/O address */
	uint8_t ldn; /* Logical device number */
	uint8_t num_ecia;
	uint32_t *girqs;
};

struct xec_acpi_ec_config {
	uintptr_t regbase;
	uint32_t ibf_ecia_info;
	uint32_t obe_ecia_info;
};

#ifdef CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD

#ifdef CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION
static uint8_t ec_host_cmd_sram[CONFIG_ESPI_XEC_PERIPHERAL_HOST_CMD_PARAM_SIZE +
				CONFIG_ESPI_XEC_PERIPHERAL_ACPI_SHD_MEM_SIZE] __aligned(8);
#else
static uint8_t ec_host_cmd_sram[CONFIG_ESPI_XEC_PERIPHERAL_HOST_CMD_PARAM_SIZE] __aligned(8);
#endif

#endif /* CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD */
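
/*
 * Layout of ec_host_cmd_sram: the first
 * CONFIG_ESPI_XEC_PERIPHERAL_HOST_CMD_PARAM_SIZE bytes hold the EC host
 * command parameter buffer exposed through EMI0, and, when
 * CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION is enabled, the following
 * CONFIG_ESPI_XEC_PERIPHERAL_ACPI_SHD_MEM_SIZE bytes hold the ACPI shared
 * memory region returned by EACPI_GET_SHARED_MEMORY.
 */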

#ifdef CONFIG_ESPI_PERIPHERAL_XEC_MAILBOX

BUILD_ASSERT(DT_NODE_HAS_STATUS_OKAY(DT_NODELABEL(mbox0)),
	     "XEC mbox0 DT node is disabled!");

struct xec_mbox_config {
	uintptr_t regbase;
	uint32_t ecia_info;
};

static const struct xec_mbox_config xec_mbox0_cfg = {
	.regbase = DT_REG_ADDR(DT_NODELABEL(mbox0)),
	.ecia_info = DT_PROP_BY_IDX(DT_NODELABEL(mbox0), girqs, 0),
};

/* dev is a pointer to espi0 (parent) device */
static void mbox0_isr(const struct device *dev)
{
	uint8_t girq = MCHP_XEC_ECIA_GIRQ(xec_mbox0_cfg.ecia_info);
	uint8_t bitpos = MCHP_XEC_ECIA_GIRQ_POS(xec_mbox0_cfg.ecia_info);

	/* clear GIRQ source, inline version */
	mchp_soc_ecia_girq_src_clr(girq, bitpos);
}

static int connect_irq_mbox0(const struct device *dev)
{
	/* clear GIRQ source */
	mchp_xec_ecia_info_girq_src_clr(xec_mbox0_cfg.ecia_info);

	IRQ_CONNECT(DT_IRQN(DT_NODELABEL(mbox0)),
		    DT_IRQ(DT_NODELABEL(mbox0), priority),
		    mbox0_isr,
		    DEVICE_DT_GET(DT_NODELABEL(espi0)),
		    0);
	irq_enable(DT_IRQN(DT_NODELABEL(mbox0)));

	/* enable GIRQ source */
	mchp_xec_ecia_info_girq_src_en(xec_mbox0_cfg.ecia_info);

	return 0;
}

/* Called by eSPI Bus init, eSPI reset de-assertion, and eSPI Platform Reset
 * de-assertion.
 */
static int init_mbox0(const struct device *dev)
{
	struct espi_xec_config *const cfg = ESPI_XEC_CONFIG(dev);
	struct espi_iom_regs *regs = (struct espi_iom_regs *)cfg->base_addr;

	regs->IOHBAR[IOB_MBOX] = ESPI_XEC_MBOX_BAR_ADDRESS |
				 MCHP_ESPI_IO_BAR_HOST_VALID;
	return 0;
}

#undef CONNECT_IRQ_MBOX0
#define CONNECT_IRQ_MBOX0 connect_irq_mbox0
#undef INIT_MBOX0
#define INIT_MBOX0 init_mbox0

#endif /* CONFIG_ESPI_PERIPHERAL_XEC_MAILBOX */

#ifdef CONFIG_ESPI_PERIPHERAL_8042_KBC

BUILD_ASSERT(DT_NODE_HAS_STATUS_OKAY(DT_NODELABEL(kbc0)),
	     "XEC kbc0 DT node is disabled!");

struct xec_kbc0_config {
	uintptr_t regbase;
	uint32_t ibf_ecia_info;
	uint32_t obe_ecia_info;
};

static const struct xec_kbc0_config xec_kbc0_cfg = {
	.regbase = DT_REG_ADDR(DT_NODELABEL(kbc0)),
	.ibf_ecia_info = DT_PROP_BY_IDX(DT_NODELABEL(kbc0), girqs, 1),
	.obe_ecia_info = DT_PROP_BY_IDX(DT_NODELABEL(kbc0), girqs, 0),
};

static void kbc0_ibf_isr(const struct device *dev)
{
	struct kbc_regs *kbc_hw = (struct kbc_regs *)xec_kbc0_cfg.regbase;
	struct espi_xec_data *const data =
		(struct espi_xec_data *const)dev->data;

#ifdef CONFIG_ESPI_PERIPHERAL_KBC_IBF_EVT_DATA
	/* Chrome solution */
	struct espi_event evt = {
		ESPI_BUS_PERIPHERAL_NOTIFICATION,
		ESPI_PERIPHERAL_8042_KBC,
		ESPI_PERIPHERAL_NODATA,
	};
	struct espi_evt_data_kbc *kbc_evt =
		(struct espi_evt_data_kbc *)&evt.evt_data;
	/*
	 * Indicates if the host sent a command or data.
	 * 0 = data
	 * 1 = Command.
	 */
	kbc_evt->type = kbc_hw->EC_KBC_STS & MCHP_KBC_STS_CD ? 1 : 0;
	/* The data in KBC Input Buffer */
	kbc_evt->data = kbc_hw->EC_DATA;
	/* KBC Input Buffer Full event */
	kbc_evt->evt = HOST_KBC_EVT_IBF;
#else
	/* Windows solution */
	/* The high byte contains information from the host,
	 * and the lower byte specifies whether the host sent
	 * a command or data. 1 = Command.
	 */
	uint32_t isr_data = ((kbc_hw->EC_KBC_STS & MCHP_KBC_STS_CD) <<
			     E8042_ISR_CMD_DATA_POS);
	isr_data |= ((kbc_hw->EC_DATA & 0xFF) << E8042_ISR_DATA_POS);

	struct espi_event evt = {
		.evt_type = ESPI_BUS_PERIPHERAL_NOTIFICATION,
		.evt_details = ESPI_PERIPHERAL_8042_KBC,
		.evt_data = isr_data
	};
#endif
	espi_send_callbacks(&data->callbacks, dev, evt);

	mchp_xec_ecia_info_girq_src_clr(xec_kbc0_cfg.ibf_ecia_info);
}
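
/*
 * Note: with CONFIG_ESPI_PERIPHERAL_KBC_IBF_EVT_DATA the IBF event payload is
 * packed as a struct espi_evt_data_kbc (type/data/evt fields), whereas the
 * default path packs the command/data flag and the data byte into the 32-bit
 * evt_data word using the E8042_ISR_* bit positions. Applications must decode
 * the notification according to the option they enabled.
 */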

static void kbc0_obe_isr(const struct device *dev)
{
#ifdef CONFIG_ESPI_PERIPHERAL_KBC_OBE_CBK
	/* Chrome solution */
	struct espi_xec_data *const data =
		(struct espi_xec_data *const)dev->data;

	struct espi_event evt = {
		ESPI_BUS_PERIPHERAL_NOTIFICATION,
		ESPI_PERIPHERAL_8042_KBC,
		ESPI_PERIPHERAL_NODATA,
	};
	struct espi_evt_data_kbc *kbc_evt =
		(struct espi_evt_data_kbc *)&evt.evt_data;

	/* Disable KBC OBE interrupt first */
	mchp_xec_ecia_info_girq_src_dis(xec_kbc0_cfg.obe_ecia_info);

	/*
	 * Notify application that host already read out data. The application
	 * might need to clear status register via espi_api_lpc_write_request()
	 * with E8042_CLEAR_FLAG opcode in callback.
	 */
	kbc_evt->evt = HOST_KBC_EVT_OBE;
	kbc_evt->data = 0;
	kbc_evt->type = 0;

	espi_send_callbacks(&data->callbacks, dev, evt);
#else
	/* Windows solution */
	/* disable and clear GIRQ interrupt and status */
	mchp_xec_ecia_info_girq_src_dis(xec_kbc0_cfg.obe_ecia_info);
#endif
	mchp_xec_ecia_info_girq_src_clr(xec_kbc0_cfg.obe_ecia_info);
}

/* dev is a pointer to espi0 device */
static int kbc0_rd_req(const struct device *dev, enum lpc_peripheral_opcode op,
		       uint32_t *data)
{
	struct kbc_regs *kbc_hw = (struct kbc_regs *)xec_kbc0_cfg.regbase;

	ARG_UNUSED(dev);

	if (op >= E8042_START_OPCODE && op <= E8042_MAX_OPCODE) {
		/* Make sure kbc 8042 is on */
		if (!(kbc_hw->KBC_CTRL & MCHP_KBC_CTRL_OBFEN)) {
			return -ENOTSUP;
		}

		switch (op) {
		case E8042_OBF_HAS_CHAR:
			/* EC has written data back to host. OBF is
			 * automatically cleared after host reads
			 * the data
			 */
			*data = kbc_hw->EC_KBC_STS & MCHP_KBC_STS_OBF ? 1 : 0;
			break;
		case E8042_IBF_HAS_CHAR:
			*data = kbc_hw->EC_KBC_STS & MCHP_KBC_STS_IBF ? 1 : 0;
			break;
		case E8042_READ_KB_STS:
			*data = kbc_hw->EC_KBC_STS;
			break;
		default:
			return -EINVAL;
		}
	} else {
		return -ENOTSUP;
	}

	return 0;
}

/* dev is a pointer to espi0 device */
static int kbc0_wr_req(const struct device *dev, enum lpc_peripheral_opcode op,
		       uint32_t *data)
{
	struct kbc_regs *kbc_hw = (struct kbc_regs *)xec_kbc0_cfg.regbase;

	volatile uint32_t __attribute__((unused)) dummy;

	ARG_UNUSED(dev);

	if (op >= E8042_START_OPCODE && op <= E8042_MAX_OPCODE) {
		/* Make sure kbc 8042 is on */
		if (!(kbc_hw->KBC_CTRL & MCHP_KBC_CTRL_OBFEN)) {
			return -ENOTSUP;
		}

		switch (op) {
		case E8042_WRITE_KB_CHAR:
			kbc_hw->EC_DATA = *data & 0xff;
			break;
		case E8042_WRITE_MB_CHAR:
			kbc_hw->EC_AUX_DATA = *data & 0xff;
			break;
		case E8042_RESUME_IRQ:
			mchp_xec_ecia_info_girq_src_clr(
				xec_kbc0_cfg.ibf_ecia_info);
			mchp_xec_ecia_info_girq_src_en(
				xec_kbc0_cfg.ibf_ecia_info);
			break;
		case E8042_PAUSE_IRQ:
			mchp_xec_ecia_info_girq_src_dis(
				xec_kbc0_cfg.ibf_ecia_info);
			break;
		case E8042_CLEAR_OBF:
			dummy = kbc_hw->HOST_AUX_DATA;
			break;
		case E8042_SET_FLAG:
			/* FW shouldn't modify these flags directly */
			*data &= ~(MCHP_KBC_STS_OBF | MCHP_KBC_STS_IBF |
				   MCHP_KBC_STS_AUXOBF);
			kbc_hw->EC_KBC_STS |= *data;
			break;
		case E8042_CLEAR_FLAG:
			/* FW shouldn't modify these flags directly */
			*data |= (MCHP_KBC_STS_OBF | MCHP_KBC_STS_IBF |
				  MCHP_KBC_STS_AUXOBF);
			kbc_hw->EC_KBC_STS &= ~(*data);
			break;
		default:
			return -EINVAL;
		}
	} else {
		return -ENOTSUP;
	}

	return 0;
}

static int connect_irq_kbc0(const struct device *dev)
{
	/* clear GIRQ source */
	mchp_xec_ecia_info_girq_src_clr(xec_kbc0_cfg.ibf_ecia_info);
	mchp_xec_ecia_info_girq_src_clr(xec_kbc0_cfg.obe_ecia_info);

	IRQ_CONNECT(DT_IRQ_BY_NAME(DT_NODELABEL(kbc0), kbc_ibf, irq),
		    DT_IRQ_BY_NAME(DT_NODELABEL(kbc0), kbc_ibf, priority),
		    kbc0_ibf_isr,
		    DEVICE_DT_GET(DT_NODELABEL(espi0)),
		    0);
	irq_enable(DT_IRQ_BY_NAME(DT_NODELABEL(kbc0), kbc_ibf, irq));

	IRQ_CONNECT(DT_IRQ_BY_NAME(DT_NODELABEL(kbc0), kbc_obe, irq),
		    DT_IRQ_BY_NAME(DT_NODELABEL(kbc0), kbc_obe, priority),
		    kbc0_obe_isr,
		    DEVICE_DT_GET(DT_NODELABEL(espi0)),
		    0);
	irq_enable(DT_IRQ_BY_NAME(DT_NODELABEL(kbc0), kbc_obe, irq));

	/* enable GIRQ sources */
	mchp_xec_ecia_info_girq_src_en(xec_kbc0_cfg.ibf_ecia_info);
	mchp_xec_ecia_info_girq_src_en(xec_kbc0_cfg.obe_ecia_info);

	return 0;
}

static int init_kbc0(const struct device *dev)
{
	struct espi_xec_config *const cfg = ESPI_XEC_CONFIG(dev);
	struct espi_iom_regs *regs = (struct espi_iom_regs *)cfg->base_addr;
	struct kbc_regs *kbc_hw = (struct kbc_regs *)xec_kbc0_cfg.regbase;

	kbc_hw->KBC_CTRL |= MCHP_KBC_CTRL_AUXH;
	kbc_hw->KBC_CTRL |= MCHP_KBC_CTRL_OBFEN;
	/* This is the activate register, but the HAL has a funny name */
	kbc_hw->KBC_PORT92_EN = MCHP_KBC_PORT92_EN;
	regs->IOHBAR[IOB_KBC] = ESPI_XEC_KBC_BAR_ADDRESS |
				MCHP_ESPI_IO_BAR_HOST_VALID;

	return 0;
}

#undef CONNECT_IRQ_KBC0
#define CONNECT_IRQ_KBC0 connect_irq_kbc0
#undef INIT_KBC0
#define INIT_KBC0 init_kbc0

#endif /* CONFIG_ESPI_PERIPHERAL_8042_KBC */
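
/*
 * Illustrative sketch (not part of the driver): an application typically
 * services the ESPI_PERIPHERAL_8042_KBC notification through the generic
 * LPC request API, which lands in kbc0_rd_req()/kbc0_wr_req() above, e.g.
 *
 *	uint32_t status, out = scan_code;
 *
 *	espi_read_lpc_request(espi_dev, E8042_OBF_HAS_CHAR, &status);
 *	if (!status) {
 *		espi_write_lpc_request(espi_dev, E8042_WRITE_KB_CHAR, &out);
 *	}
 *	espi_write_lpc_request(espi_dev, E8042_RESUME_IRQ, &out);
 *
 * Names such as espi_dev and scan_code are placeholders.
 */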

#ifdef CONFIG_ESPI_PERIPHERAL_HOST_IO

static const struct xec_acpi_ec_config xec_acpi_ec0_cfg = {
	.regbase = DT_REG_ADDR(DT_NODELABEL(acpi_ec0)),
	.ibf_ecia_info = DT_PROP_BY_IDX(DT_NODELABEL(acpi_ec0), girqs, 0),
	.obe_ecia_info = DT_PROP_BY_IDX(DT_NODELABEL(acpi_ec0), girqs, 1),
};

static void acpi_ec0_ibf_isr(const struct device *dev)
{
	struct espi_xec_data *const data =
		(struct espi_xec_data *const)dev->data;
	struct espi_event evt = { ESPI_BUS_PERIPHERAL_NOTIFICATION,
				  ESPI_PERIPHERAL_HOST_IO, ESPI_PERIPHERAL_NODATA
				};
#ifdef CONFIG_ESPI_PERIPHERAL_ACPI_EC_IBF_EVT_DATA
	struct acpi_ec_regs *acpi_ec0_hw = (struct acpi_ec_regs *)xec_acpi_ec0_cfg.regbase;

	/* Updates to fit Chrome shim layer design */
	struct espi_evt_data_acpi *acpi_evt =
		(struct espi_evt_data_acpi *)&evt.evt_data;

	/* Host put data on input buffer of ACPI EC0 channel */
	if (acpi_ec0_hw->EC_STS & MCHP_ACPI_EC_STS_IBF) {
		/* Set processing flag before reading command byte */
		acpi_ec0_hw->EC_STS |= MCHP_ACPI_EC_STS_UD1A;
		/*
		 * Indicates if the host sent a command or data.
		 * 0 = data
		 * 1 = Command.
		 */
		acpi_evt->type = acpi_ec0_hw->EC_STS & MCHP_ACPI_EC_STS_CMD ? 1 : 0;
		acpi_evt->data = acpi_ec0_hw->OS2EC_DATA;
	}
#endif /* CONFIG_ESPI_PERIPHERAL_ACPI_EC_IBF_EVT_DATA */

	espi_send_callbacks(&data->callbacks, dev, evt);

	/* clear GIRQ status */
	mchp_xec_ecia_info_girq_src_clr(xec_acpi_ec0_cfg.ibf_ecia_info);
}

static void acpi_ec0_obe_isr(const struct device *dev)
{
	/* disable and clear GIRQ status */
	mchp_xec_ecia_info_girq_src_dis(xec_acpi_ec0_cfg.obe_ecia_info);
	mchp_xec_ecia_info_girq_src_clr(xec_acpi_ec0_cfg.obe_ecia_info);
}

static int eacpi_rd_req(const struct device *dev,
			enum lpc_peripheral_opcode op,
			uint32_t *data)
{
	struct acpi_ec_regs *acpi_ec0_hw = (struct acpi_ec_regs *)xec_acpi_ec0_cfg.regbase;

	ARG_UNUSED(dev);

	switch (op) {
	case EACPI_OBF_HAS_CHAR:
		/* EC has written data back to host. OBF is
		 * automatically cleared after host reads
		 * the data
		 */
		*data = acpi_ec0_hw->EC_STS & MCHP_ACPI_EC_STS_OBF ? 1 : 0;
		break;
	case EACPI_IBF_HAS_CHAR:
		*data = acpi_ec0_hw->EC_STS & MCHP_ACPI_EC_STS_IBF ? 1 : 0;
		break;
	case EACPI_READ_STS:
		*data = acpi_ec0_hw->EC_STS;
		break;
#if defined(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION)
	case EACPI_GET_SHARED_MEMORY:
		*data = (uint32_t)ec_host_cmd_sram + CONFIG_ESPI_XEC_PERIPHERAL_HOST_CMD_PARAM_SIZE;
		break;
#endif /* CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION */
	default:
		return -EINVAL;
	}

	return 0;
}

static int eacpi_wr_req(const struct device *dev,
			enum lpc_peripheral_opcode op,
			uint32_t *data)
{
	struct acpi_ec_regs *acpi_ec0_hw = (struct acpi_ec_regs *)xec_acpi_ec0_cfg.regbase;

	ARG_UNUSED(dev);

	switch (op) {
	case EACPI_WRITE_CHAR:
		acpi_ec0_hw->EC2OS_DATA = (*data & 0xff);
		break;
	case EACPI_WRITE_STS:
		acpi_ec0_hw->EC_STS = (*data & 0xff);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int connect_irq_acpi_ec0(const struct device *dev)
{
	mchp_xec_ecia_info_girq_src_clr(xec_acpi_ec0_cfg.ibf_ecia_info);
	mchp_xec_ecia_info_girq_src_clr(xec_acpi_ec0_cfg.obe_ecia_info);

	IRQ_CONNECT(DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec0), acpi_ibf, irq),
		    DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec0), acpi_ibf, priority),
		    acpi_ec0_ibf_isr,
		    DEVICE_DT_GET(DT_NODELABEL(espi0)),
		    0);
	irq_enable(DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec0), acpi_ibf, irq));

	IRQ_CONNECT(DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec0), acpi_obe, irq),
		    DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec0), acpi_obe, priority),
		    acpi_ec0_obe_isr,
		    DEVICE_DT_GET(DT_NODELABEL(espi0)),
		    0);
	irq_enable(DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec0), acpi_obe, irq));

	mchp_xec_ecia_info_girq_src_en(xec_acpi_ec0_cfg.ibf_ecia_info);
	mchp_xec_ecia_info_girq_src_en(xec_acpi_ec0_cfg.obe_ecia_info);

	return 0;
}

static int init_acpi_ec0(const struct device *dev)
{
	struct espi_xec_config *const cfg = ESPI_XEC_CONFIG(dev);
	struct espi_iom_regs *regs = (struct espi_iom_regs *)cfg->base_addr;

	regs->IOHBAR[IOB_ACPI_EC0] = ESPI_XEC_ACPI_EC0_BAR_ADDRESS |
				     MCHP_ESPI_IO_BAR_HOST_VALID;

	return 0;
}

#undef CONNECT_IRQ_ACPI_EC0
#define CONNECT_IRQ_ACPI_EC0 connect_irq_acpi_ec0
#undef INIT_ACPI_EC0
#define INIT_ACPI_EC0 init_acpi_ec0

#endif /* CONFIG_ESPI_PERIPHERAL_HOST_IO */
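
/*
 * Note: when CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION is also enabled,
 * eacpi_rd_req() answers EACPI_GET_SHARED_MEMORY with the address of the
 * shared-memory portion of ec_host_cmd_sram, i.e. the bytes that follow the
 * host command parameter area. The dedicated eacpi_shm_rd_req() handler
 * further below serves the same opcode for builds without
 * CONFIG_ESPI_PERIPHERAL_HOST_IO.
 */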

#if defined(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD) || \
	defined(CONFIG_ESPI_PERIPHERAL_HOST_IO_PVT)

static const struct xec_acpi_ec_config xec_acpi_ec1_cfg = {
	.regbase = DT_REG_ADDR(DT_NODELABEL(acpi_ec1)),
	.ibf_ecia_info = DT_PROP_BY_IDX(DT_NODELABEL(acpi_ec1), girqs, 0),
	.obe_ecia_info = DT_PROP_BY_IDX(DT_NODELABEL(acpi_ec1), girqs, 1),
};

static void acpi_ec1_ibf_isr(const struct device *dev)
{
	struct espi_xec_data *const data =
		(struct espi_xec_data *const)dev->data;
	struct espi_event evt = {
		.evt_type = ESPI_BUS_PERIPHERAL_NOTIFICATION,
#ifdef CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD
		.evt_details = ESPI_PERIPHERAL_EC_HOST_CMD,
#else
		.evt_details = ESPI_PERIPHERAL_HOST_IO_PVT,
#endif
		.evt_data = ESPI_PERIPHERAL_NODATA
	};
#ifdef CONFIG_ESPI_PERIPHERAL_ACPI_EC_IBF_EVT_DATA
	struct acpi_ec_regs *acpi_ec1_hw = (struct acpi_ec_regs *)xec_acpi_ec1_cfg.regbase;

	/* Updates to fit Chrome shim layer design.
	 * Host put data on input buffer of ACPI EC1 channel.
	 */
	if (acpi_ec1_hw->EC_STS & MCHP_ACPI_EC_STS_IBF) {
		/* Set processing flag before reading command byte */
		acpi_ec1_hw->EC_STS |= MCHP_ACPI_EC_STS_UD1A;
		/* Read out input data and clear IBF pending bit */
		evt.evt_data = acpi_ec1_hw->OS2EC_DATA;
	}
#endif /* CONFIG_ESPI_PERIPHERAL_ACPI_EC_IBF_EVT_DATA */

	espi_send_callbacks(&data->callbacks, dev, evt);

	/* clear GIRQ status */
	mchp_xec_ecia_info_girq_src_clr(xec_acpi_ec1_cfg.ibf_ecia_info);
}

static void acpi_ec1_obe_isr(const struct device *dev)
{
	/* disable and clear GIRQ status */
	mchp_xec_ecia_info_girq_src_dis(xec_acpi_ec1_cfg.obe_ecia_info);
	mchp_xec_ecia_info_girq_src_clr(xec_acpi_ec1_cfg.obe_ecia_info);
}

static int connect_irq_acpi_ec1(const struct device *dev)
{
	mchp_xec_ecia_info_girq_src_clr(xec_acpi_ec1_cfg.ibf_ecia_info);
	mchp_xec_ecia_info_girq_src_clr(xec_acpi_ec1_cfg.obe_ecia_info);

	IRQ_CONNECT(DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec1), acpi_ibf, irq),
		    DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec1), acpi_ibf, priority),
		    acpi_ec1_ibf_isr,
		    DEVICE_DT_GET(DT_NODELABEL(espi0)),
		    0);
	irq_enable(DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec1), acpi_ibf, irq));

	IRQ_CONNECT(DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec1), acpi_obe, irq),
		    DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec1), acpi_obe, priority),
		    acpi_ec1_obe_isr,
		    DEVICE_DT_GET(DT_NODELABEL(espi0)),
		    0);
	irq_enable(DT_IRQ_BY_NAME(DT_NODELABEL(acpi_ec1), acpi_obe, irq));

	mchp_xec_ecia_info_girq_src_en(xec_acpi_ec1_cfg.ibf_ecia_info);
	mchp_xec_ecia_info_girq_src_en(xec_acpi_ec1_cfg.obe_ecia_info);

	return 0;
}

static int init_acpi_ec1(const struct device *dev)
{
	struct espi_xec_config *const cfg = ESPI_XEC_CONFIG(dev);
	struct espi_iom_regs *regs = (struct espi_iom_regs *)cfg->base_addr;

#ifdef CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD
	regs->IOHBAR[IOB_ACPI_EC1] =
		(CONFIG_ESPI_PERIPHERAL_HOST_CMD_DATA_PORT_NUM << 16) |
		MCHP_ESPI_IO_BAR_HOST_VALID;
#else
	regs->IOHBAR[IOB_ACPI_EC1] =
		CONFIG_ESPI_PERIPHERAL_HOST_IO_PVT_PORT_NUM |
		MCHP_ESPI_IO_BAR_HOST_VALID;
	regs->IOHBAR[IOB_MBOX] = ESPI_XEC_MBOX_BAR_ADDRESS |
				 MCHP_ESPI_IO_BAR_HOST_VALID;
#endif

	return 0;
}

#undef CONNECT_IRQ_ACPI_EC1
#define CONNECT_IRQ_ACPI_EC1 connect_irq_acpi_ec1
#undef INIT_ACPI_EC1
#define INIT_ACPI_EC1 init_acpi_ec1

#endif /* CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD || CONFIG_ESPI_PERIPHERAL_HOST_IO_PVT */
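
/*
 * ACPI EC1 serves two mutually exclusive roles: with
 * CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD it is the EC host command data port
 * (I/O BAR taken from CONFIG_ESPI_PERIPHERAL_HOST_CMD_DATA_PORT_NUM),
 * otherwise it provides the private host I/O channel using
 * CONFIG_ESPI_PERIPHERAL_HOST_IO_PVT_PORT_NUM.
 */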

#ifdef CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD

BUILD_ASSERT(DT_NODE_HAS_STATUS_OKAY(DT_NODELABEL(emi0)),
	     "XEC EMI0 DT node is disabled!");

struct xec_emi_config {
	uintptr_t regbase;
};

static const struct xec_emi_config xec_emi0_cfg = {
	.regbase = DT_REG_ADDR(DT_NODELABEL(emi0)),
};

static int init_emi0(const struct device *dev)
{
	struct espi_xec_config *const cfg = ESPI_XEC_CONFIG(dev);
	struct espi_iom_regs *regs = (struct espi_iom_regs *)cfg->base_addr;
	struct emi_regs *emi_hw =
		(struct emi_regs *)xec_emi0_cfg.regbase;

	regs->IOHBAR[IOB_EMI0] =
		(CONFIG_ESPI_PERIPHERAL_HOST_CMD_PARAM_PORT_NUM << 16) |
		MCHP_ESPI_IO_BAR_HOST_VALID;

	emi_hw->MEM_BA_0 = (uint32_t)ec_host_cmd_sram;
#ifdef CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION
	emi_hw->MEM_RL_0 = CONFIG_ESPI_XEC_PERIPHERAL_HOST_CMD_PARAM_SIZE +
			   CONFIG_ESPI_XEC_PERIPHERAL_ACPI_SHD_MEM_SIZE;
#else
	emi_hw->MEM_RL_0 = CONFIG_ESPI_XEC_PERIPHERAL_HOST_CMD_PARAM_SIZE;
#endif
	emi_hw->MEM_WL_0 = CONFIG_ESPI_XEC_PERIPHERAL_HOST_CMD_PARAM_SIZE;

	return 0;
}

#undef INIT_EMI0
#define INIT_EMI0 init_emi0

#endif /* CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD */
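
/*
 * EMI region 0 exposes ec_host_cmd_sram to the host: MEM_BA_0 holds the EC
 * base address, MEM_RL_0 the host-readable length (parameter buffer plus the
 * optional ACPI shared memory), and MEM_WL_0 the host-writable length, which
 * covers only the parameter buffer.
 */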

#ifdef CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE

static void host_cus_opcode_enable_interrupts(void);
static void host_cus_opcode_disable_interrupts(void);

static int ecust_rd_req(const struct device *dev,
			enum lpc_peripheral_opcode op,
			uint32_t *data)
{
	ARG_UNUSED(dev);

	switch (op) {
#ifdef CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD
	case ECUSTOM_HOST_CMD_GET_PARAM_MEMORY:
		*data = (uint32_t)ec_host_cmd_sram;
		break;
	case ECUSTOM_HOST_CMD_GET_PARAM_MEMORY_SIZE:
		*data = CONFIG_ESPI_XEC_PERIPHERAL_HOST_CMD_PARAM_SIZE;
		break;
#endif
	default:
		return -EINVAL;
	}

	return 0;
}

static int ecust_wr_req(const struct device *dev,
			enum lpc_peripheral_opcode op,
			uint32_t *data)
{
	struct acpi_ec_regs *acpi_ec1_hw = (struct acpi_ec_regs *)xec_acpi_ec1_cfg.regbase;

	ARG_UNUSED(dev);

	switch (op) {
	case ECUSTOM_HOST_SUBS_INTERRUPT_EN:
		if (*data != 0) {
			host_cus_opcode_enable_interrupts();
		} else {
			host_cus_opcode_disable_interrupts();
		}
		break;
	case ECUSTOM_HOST_CMD_SEND_RESULT:
		/*
		 * Write result to the data byte. This sets the OBF
		 * status bit.
		 */
		acpi_ec1_hw->EC2OS_DATA = (*data & 0xff);
		/* Clear processing flag */
		acpi_ec1_hw->EC_STS &= ~MCHP_ACPI_EC_STS_UD1A;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

#endif /* CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE */

#if defined(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD) && \
	defined(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION)

static int eacpi_shm_rd_req(const struct device *dev,
			    enum lpc_peripheral_opcode op,
			    uint32_t *data)
{
	ARG_UNUSED(dev);

	switch (op) {
	case EACPI_GET_SHARED_MEMORY:
		*data = (uint32_t)&ec_host_cmd_sram[CONFIG_ESPI_XEC_PERIPHERAL_HOST_CMD_PARAM_SIZE];
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int eacpi_shm_wr_req(const struct device *dev,
			    enum lpc_peripheral_opcode op,
			    uint32_t *data)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(op);
	ARG_UNUSED(data);

	return -EINVAL;
}

#endif /* CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD && CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION */

#ifdef CONFIG_ESPI_PERIPHERAL_DEBUG_PORT_80

struct xec_p80bd_config {
	uintptr_t regbase;
	uint32_t ecia_info;
};

static const struct xec_p80bd_config xec_p80bd0_cfg = {
	.regbase = DT_REG_ADDR(DT_NODELABEL(p80bd0)),
	.ecia_info = DT_PROP_BY_IDX(DT_NODELABEL(p80bd0), girqs, 0),
};

/*
 * MEC172x P80 BIOS Debug Port hardware captures writes to its 4-byte I/O range.
 * Hardware provides status indicating the byte lane(s) of each write.
 * We must decode the byte lane information and produce one or more
 * notification packets.
 */
static void p80bd0_isr(const struct device *dev)
{
	struct espi_xec_data *const data =
		(struct espi_xec_data *const)dev->data;
	struct p80bd_regs *p80regs =
		(struct p80bd_regs *)xec_p80bd0_cfg.regbase;
	struct espi_event evt = { ESPI_BUS_PERIPHERAL_NOTIFICATION, 0,
				  ESPI_PERIPHERAL_NODATA };
	int count = 8; /* limit ISR to 8 bytes */
	uint32_t dattr = p80regs->EC_DA;

	/* b[7:0]=8-bit value written, b[15:8]=attributes */
	while ((dattr & MCHP_P80BD_ECDA_NE) && (count--)) { /* Not empty? */
		/* espi_event protocol No Data value is 0 so pick a bit and
		 * set it. This depends on the application.
		 */
		evt.evt_data = (dattr & 0xffu) | BIT(16);
		switch (dattr & MCHP_P80BD_ECDA_LANE_MSK) {
		case MCHP_P80BD_ECDA_LANE_0:
			evt.evt_details |= (ESPI_PERIPHERAL_INDEX_0 << 16) |
					   ESPI_PERIPHERAL_DEBUG_PORT80;
			break;
		case MCHP_P80BD_ECDA_LANE_1:
			evt.evt_details |= (ESPI_PERIPHERAL_INDEX_1 << 16) |
					   ESPI_PERIPHERAL_DEBUG_PORT80;
			break;
		case MCHP_P80BD_ECDA_LANE_2:
			break;
		case MCHP_P80BD_ECDA_LANE_3:
			break;
		default:
			break;
		}

		if (evt.evt_details) {
			espi_send_callbacks(&data->callbacks, dev, evt);
			evt.evt_details = 0;
		}
		dattr = p80regs->EC_DA;
	}

	/* clear GIRQ status */
	mchp_xec_ecia_info_girq_src_clr(xec_p80bd0_cfg.ecia_info);
}
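
/*
 * Port 80 notifications encode the captured byte in bits[7:0] of evt_data
 * (with BIT(16) set so the value is never ESPI_PERIPHERAL_NODATA) and place
 * ESPI_PERIPHERAL_DEBUG_PORT80 plus the byte-lane index
 * (ESPI_PERIPHERAL_INDEX_0/1, shifted into the upper half-word) in
 * evt_details. Writes to lanes 2 and 3 are drained but not reported.
 */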

static int connect_irq_p80bd0(const struct device *dev)
{
	mchp_xec_ecia_info_girq_src_clr(xec_p80bd0_cfg.ecia_info);

	IRQ_CONNECT(DT_IRQN(DT_NODELABEL(p80bd0)),
		    DT_IRQ(DT_NODELABEL(p80bd0), priority),
		    p80bd0_isr,
		    DEVICE_DT_GET(DT_NODELABEL(espi0)),
		    0);
	irq_enable(DT_IRQN(DT_NODELABEL(p80bd0)));

	mchp_xec_ecia_info_girq_src_en(xec_p80bd0_cfg.ecia_info);

	return 0;
}

static int init_p80bd0(const struct device *dev)
{
	struct espi_xec_config *const cfg = ESPI_XEC_CONFIG(dev);
	struct espi_iom_regs *regs = (struct espi_iom_regs *)cfg->base_addr;
	struct p80bd_regs *p80bd_hw =
		(struct p80bd_regs *)xec_p80bd0_cfg.regbase;

	regs->IOHBAR[IOB_P80BD] = ESPI_XEC_PORT80_BAR_ADDRESS |
				  MCHP_ESPI_IO_BAR_HOST_VALID;

	p80bd_hw->ACTV = 1;
	p80bd_hw->STS_IEN = MCHP_P80BD_SI_THR_IEN;

	return 0;
}

#undef CONNECT_IRQ_P80BD0
#define CONNECT_IRQ_P80BD0 connect_irq_p80bd0
#undef INIT_P80BD0
#define INIT_P80BD0 init_p80bd0

#endif /* CONFIG_ESPI_PERIPHERAL_DEBUG_PORT_80 */

#ifdef CONFIG_ESPI_PERIPHERAL_UART

#if CONFIG_ESPI_PERIPHERAL_UART_SOC_MAPPING == 0
int init_uart0(const struct device *dev)
{
	struct espi_xec_config *const cfg = ESPI_XEC_CONFIG(dev);
	struct espi_iom_regs *regs = (struct espi_iom_regs *)cfg->base_addr;

	regs->IOHBAR[IOB_UART0] = ESPI_XEC_UART0_BAR_ADDRESS |
				  MCHP_ESPI_IO_BAR_HOST_VALID;

	return 0;
}

#undef INIT_UART0
#define INIT_UART0 init_uart0

#elif CONFIG_ESPI_PERIPHERAL_UART_SOC_MAPPING == 1
int init_uart1(const struct device *dev)
{
	struct espi_xec_config *const cfg = ESPI_XEC_CONFIG(dev);
	struct espi_iom_regs *regs = (struct espi_iom_regs *)cfg->base_addr;

	regs->IOHBAR[IOB_UART1] = ESPI_XEC_UART0_BAR_ADDRESS |
				  MCHP_ESPI_IO_BAR_HOST_VALID;

	return 0;
}

#undef INIT_UART1
#define INIT_UART1 init_uart1
#endif /* CONFIG_ESPI_PERIPHERAL_UART_SOC_MAPPING */
#endif /* CONFIG_ESPI_PERIPHERAL_UART */

typedef int (*host_dev_irq_connect)(const struct device *dev);

static const host_dev_irq_connect hdic_tbl[] = {
	CONNECT_IRQ_MBOX0,
	CONNECT_IRQ_KBC0,
	CONNECT_IRQ_ACPI_EC0,
	CONNECT_IRQ_ACPI_EC1,
	CONNECT_IRQ_ACPI_EC2,
	CONNECT_IRQ_ACPI_EC3,
	CONNECT_IRQ_ACPI_EC4,
	CONNECT_IRQ_ACPI_PM1,
	CONNECT_IRQ_EMI0,
	CONNECT_IRQ_EMI1,
	CONNECT_IRQ_EMI2,
	CONNECT_IRQ_RTC0,
	CONNECT_IRQ_P80BD0,
};

typedef int (*host_dev_init)(const struct device *dev);

static const host_dev_init hd_init_tbl[] = {
	INIT_MBOX0,
	INIT_KBC0,
	INIT_ACPI_EC0,
	INIT_ACPI_EC1,
	INIT_ACPI_EC2,
	INIT_ACPI_EC3,
	INIT_ACPI_EC4,
	INIT_ACPI_PM1,
	INIT_EMI0,
	INIT_EMI1,
	INIT_EMI2,
	INIT_RTC0,
	INIT_P80BD0,
	INIT_UART0,
	INIT_UART1,
};
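
/*
 * NULL entries correspond to peripherals that are not enabled in this build;
 * the walkers below skip them.
 */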

int xec_host_dev_connect_irqs(const struct device *dev)
{
	int ret = 0;

	for (int i = 0; i < ARRAY_SIZE(hdic_tbl); i++) {
		if (hdic_tbl[i] == NULL) {
			continue;
		}

		ret = hdic_tbl[i](dev);
		if (ret < 0) {
			break;
		}
	}

	return ret;
}

int xec_host_dev_init(const struct device *dev)
{
	int ret = 0;

	for (int i = 0; i < ARRAY_SIZE(hd_init_tbl); i++) {
		if (hd_init_tbl[i] == NULL) {
			continue;
		}

		ret = hd_init_tbl[i](dev);
		if (ret < 0) {
			break;
		}
	}

	return ret;
}

#ifdef CONFIG_ESPI_PERIPHERAL_CHANNEL

typedef int (*xec_lpc_req)(const struct device *,
			   enum lpc_peripheral_opcode,
			   uint32_t *);

struct espi_lpc_req {
	uint16_t opcode_start;
	uint16_t opcode_max;
	xec_lpc_req rd_req;
	xec_lpc_req wr_req;
};

static const struct espi_lpc_req espi_lpc_req_tbl[] = {
#ifdef CONFIG_ESPI_PERIPHERAL_8042_KBC
	{ E8042_START_OPCODE, E8042_MAX_OPCODE, kbc0_rd_req, kbc0_wr_req },
#endif
#ifdef CONFIG_ESPI_PERIPHERAL_HOST_IO
	{ EACPI_START_OPCODE, EACPI_MAX_OPCODE, eacpi_rd_req, eacpi_wr_req },
#endif
#if defined(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD) && \
	defined(CONFIG_ESPI_PERIPHERAL_ACPI_SHM_REGION)
	{ EACPI_GET_SHARED_MEMORY, EACPI_GET_SHARED_MEMORY, eacpi_shm_rd_req, eacpi_shm_wr_req},
#endif
#ifdef CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE
	{ ECUSTOM_START_OPCODE, ECUSTOM_MAX_OPCODE, ecust_rd_req, ecust_wr_req},
#endif
};
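
/*
 * Dispatch scans this table in order and the first entry whose opcode range
 * contains the request wins. The single-opcode EACPI_GET_SHARED_MEMORY entry
 * matters for builds where CONFIG_ESPI_PERIPHERAL_HOST_IO is disabled; when
 * it is enabled, the EACPI range entry above already covers that opcode.
 */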

static int espi_xec_lpc_req(const struct device *dev,
			    enum lpc_peripheral_opcode op,
			    uint32_t *data, uint8_t write)
{
	ARG_UNUSED(dev);

	for (int i = 0; i < ARRAY_SIZE(espi_lpc_req_tbl); i++) {
		const struct espi_lpc_req *req = &espi_lpc_req_tbl[i];

		if ((op >= req->opcode_start) && (op <= req->opcode_max)) {
			if (write) {
				return req->wr_req(dev, op, data);
			} else {
				return req->rd_req(dev, op, data);
			}
		}
	}

	return -ENOTSUP;
}

/* dev = pointer to espi0 device */
int espi_xec_read_lpc_request(const struct device *dev,
			      enum lpc_peripheral_opcode op,
			      uint32_t *data)
{
	return espi_xec_lpc_req(dev, op, data, 0);
}

int espi_xec_write_lpc_request(const struct device *dev,
			       enum lpc_peripheral_opcode op,
			       uint32_t *data)
{
	return espi_xec_lpc_req(dev, op, data, 1);
}
#else
int espi_xec_write_lpc_request(const struct device *dev,
			       enum lpc_peripheral_opcode op,
			       uint32_t *data)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(op);
	ARG_UNUSED(data);

	return -ENOTSUP;
}

int espi_xec_read_lpc_request(const struct device *dev,
			      enum lpc_peripheral_opcode op,
			      uint32_t *data)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(op);
	ARG_UNUSED(data);

	return -ENOTSUP;
}
#endif /* CONFIG_ESPI_PERIPHERAL_CHANNEL */

#if defined(CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE)
static void host_cus_opcode_enable_interrupts(void)
{
	/* Enable host KBC sub-device interrupt */
	if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_8042_KBC)) {
		mchp_xec_ecia_info_girq_src_en(xec_kbc0_cfg.ibf_ecia_info);
		mchp_xec_ecia_info_girq_src_en(xec_kbc0_cfg.obe_ecia_info);
	}

	/* Enable host ACPI EC0 (Host IO) and
	 * ACPI EC1 (Host CMD) sub-device interrupt
	 */
	if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_HOST_IO) ||
	    IS_ENABLED(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD)) {
		mchp_xec_ecia_info_girq_src_en(xec_acpi_ec0_cfg.ibf_ecia_info);
		mchp_xec_ecia_info_girq_src_en(xec_acpi_ec0_cfg.obe_ecia_info);
		mchp_xec_ecia_info_girq_src_en(xec_acpi_ec1_cfg.ibf_ecia_info);
	}

	/* Enable host Port80 sub-device interrupt installation */
	if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_DEBUG_PORT_80)) {
		mchp_xec_ecia_info_girq_src_en(xec_p80bd0_cfg.ecia_info);
	}
}

static void host_cus_opcode_disable_interrupts(void)
{
	/* Disable host KBC sub-device interrupt */
	if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_8042_KBC)) {
		mchp_xec_ecia_info_girq_src_dis(xec_kbc0_cfg.ibf_ecia_info);
		mchp_xec_ecia_info_girq_src_dis(xec_kbc0_cfg.obe_ecia_info);
	}

	/* Disable host ACPI EC0 (Host IO) and
	 * ACPI EC1 (Host CMD) sub-device interrupt
	 */
	if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_HOST_IO) ||
	    IS_ENABLED(CONFIG_ESPI_PERIPHERAL_EC_HOST_CMD)) {
		mchp_xec_ecia_info_girq_src_dis(xec_acpi_ec0_cfg.ibf_ecia_info);
		mchp_xec_ecia_info_girq_src_dis(xec_acpi_ec0_cfg.obe_ecia_info);
		mchp_xec_ecia_info_girq_src_dis(xec_acpi_ec1_cfg.ibf_ecia_info);
	}

	/* Disable host Port80 sub-device interrupt installation */
	if (IS_ENABLED(CONFIG_ESPI_PERIPHERAL_DEBUG_PORT_80)) {
		mchp_xec_ecia_info_girq_src_dis(xec_p80bd0_cfg.ecia_info);
	}
}
#endif /* CONFIG_ESPI_PERIPHERAL_CUSTOM_OPCODE */