/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT microchip_xec_peci

#include <errno.h>
#include <zephyr/device.h>
#include <zephyr/kernel.h>
#ifdef CONFIG_SOC_SERIES_MEC172X
#include <zephyr/drivers/clock_control/mchp_xec_clock_control.h>
#include <zephyr/drivers/interrupt_controller/intc_mchp_xec_ecia.h>
#endif
#include <zephyr/drivers/peci.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/policy.h>
#include <soc.h>
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
LOG_MODULE_REGISTER(peci_mchp_xec, CONFIG_PECI_LOG_LEVEL);

/* Maximum PECI core clock is the main clock: 48 MHz (value in KHz) */
#define MAX_PECI_CORE_CLOCK 48000u
/* 1 ms */
#define PECI_RESET_DELAY    1000u
#define PECI_RESET_DELAY_MS 1u
/* 100 us */
#define PECI_IDLE_DELAY     100u
/* 5 ms */
#define PECI_IDLE_TIMEOUT   50u
/* Maximum retries */
#define PECI_TIMEOUT_RETRIES 3u
/* Maximum read buffer fill wait retries */
#define PECI_RX_BUF_FILL_WAIT_RETRY 100u

/* 10 us */
#define PECI_IO_DELAY       10

#define OPT_BIT_TIME_MSB_OFS 8u

#define PECI_FCS_LEN         2

struct peci_xec_config {
	struct peci_regs * const regs;
	uint8_t irq_num;
	uint8_t girq;
	uint8_t girq_pos;
	uint8_t pcr_idx;
	uint8_t pcr_pos;
	const struct pinctrl_dev_config *pcfg;
};

enum peci_pm_policy_state_flag {
	PECI_PM_POLICY_FLAG,
	PECI_PM_POLICY_FLAG_COUNT,
};

struct peci_xec_data {
	struct k_sem tx_lock;
	uint32_t  bitrate;
	int    timeout_retries;
#ifdef CONFIG_PM_DEVICE
	ATOMIC_DEFINE(pm_policy_state_flag, PECI_PM_POLICY_FLAG_COUNT);
#endif
};

#ifdef CONFIG_PM_DEVICE
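/* Keep the system out of suspend-to-idle while a PECI transaction is in
 * progress; the lock is taken before the transfer and released once it
 * completes.
 */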
static void peci_xec_pm_policy_state_lock_get(struct peci_xec_data *data,
					      enum peci_pm_policy_state_flag flag)
{
	if (atomic_test_and_set_bit(data->pm_policy_state_flag, flag) == 0) {
		pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
	}
}

static void peci_xec_pm_policy_state_lock_put(struct peci_xec_data *data,
					      enum peci_pm_policy_state_flag flag)
{
	if (atomic_test_and_clear_bit(data->pm_policy_state_flag, flag) == 1) {
		pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
	}
}
#endif

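/* GIRQ and sleep-enable helpers: MEC172X routes these through the ECIA
 * interrupt controller and PCR driver APIs, while other XEC SoCs program
 * the GIRQ and PCR registers directly.
 */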
#ifdef CONFIG_SOC_SERIES_MEC172X
static inline void peci_girq_enable(const struct device *dev)
{
	const struct peci_xec_config * const cfg = dev->config;

	mchp_xec_ecia_girq_src_en(cfg->girq, cfg->girq_pos);
}

static inline void peci_girq_status_clear(const struct device *dev)
{
	const struct peci_xec_config * const cfg = dev->config;

	mchp_soc_ecia_girq_src_clr(cfg->girq, cfg->girq_pos);
}

static inline void peci_clr_slp_en(const struct device *dev)
{
	const struct peci_xec_config * const cfg = dev->config;

	z_mchp_xec_pcr_periph_sleep(cfg->pcr_idx, cfg->pcr_pos, 0);
}
#else
static inline void peci_girq_enable(const struct device *dev)
{
	const struct peci_xec_config * const cfg = dev->config;

	MCHP_GIRQ_ENSET(cfg->girq) = BIT(cfg->girq_pos);
}

static inline void peci_girq_status_clear(const struct device *dev)
{
	const struct peci_xec_config * const cfg = dev->config;

	MCHP_GIRQ_SRC(cfg->girq) = BIT(cfg->girq_pos);
}

static inline void peci_clr_slp_en(const struct device *dev)
{
	ARG_UNUSED(dev);

	mchp_pcr_periph_slp_ctrl(PCR_PECI, 0);
}
#endif

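/* Poll the IDLE bit in STATUS2 every PECI_IDLE_DELAY microseconds for up
 * to PECI_IDLE_TIMEOUT iterations (100 us * 50 = 5 ms) before giving up
 * with -EBUSY.
 */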
static int check_bus_idle(struct peci_regs * const regs)
{
	uint8_t delay_cnt = PECI_IDLE_TIMEOUT;

	/* Wait until the PECI bus becomes idle.
	 * Note that when the IDLE bit in the status register changes, the HW
	 * does not generate an interrupt, so the bit has to be polled.
	 */
	while (!(regs->STATUS2 & MCHP_PECI_STS2_IDLE)) {
		k_busy_wait(PECI_IDLE_DELAY);
		delay_cnt--;

		if (!delay_cnt) {
			LOG_WRN("Bus is busy");
			return -EBUSY;
		}
	}
	return 0;
}

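/* Program the optimal bit time from the requested bitrate. The divider is
 * MAX_PECI_CORE_CLOCK / bitrate, split across the LSB/MSB registers; for
 * example, a 1000 KHz bitrate gives 48000 / 1000 = 48 (0x30 in the LSB,
 * 0 in the MSB). The interface is powered down while the registers are
 * updated.
 */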
static int peci_xec_configure(const struct device *dev, uint32_t bitrate)
{
	const struct peci_xec_config * const cfg = dev->config;
	struct peci_xec_data * const data = dev->data;
	struct peci_regs * const regs = cfg->regs;
	uint16_t value;

	data->bitrate = bitrate;

	/* Power down PECI interface */
	regs->CONTROL = MCHP_PECI_CTRL_PD;

	/* Adjust bitrate */
	value = MAX_PECI_CORE_CLOCK / bitrate;
	regs->OPT_BIT_TIME_LSB = value & MCHP_PECI_OPT_BT_LSB_MASK;
	regs->OPT_BIT_TIME_MSB = ((value >> OPT_BIT_TIME_MSB_OFS) &
				  MCHP_PECI_OPT_BT_MSB_MASK);

	/* Power up PECI interface */
	regs->CONTROL &= ~MCHP_PECI_CTRL_PD;

	return 0;
}

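/* Wait for any ongoing transaction to finish, then mask the PECI interrupt
 * (when interrupt driven) and power down the block.
 */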
static int peci_xec_disable(const struct device *dev)
{
	const struct peci_xec_config * const cfg = dev->config;
	struct peci_regs * const regs = cfg->regs;
	int ret;

	/* Make sure no transaction is interrupted before disabling the HW */
	ret = check_bus_idle(regs);
	if (ret) {
		return ret;
	}

#ifdef CONFIG_PECI_INTERRUPT_DRIVEN
	peci_girq_status_clear(dev);
	NVIC_ClearPendingIRQ(cfg->irq_num);
	irq_disable(cfg->irq_num);
#endif
	regs->CONTROL |= MCHP_PECI_CTRL_PD;

	return 0;
}

static int peci_xec_enable(const struct device *dev)
{
	const struct peci_xec_config * const cfg = dev->config;
	struct peci_regs * const regs = cfg->regs;

	regs->CONTROL &= ~MCHP_PECI_CTRL_PD;

#ifdef CONFIG_PECI_INTERRUPT_DRIVEN
	peci_girq_status_clear(dev);
	peci_girq_enable(dev);
	irq_enable(cfg->irq_num);
#endif
	return 0;
}

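/* Error recovery: a full reset powers the block down, pulses the reset bit
 * and reapplies the stored bitrate; otherwise only the internal FIFOs are
 * flushed.
 */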
static void peci_xec_bus_recovery(const struct device *dev, bool full_reset)
{
	const struct peci_xec_config * const cfg = dev->config;
	struct peci_xec_data * const data = dev->data;
	struct peci_regs * const regs = cfg->regs;

	LOG_WRN("%s full_reset:%d", __func__, full_reset);
	if (full_reset) {
		regs->CONTROL = MCHP_PECI_CTRL_PD | MCHP_PECI_CTRL_RST;

		if (k_is_in_isr()) {
			k_busy_wait(PECI_RESET_DELAY);
		} else {
			k_msleep(PECI_RESET_DELAY_MS);
		}

		regs->CONTROL &= ~MCHP_PECI_CTRL_RST;

		peci_xec_configure(dev, data->bitrate);
	} else {
		/* Only reset internal FIFOs */
		regs->CONTROL |= MCHP_PECI_CTRL_FRST;
	}
}

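/* Queue one transaction in the TX FIFO. The hardware expects the frame
 * header first (target address, write length, read length), followed by
 * the command code and the payload bytes; setting TXEN then starts the
 * transfer.
 */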
static int peci_xec_write(const struct device *dev, struct peci_msg *msg)
{
	const struct peci_xec_config * const cfg = dev->config;
	struct peci_xec_data * const data = dev->data;
	struct peci_regs * const regs = cfg->regs;
	int i;
	int ret;

	struct peci_buf *tx_buf = &msg->tx_buffer;
	struct peci_buf *rx_buf = &msg->rx_buffer;

	/* Check if FIFO is full */
	if (regs->STATUS2 & MCHP_PECI_STS2_WFF) {
		LOG_WRN("%s FIFO is full", __func__);
		return -EIO;
	}

	regs->CONTROL &= ~MCHP_PECI_CTRL_FRST;

	/* Add PECI transaction header to TX FIFO */
	regs->WR_DATA = msg->addr;
	regs->WR_DATA = tx_buf->len;
	regs->WR_DATA = rx_buf->len;

	/* Add PECI payload to Tx FIFO only if write length is valid */
	if (tx_buf->len) {
		regs->WR_DATA = msg->cmd_code;
		for (i = 0; i < tx_buf->len - 1; i++) {
			if (!(regs->STATUS2 & MCHP_PECI_STS2_WFF)) {
				regs->WR_DATA = tx_buf->buf[i];
			}
		}
	}

	/* Check bus is idle before starting a new transfer */
	ret = check_bus_idle(regs);
	if (ret) {
		return ret;
	}

	regs->CONTROL |= MCHP_PECI_CTRL_TXEN;
	k_busy_wait(PECI_IO_DELAY);

	/* Wait for transmission to complete */
#ifdef CONFIG_PECI_INTERRUPT_DRIVEN
	if (k_sem_take(&data->tx_lock, K_MSEC(PECI_IO_DELAY * tx_buf->len))) {
		return -ETIMEDOUT;
	}
#else
	/* In worst case, overall timeout will be 1msec (100 * 10usec) */
	uint8_t wait_timeout_cnt = 100;

	while (!(regs->STATUS1 & MCHP_PECI_STS1_EOF)) {
		k_busy_wait(PECI_IO_DELAY);
		wait_timeout_cnt--;
		if (!wait_timeout_cnt) {
			LOG_WRN("Tx timeout");
			data->timeout_retries++;
			/* Full reset only if multiple consecutive failures */
			if (data->timeout_retries > PECI_TIMEOUT_RETRIES) {
				peci_xec_bus_recovery(dev, true);
			} else {
				peci_xec_bus_recovery(dev, false);
			}

			return -ETIMEDOUT;
		}
	}
#endif
	data->timeout_retries = 0;

	return 0;
}

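/* Drain the RX FIFO once the transfer completes: the first byte is the
 * write-block FCS (returned directly for a Ping), followed by
 * rx_buffer.len response bytes and finally the read-block FCS.
 */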
static int peci_xec_read(const struct device *dev, struct peci_msg *msg)
{
	const struct peci_xec_config * const cfg = dev->config;
	struct peci_regs * const regs = cfg->regs;
	int i;
	int ret;
	uint8_t tx_fcs;
	uint8_t bytes_rcvd;
	uint8_t wait_timeout_cnt;
	struct peci_buf *rx_buf = &msg->rx_buffer;

	/* Attempt to read data from RX FIFO */
	bytes_rcvd = 0;
	for (i = 0; i < (rx_buf->len + PECI_FCS_LEN); i++) {
		/* Worst case timeout will be 1 msec (100 * 10 usec) */
		wait_timeout_cnt = PECI_RX_BUF_FILL_WAIT_RETRY;
		/* Wait for the read buffer to fill up */
		while (regs->STATUS2 & MCHP_PECI_STS2_RFE) {
			k_usleep(PECI_IO_DELAY);
			wait_timeout_cnt--;
			if (!wait_timeout_cnt) {
				LOG_WRN("Rx buffer empty");
				return -ETIMEDOUT;
			}
		}

		if (i == 0) {
			/* Get the write block FCS, just for debug */
			tx_fcs = regs->RD_DATA;
			LOG_DBG("TX FCS %x", tx_fcs);

			/* If a Ping is done, write the Tx FCS to the rx buffer */
			if (msg->cmd_code == PECI_CMD_PING) {
				rx_buf->buf[0] = tx_fcs;
				break;
			}
		} else if (i == (rx_buf->len + 1)) {
			/* Get the read block FCS, but don't count it */
			rx_buf->buf[i - 1] = regs->RD_DATA;
		} else {
			/* Get response */
			rx_buf->buf[i - 1] = regs->RD_DATA;
			bytes_rcvd++;
		}
	}

	/* Check if the transaction completed as expected */
	if (rx_buf->len != bytes_rcvd) {
		LOG_INF("Incomplete %x vs %x", bytes_rcvd, rx_buf->len);
	}

	/* Once the write-read transaction is complete, ensure the bus is idle
	 * before resetting the internal FIFOs
	 */
	ret = check_bus_idle(regs);
	if (ret) {
		return ret;
	}

	return 0;
}

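/* PECI API transfer entry point: performs the write phase, the optional
 * read phase, then checks the ERROR register and triggers a FIFO reset if
 * the controller flagged an overrun, underrun or bus error.
 */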
static int peci_xec_transfer(const struct device *dev, struct peci_msg *msg)
{
	const struct peci_xec_config * const cfg = dev->config;
	struct peci_regs * const regs = cfg->regs;
	int ret = 0;
	uint8_t err_val = 0;
#ifdef CONFIG_PM_DEVICE
	struct peci_xec_data *data = dev->data;

	peci_xec_pm_policy_state_lock_get(data, PECI_PM_POLICY_FLAG);
#endif

	do {
		ret = peci_xec_write(dev, msg);
		if (ret) {
			break;
		}

		/* A successful PECI transmission may or may not involve a
		 * read operation, so check whether the transaction expects a
		 * response. Also perform a read when the PECI cmd is Ping, to
		 * retrieve the Write FCS.
		 */
		if (msg->rx_buffer.len || (msg->cmd_code == PECI_CMD_PING)) {
			ret = peci_xec_read(dev, msg);
			if (ret) {
				break;
			}
		}

		/* Cleanup */
		if (regs->STATUS1 & MCHP_PECI_STS1_EOF) {
			regs->STATUS1 |= MCHP_PECI_STS1_EOF;
		}

		/* Check for error conditions and perform bus recovery if necessary */
		err_val = regs->ERROR;
		if (err_val) {
			if (err_val & MCHP_PECI_ERR_RDOV) {
				LOG_ERR("Read buffer is not empty");
			}

			if (err_val & MCHP_PECI_ERR_WRUN) {
				LOG_ERR("Write buffer is not empty");
			}

			if (err_val & MCHP_PECI_ERR_BERR) {
				LOG_ERR("PECI bus error");
			}

			LOG_DBG("PECI err %x", err_val);
			LOG_DBG("PECI sts1 %x", regs->STATUS1);
			LOG_DBG("PECI sts2 %x", regs->STATUS2);

			/* ERROR is a clear-on-write register, need to clear errors
			 * occurring at the end of a transaction. A temp variable is
			 * used to overcome complaints by the static code analyzer
			 */
			regs->ERROR = err_val;
			peci_xec_bus_recovery(dev, false);
			ret = -EIO;
			break;
		}
	} while (0);

#ifdef CONFIG_PM_DEVICE
	peci_xec_pm_policy_state_lock_put(data, PECI_PM_POLICY_FLAG);
#endif
	return ret;
}

#ifdef CONFIG_PM_DEVICE
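/* On suspend the PECI block is powered down, VREF_VTT is disabled via the
 * ECS PECI_DIS bit and the sleep pinctrl state is applied (if defined);
 * resume reverses the sequence.
 */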
static int peci_xec_pm_action(const struct device *dev, enum pm_device_action action)
{
	const struct peci_xec_config *const devcfg = dev->config;
	struct peci_regs * const regs = devcfg->regs;
	struct ecs_regs * const ecs_regs = (struct ecs_regs *)(DT_REG_ADDR(DT_NODELABEL(ecs)));
	int ret;

	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		ret = pinctrl_apply_state(devcfg->pcfg, PINCTRL_STATE_DEFAULT);
		/* VREF_VTT function is enabled */
		ecs_regs->PECI_DIS = 0x00u;

		/* Power up PECI interface */
		regs->CONTROL &= ~MCHP_PECI_CTRL_PD;
		break;
	case PM_DEVICE_ACTION_SUSPEND:
		regs->CONTROL |= MCHP_PECI_CTRL_PD;
		/* This bit reduces leakage current through the CPU voltage reference
		 * pin if PECI is not used. VREF_VTT function is disabled.
		 */
		ecs_regs->PECI_DIS = 0x01u;

		/* If the application does not want to turn off the PECI pins,
		 * it will not define pinctrl-1 for this node.
		 */
		ret = pinctrl_apply_state(devcfg->pcfg, PINCTRL_STATE_SLEEP);
		if (ret == -ENOENT) { /* pinctrl-1 does not exist. */
			ret = 0;
		}
		break;
	default:
		ret = -ENOTSUP;
	}

	return ret;
}
#endif /* CONFIG_PM_DEVICE */

#ifdef CONFIG_PECI_INTERRUPT_DRIVEN
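/* Interrupt handler: clears the GIRQ source and any latched errors, and
 * releases tx_lock once the write FIFO drains so peci_xec_write() can
 * complete.
 */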
static void peci_xec_isr(const void *arg)
{
	const struct device *dev = arg;
	const struct peci_xec_config * const cfg = dev->config;
	struct peci_xec_data * const data = dev->data;
	struct peci_regs * const regs = cfg->regs;
	uint8_t peci_error = regs->ERROR;
	uint8_t peci_status2 = regs->STATUS2;

	peci_girq_status_clear(dev);

	if (peci_error) {
		regs->ERROR = peci_error;
	}

	if (peci_status2 & MCHP_PECI_STS2_WFE) {
		LOG_WRN("TX FIFO empty ST2:%x", peci_status2);
		k_sem_give(&data->tx_lock);
	}

	if (peci_status2 & MCHP_PECI_STS2_RFE) {
		LOG_WRN("RX FIFO full ST2:%x", peci_status2);
	}
}
#endif

static const struct peci_driver_api peci_xec_driver_api = {
	.config = peci_xec_configure,
	.enable = peci_xec_enable,
	.disable = peci_xec_disable,
	.transfer = peci_xec_transfer,
};
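/*
 * Illustrative application usage through the generic PECI API (a sketch,
 * not part of the driver; the "peci0" node label, the rx buffer and the
 * 0x30 target address are assumptions made for the example). Configure
 * the bus bitrate in KHz, enable the controller and issue a Ping:
 *
 *   const struct device *peci_dev = DEVICE_DT_GET(DT_NODELABEL(peci0));
 *   uint8_t rx[1];
 *   struct peci_msg msg = {
 *           .addr = 0x30,
 *           .cmd_code = PECI_CMD_PING,
 *           .tx_buffer = { .buf = NULL, .len = 0 },
 *           .rx_buffer = { .buf = rx, .len = 0 },
 *   };
 *
 *   peci_config(peci_dev, 1000u);
 *   peci_enable(peci_dev);
 *   peci_transfer(peci_dev, &msg);
 */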
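/* One-time initialization: apply the default pinctrl state, clear the
 * block's sleep enable, un-gate VREF_VTT, pulse the reset bit and, when
 * interrupt driven, configure the error/FIFO interrupts and connect the
 * ISR.
 */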
static int peci_xec_init(const struct device *dev)
{
	const struct peci_xec_config * const cfg = dev->config;
	struct peci_regs * const regs = cfg->regs;
	struct ecs_regs * const ecs_regs = (struct ecs_regs *)(DT_REG_ADDR(DT_NODELABEL(ecs)));

	int ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);

	if (ret != 0) {
		LOG_ERR("XEC PECI pinctrl init failed (%d)", ret);
		return ret;
	}

#ifdef CONFIG_PECI_INTERRUPT_DRIVEN
	struct peci_xec_data * const data = dev->data;

	k_sem_init(&data->tx_lock, 0, 1);
#endif

	peci_clr_slp_en(dev);

	ecs_regs->PECI_DIS = 0x00u;

	/* Reset PECI interface */
	regs->CONTROL |= MCHP_PECI_CTRL_RST;
	k_msleep(PECI_RESET_DELAY_MS);
	regs->CONTROL &= ~MCHP_PECI_CTRL_RST;

#ifdef CONFIG_PECI_INTERRUPT_DRIVEN
	/* Enable interrupt for errors */
	regs->INT_EN1 = (MCHP_PECI_IEN1_EREN | MCHP_PECI_IEN1_EIEN);

	/* Enable interrupt when the Tx FIFO is empty */
	regs->INT_EN2 |= MCHP_PECI_IEN2_ENWFE;
	/* Enable interrupt when the Rx FIFO is full */
	regs->INT_EN2 |= MCHP_PECI_IEN2_ENRFF;

	regs->CONTROL |= MCHP_PECI_CTRL_MIEN;

	/* Direct NVIC */
	IRQ_CONNECT(DT_INST_IRQN(0),
		    DT_INST_IRQ(0, priority),
		    peci_xec_isr, NULL, 0);
#endif
	return 0;
}

static struct peci_xec_data peci_data;

PINCTRL_DT_INST_DEFINE(0);

static const struct peci_xec_config peci_xec_config = {
	.regs = (struct peci_regs * const)(DT_INST_REG_ADDR(0)),
	.irq_num = DT_INST_IRQN(0),
	.girq = DT_INST_PROP_BY_IDX(0, girqs, 0),
	.girq_pos = DT_INST_PROP_BY_IDX(0, girqs, 1),
	.pcr_idx = DT_INST_PROP_BY_IDX(0, pcrs, 0),
	.pcr_pos = DT_INST_PROP_BY_IDX(0, pcrs, 1),
	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0),
};

PM_DEVICE_DT_INST_DEFINE(0, peci_xec_pm_action);

DEVICE_DT_INST_DEFINE(0,
		    &peci_xec_init,
		    PM_DEVICE_DT_INST_GET(0),
		    &peci_data, &peci_xec_config,
		    POST_KERNEL, CONFIG_PECI_INIT_PRIORITY,
		    &peci_xec_driver_api);