/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT microchip_xec_peci

#include <errno.h>
#include <device.h>
#include <drivers/peci.h>
#include <soc.h>
#include <logging/log.h>
LOG_MODULE_REGISTER(peci_mchp_xec, CONFIG_PECI_LOG_LEVEL);

/* Maximum PECI core clock is the main clock, 48 MHz */
#define MAX_PECI_CORE_CLOCK 48000u
/* 1 ms */
#define PECI_RESET_DELAY    1000u
/* 100 us */
#define PECI_IDLE_DELAY     100u
/* 5 ms */
#define PECI_IDLE_TIMEOUT   50u
/* Maximum retries */
#define PECI_TIMEOUT_RETRIES 3u
/* Maximum read buffer fill wait retries */
#define PECI_RX_BUF_FILL_WAIT_RETRY 100u

/* 10 us */
#define PECI_IO_DELAY       10

#define OPT_BIT_TIME_MSB_OFS 8u

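/* Extra FCS bytes read back with every response: the write-block FCS followed
 * by the read-block FCS.
 */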
#define PECI_FCS_LEN         2

struct peci_xec_config {
	PECI_Type *base;
	uint8_t irq_num;
};

struct peci_xec_data {
	struct k_sem tx_lock;
	uint32_t  bitrate;
	int    timeout_retries;
};

static struct peci_xec_data peci_data;

static const struct peci_xec_config peci_xec_config = {
	.base = (PECI_Type *) DT_INST_REG_ADDR(0),
	.irq_num = DT_INST_IRQN(0),
};

static int check_bus_idle(PECI_Type *base)
{
	uint8_t delay_cnt = PECI_IDLE_TIMEOUT;

	/* Wait until the PECI bus becomes idle.
	 * Note that the hardware does not generate an interrupt when the IDLE
	 * bit in the status register changes, so we need to poll.
	 */
	while (!(base->STATUS2 & MCHP_PECI_STS2_IDLE)) {
		k_busy_wait(PECI_IDLE_DELAY);
		delay_cnt--;

		if (!delay_cnt) {
			LOG_WRN("Bus is busy");
			return -EBUSY;
		}
	}
	return 0;
}

static int peci_xec_configure(const struct device *dev, uint32_t bitrate)
{
	ARG_UNUSED(dev);

	peci_data.bitrate = bitrate;
	PECI_Type *base = peci_xec_config.base;
	uint16_t value;

	/* Power down PECI interface */
	base->CONTROL = MCHP_PECI_CTRL_PD;

	/* Adjust bitrate: program the optimal bit time as the PECI core clock
	 * divided by the requested bitrate, split across the LSB and MSB
	 * option registers.
	 */
	value = MAX_PECI_CORE_CLOCK / bitrate;
	base->OPT_BIT_TIME_LSB = value & MCHP_PECI_OPT_BT_LSB_MASK;
	base->OPT_BIT_TIME_MSB = (value >> OPT_BIT_TIME_MSB_OFS) &
				 MCHP_PECI_OPT_BT_MSB_MASK;

	/* Power up PECI interface */
	base->CONTROL &= ~MCHP_PECI_CTRL_PD;

	return 0;
}

static int peci_xec_disable(const struct device *dev)
{
	ARG_UNUSED(dev);
	int ret;
	PECI_Type *base = peci_xec_config.base;

	/* Make sure no transaction is interrupted before disabling the HW */
	ret = check_bus_idle(base);
	if (ret) {
		return ret;
	}

#ifdef CONFIG_PECI_INTERRUPT_DRIVEN
	NVIC_ClearPendingIRQ(peci_xec_config.irq_num);
	irq_disable(peci_xec_config.irq_num);
#endif
	base->CONTROL |= MCHP_PECI_CTRL_PD;

	return 0;
}

static int peci_xec_enable(const struct device *dev)
{
	ARG_UNUSED(dev);
	PECI_Type *base = peci_xec_config.base;

	base->CONTROL &= ~MCHP_PECI_CTRL_PD;

#ifdef CONFIG_PECI_INTERRUPT_DRIVEN
	irq_enable(peci_xec_config.irq_num);
#endif
	return 0;
}

static void peci_xec_bus_recovery(const struct device *dev, bool full_reset)
{
	PECI_Type *base = peci_xec_config.base;

	LOG_WRN("%s full_reset:%d", __func__, full_reset);
	if (full_reset) {
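		/* Hold the block in reset while powered down, then release
		 * reset and restore the previously configured bitrate.
		 */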
		base->CONTROL = MCHP_PECI_CTRL_PD | MCHP_PECI_CTRL_RST;
		k_busy_wait(PECI_RESET_DELAY);
		base->CONTROL &= ~MCHP_PECI_CTRL_RST;

		peci_xec_configure(dev, peci_data.bitrate);
	} else {
		/* Only reset internal FIFOs */
		base->CONTROL |= MCHP_PECI_CTRL_FRST;
	}
}

static int peci_xec_write(const struct device *dev, struct peci_msg *msg)
{
	ARG_UNUSED(dev);
	int i;
	int ret;

	struct peci_buf *tx_buf = &msg->tx_buffer;
	struct peci_buf *rx_buf = &msg->rx_buffer;
	PECI_Type *base = peci_xec_config.base;

	/* Check if FIFO is full */
	if (base->STATUS2 & MCHP_PECI_STS2_WFF) {
		LOG_WRN("%s FIFO is full", __func__);
		return -EIO;
	}

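	/* Release the FIFO reset bit before loading the TX FIFO */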
	base->CONTROL &= ~MCHP_PECI_CTRL_FRST;

	/* Add PECI transaction header to TX FIFO */
	base->WR_DATA = msg->addr;
	base->WR_DATA = tx_buf->len;
	base->WR_DATA = rx_buf->len;

	/* Add PECI payload to Tx FIFO only if write length is valid */
	if (tx_buf->len) {
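		/* The command code counts as the first byte of the write
		 * length, so only len - 1 payload bytes follow it.
		 */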
		base->WR_DATA = msg->cmd_code;
		for (i = 0; i < tx_buf->len - 1; i++) {
			if (!(base->STATUS2 & MCHP_PECI_STS2_WFF)) {
				base->WR_DATA = tx_buf->buf[i];
			}
		}
	}

	/* Check bus is idle before starting a new transfer */
	ret = check_bus_idle(base);
	if (ret) {
		return ret;
	}

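	/* Start the transmission and give the transfer time to begin before
	 * waiting for completion.
	 */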
	base->CONTROL |= MCHP_PECI_CTRL_TXEN;
	k_busy_wait(PECI_IO_DELAY);

	/* Wait for transmission to complete */
#ifdef CONFIG_PECI_INTERRUPT_DRIVEN
	if (k_sem_take(&peci_data.tx_lock, PECI_IO_DELAY * tx_buf->len)) {
		return -ETIMEDOUT;
	}
#else
	/* In the worst case, the overall timeout will be 1 ms (100 * 10 us) */
	uint8_t wait_timeout_cnt = 100;

	while (!(base->STATUS1 & MCHP_PECI_STS1_EOF)) {
		k_busy_wait(PECI_IO_DELAY);
		wait_timeout_cnt--;
		if (!wait_timeout_cnt) {
			LOG_WRN("Tx timeout");
			peci_data.timeout_retries++;
			/* Full reset only if multiple consecutive failures */
			if (peci_data.timeout_retries > PECI_TIMEOUT_RETRIES) {
				peci_xec_bus_recovery(dev, true);
			} else {
				peci_xec_bus_recovery(dev, false);
			}

			return -ETIMEDOUT;
		}
	}
#endif
	peci_data.timeout_retries = 0;

	return 0;
}

static int peci_xec_read(const struct device *dev, struct peci_msg *msg)
{
	ARG_UNUSED(dev);
	int i;
	int ret;
	uint8_t tx_fcs;
	uint8_t bytes_rcvd;
	uint8_t wait_timeout_cnt;
	struct peci_buf *rx_buf = &msg->rx_buffer;
	PECI_Type *base = peci_xec_config.base;

	/* Attempt to read data from RX FIFO */
	bytes_rcvd = 0;
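	/* The RX FIFO holds the write-block FCS, the response payload and the
	 * read-block FCS, hence the extra PECI_FCS_LEN iterations.
	 */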
	for (i = 0; i < (rx_buf->len + PECI_FCS_LEN); i++) {
		/* Worst case timeout will be 1msec (100 * 10usec) */
		wait_timeout_cnt = PECI_RX_BUF_FILL_WAIT_RETRY;
		/* Wait for read buffer to fill up */
		while (base->STATUS2 & MCHP_PECI_STS2_RFE) {
			k_usleep(PECI_IO_DELAY);
			wait_timeout_cnt--;
			if (!wait_timeout_cnt) {
				LOG_WRN("Rx buffer empty");
				return -ETIMEDOUT;
			}
		}

		if (i == 0) {
			/* Get write block FCS just for debug */
			tx_fcs = base->RD_DATA;
			LOG_DBG("TX FCS %x", tx_fcs);
		} else if (i == (rx_buf->len + 1)) {
			/* Get read block FCS, but don't count it */
			rx_buf->buf[i-1] = base->RD_DATA;
		} else {
			/* Get response */
			rx_buf->buf[i-1] = base->RD_DATA;
			bytes_rcvd++;
		}
	}

	/* Check if transaction is as expected */
	if (rx_buf->len != bytes_rcvd) {
		LOG_INF("Incomplete %x vs %x", bytes_rcvd, rx_buf->len);
	}

	/* Once the write-read transaction is complete, ensure the bus is idle
	 * before resetting the internal FIFOs
	 */
	ret = check_bus_idle(base);
	if (ret) {
		return ret;
	}

	return 0;
}

static int peci_xec_transfer(const struct device *dev, struct peci_msg *msg)
{
	ARG_UNUSED(dev);
	int ret;
	PECI_Type *base = peci_xec_config.base;
	uint8_t err_val;

	ret = peci_xec_write(dev, msg);
	if (ret) {
		return ret;
	}

	/* If a PECI transmission is successful, it may or may not involve a
	 * read operation; check whether the transaction expects a response.
	 */
	if (msg->rx_buffer.len) {
		ret = peci_xec_read(dev, msg);
		if (ret) {
			return ret;
		}
	}

	/* Cleanup */
	if (base->STATUS1 & MCHP_PECI_STS1_EOF) {
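		/* EOF is write-one-to-clear; writing it back clears the
		 * end-of-frame indication from the completed transaction.
		 */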
		base->STATUS1 |= MCHP_PECI_STS1_EOF;
	}

	/* Check for error conditions and perform bus recovery if necessary */
	err_val = base->ERROR;
	if (err_val) {
		if (err_val & MCHP_PECI_ERR_RDOV) {
			LOG_ERR("Read buffer is not empty");
		}

		if (err_val & MCHP_PECI_ERR_WRUN) {
			LOG_ERR("Write buffer is not empty");
		}

		if (err_val & MCHP_PECI_ERR_BERR) {
			LOG_ERR("PECI bus error");
		}

		LOG_DBG("PECI err %x", err_val);
		LOG_DBG("PECI sts1 %x", base->STATUS1);
		LOG_DBG("PECI sts2 %x", base->STATUS2);

		/* ERROR is a clear-on-write register; clear any errors raised
		 * at the end of the transaction. A temporary variable is used
		 * to avoid complaints from the static code analyzer.
		 */
		base->ERROR = err_val;
		peci_xec_bus_recovery(dev, false);
		return -EIO;
	}

	return 0;
}

#ifdef CONFIG_PECI_INTERRUPT_DRIVEN
static void peci_xec_isr(const void *arg)
{
	ARG_UNUSED(arg);
	PECI_Type *base = peci_xec_config.base;

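	/* Acknowledge the PECI source bit in the aggregated GIRQ block */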
	MCHP_GIRQ_SRC(MCHP_PECI_GIRQ) = MCHP_PECI_GIRQ_VAL;

	if (base->ERROR) {
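		/* ERROR is clear-on-write; writing the set bits back clears
		 * them.
		 */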
		base->ERROR = base->ERROR;
	}

	if (base->STATUS2 & MCHP_PECI_STS2_WFE) {
		LOG_WRN("TX FIFO empty ST2:%x", base->STATUS2);
		k_sem_give(&peci_data.tx_lock);
	}

	if (base->STATUS2 & MCHP_PECI_STS2_RFE) {
		LOG_WRN("RX FIFO full ST2:%x", base->STATUS2);
	}
}
#endif

static const struct peci_driver_api peci_xec_driver_api = {
	.config = peci_xec_configure,
	.enable = peci_xec_enable,
	.disable = peci_xec_disable,
	.transfer = peci_xec_transfer,
};

static int peci_xec_init(const struct device *dev)
{
	ARG_UNUSED(dev);

	PECI_Type *base = peci_xec_config.base;
#ifdef CONFIG_PECI_INTERRUPT_DRIVEN
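	/* tx_lock starts at 0 so peci_xec_write() blocks until the ISR signals
	 * that the TX FIFO has been drained.
	 */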
	k_sem_init(&peci_data.tx_lock, 0, 1);
#endif

	/* Reset PECI interface */
	base->CONTROL |= MCHP_PECI_CTRL_RST;
	k_busy_wait(PECI_RESET_DELAY);
	base->CONTROL &= ~MCHP_PECI_CTRL_RST;

#ifdef CONFIG_PECI_INTERRUPT_DRIVEN
	/* Enable interrupt for errors */
	base->INT_EN1 = (MCHP_PECI_IEN1_EREN | MCHP_PECI_IEN1_EIEN);

	/* Enable interrupt for TX FIFO empty */
	base->INT_EN2 |= MCHP_PECI_IEN2_ENWFE;
	/* Enable interrupt for RX FIFO full */
	base->INT_EN2 |= MCHP_PECI_IEN2_ENRFF;

	base->CONTROL |= MCHP_PECI_CTRL_MIEN;

	/* Direct NVIC */
	IRQ_CONNECT(DT_INST_IRQN(0),
		    DT_INST_IRQ(0, priority),
		    peci_xec_isr, NULL, 0);
#endif
	return 0;
}

DEVICE_DT_INST_DEFINE(0,
		    &peci_xec_init,
		    NULL,
		    NULL, NULL,
		    POST_KERNEL, CONFIG_PECI_INIT_PRIORITY,
		    &peci_xec_driver_api);