// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"

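/*
 * Note: this file implements firmware download for MHI devices. Small SBL
 * (Secondary Boot Loader) images are transferred over BHI (Boot Host
 * Interface) from a single contiguous DMA buffer, while the larger AMSS
 * firmware image and RDDM (RAM dump mode) crash dumps go over BHIe, which
 * operates on a vector table describing a list of DMA-coherent segments.
 */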
/* Set up the RDDM vector table for RDDM transfer and program RXVEC */
void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
		      struct image_info *img_info)
{
	struct mhi_buf *mhi_buf = img_info->mhi_buf;
	struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
	void __iomem *base = mhi_cntrl->bhie;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 sequence_id;
	unsigned int i;

	/* Describe each data segment; the last entry is the vector table itself */
	for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
		bhi_vec->dma_addr = mhi_buf->dma_addr;
		bhi_vec->size = mhi_buf->len;
	}

	dev_dbg(dev, "BHIe programming for RDDM\n");

	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
		      upper_32_bits(mhi_buf->dma_addr));

	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
		      lower_32_bits(mhi_buf->dma_addr));

	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
	sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK);

	mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
			    BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
			    sequence_id);

	dev_dbg(dev, "Address: %p and len: 0x%zx sequence: %u\n",
		&mhi_buf->dma_addr, mhi_buf->len, sequence_id);
}

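/*
 * Note on the panic path below: during a kernel panic the scheduler cannot
 * be relied upon, so rather than sleeping on state_event this path
 * busy-polls the execution environment and BHIE_RXVECSTATUS with udelay()
 * until the dump transfer completes or the timeout expires.
 */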
/* Collect RDDM buffer during kernel panic */
static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
{
	int ret;
	u32 rx_status;
	enum mhi_ee_type ee;
	const u32 delayus = 2000;
	u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
	const u32 rddm_timeout_us = 200000;
	int rddm_retry = rddm_timeout_us / delayus;
	void __iomem *base = mhi_cntrl->bhie;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	dev_dbg(dev, "Entered with pm_state:%s dev_state:%s ee:%s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state),
		TO_MHI_EXEC_STR(mhi_cntrl->ee));

	/*
	 * This should only execute during a kernel panic; we expect all
	 * other cores to shut down while we're collecting the RDDM buffer.
	 * After returning from this function, we expect the device to reset.
	 *
	 * Normally, we would read/write pm_state only after grabbing the
	 * pm_lock, but since we're in a panic we skip it here. There is
	 * also no guarantee that this state change will take effect,
	 * since we're setting it without holding pm_lock.
	 */
	mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
	/* The update should take effect immediately */
	smp_wmb();

	/*
	 * Make sure the device is not already in RDDM. If the device asserts
	 * and a kernel panic follows, the device will already be in RDDM.
	 * In that case, do not trigger SYS ERR again; just proceed with
	 * waiting for image download completion.
	 */
	ee = mhi_get_exec_env(mhi_cntrl);
	if (ee != MHI_EE_RDDM) {
		dev_dbg(dev, "Trigger device into RDDM mode using SYS ERR\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);

		dev_dbg(dev, "Waiting for device to enter RDDM\n");
		while (rddm_retry--) {
			ee = mhi_get_exec_env(mhi_cntrl);
			if (ee == MHI_EE_RDDM)
				break;

			udelay(delayus);
		}

		if (rddm_retry <= 0) {
			/* Hardware reset; force the device to enter RDDM */
			dev_dbg(dev,
				"Did not enter RDDM, do a host req reset\n");
			mhi_write_reg(mhi_cntrl, mhi_cntrl->regs,
				      MHI_SOC_RESET_REQ_OFFSET,
				      MHI_SOC_RESET_REQ);
			udelay(delayus);
		}

		ee = mhi_get_exec_env(mhi_cntrl);
	}

	dev_dbg(dev,
		"Waiting for RDDM image download via BHIe, current EE:%s\n",
		TO_MHI_EXEC_STR(ee));

	while (retry--) {
		ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
					 BHIE_RXVECSTATUS_STATUS_BMSK,
					 BHIE_RXVECSTATUS_STATUS_SHFT,
					 &rx_status);
		if (ret)
			return -EIO;

		if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL)
			return 0;

		udelay(delayus);
	}

	ee = mhi_get_exec_env(mhi_cntrl);
	ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);

	dev_err(dev, "Did not complete RDDM transfer\n");
	dev_err(dev, "Current EE: %s\n", TO_MHI_EXEC_STR(ee));
	dev_err(dev, "RXVEC_STATUS: 0x%x\n", rx_status);

	return -EIO;
}

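/*
 * Note: in the non-panic path the RX vector table is expected to have been
 * programmed beforehand via mhi_rddm_prepare(), so this function only waits
 * (woken via state_event or by timeout) for the device to finish writing
 * the dump.
 */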
/* Download RDDM image from device */
int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
{
	void __iomem *base = mhi_cntrl->bhie;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 rx_status;

	if (in_panic)
		return __mhi_download_rddm_in_panic(mhi_cntrl);

	dev_dbg(dev, "Waiting for RDDM image download via BHIe\n");

	/* Wait for the image download to complete */
	wait_event_timeout(mhi_cntrl->state_event,
			   mhi_read_reg_field(mhi_cntrl, base,
					      BHIE_RXVECSTATUS_OFFS,
					      BHIE_RXVECSTATUS_STATUS_BMSK,
					      BHIE_RXVECSTATUS_STATUS_SHFT,
					      &rx_status) || rx_status,
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
}
EXPORT_SYMBOL_GPL(mhi_download_rddm_img);

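/*
 * Note: AMSS download mirrors the RXVEC programming done in
 * mhi_rddm_prepare(): write the DMA address and size of the transmit
 * vector table, ring the TXVECDB doorbell with a random non-zero sequence
 * ID, then wait for TXVECSTATUS to report transfer completion.
 */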
static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
			    const struct mhi_buf *mhi_buf)
{
	void __iomem *base = mhi_cntrl->bhie;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
	u32 tx_status, sequence_id;
	int ret;

	read_lock_bh(pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		read_unlock_bh(pm_lock);
		return -EIO;
	}

	sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK);
	dev_dbg(dev, "Starting AMSS download via BHIe. Sequence ID:%u\n",
		sequence_id);
	mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
		      upper_32_bits(mhi_buf->dma_addr));

	mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
		      lower_32_bits(mhi_buf->dma_addr));

	mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);

	mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
			    BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
			    sequence_id);
	read_unlock_bh(pm_lock);

	/* Wait for the image download to complete */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
				 mhi_read_reg_field(mhi_cntrl, base,
						    BHIE_TXVECSTATUS_OFFS,
						    BHIE_TXVECSTATUS_STATUS_BMSK,
						    BHIE_TXVECSTATUS_STATUS_SHFT,
						    &tx_status) || tx_status,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
	    tx_status != BHIE_TXVECSTATUS_STATUS_XFER_COMPL)
		return -EIO;

	return (!ret) ? -ETIMEDOUT : 0;
}

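/*
 * Note: unlike BHIe, BHI transfers the image from one physically
 * contiguous DMA buffer. On a reported transfer error the BHI error code
 * and debug registers are dumped to aid diagnosis.
 */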
static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
			   dma_addr_t dma_addr,
			   size_t size)
{
	u32 tx_status, val, session_id;
	int i, ret;
	void __iomem *base = mhi_cntrl->bhi;
	rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct {
		char *name;
		u32 offset;
	} error_reg[] = {
		{ "ERROR_CODE", BHI_ERRCODE },
		{ "ERROR_DBG1", BHI_ERRDBG1 },
		{ "ERROR_DBG2", BHI_ERRDBG2 },
		{ "ERROR_DBG3", BHI_ERRDBG3 },
		{ NULL },
	};

	read_lock_bh(pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		read_unlock_bh(pm_lock);
		goto invalid_pm_state;
	}

	session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK);
	dev_dbg(dev, "Starting SBL download via BHI. Session ID:%u\n",
		session_id);
	mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
	mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
		      upper_32_bits(dma_addr));
	mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
		      lower_32_bits(dma_addr));
	mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
	mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id);
	read_unlock_bh(pm_lock);

	/* Wait for the image download to complete */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
				 mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS,
						    BHI_STATUS_MASK, BHI_STATUS_SHIFT,
						    &tx_status) || tx_status,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		goto invalid_pm_state;

	if (tx_status == BHI_STATUS_ERROR) {
		dev_err(dev, "Image transfer failed\n");
		read_lock_bh(pm_lock);
		if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
			for (i = 0; error_reg[i].name; i++) {
				ret = mhi_read_reg(mhi_cntrl, base,
						   error_reg[i].offset, &val);
				if (ret)
					break;
				dev_err(dev, "Reg: %s value: 0x%x\n",
					error_reg[i].name, val);
			}
		}
		read_unlock_bh(pm_lock);
		goto invalid_pm_state;
	}

	return (!ret) ? -ETIMEDOUT : 0;

invalid_pm_state:
	return -EIO;
}

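/*
 * Note: this frees every segment allocated by mhi_alloc_bhie_table(),
 * including the trailing vector-table segment.
 */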
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
			 struct image_info *image_info)
{
	int i;
	struct mhi_buf *mhi_buf = image_info->mhi_buf;

	for (i = 0; i < image_info->entries; i++, mhi_buf++)
		mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
				  mhi_buf->dma_addr);

	kfree(image_info->mhi_buf);
	kfree(image_info);
}

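/*
 * Note: the image is split into seg_len-sized chunks, with one extra
 * segment appended to hold the BHIe vector table itself. For example
 * (hypothetical numbers), a 600 KB image with seg_len = 256 KB needs
 * DIV_ROUND_UP(600K, 256K) = 3 data segments plus 1 vector-table segment,
 * and the table then holds 3 struct bhi_vec_entry records.
 */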
int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
			 struct image_info **image_info,
			 size_t alloc_size)
{
	size_t seg_size = mhi_cntrl->seg_len;
	int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
	int i;
	struct image_info *img_info;
	struct mhi_buf *mhi_buf;

	img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
	if (!img_info)
		return -ENOMEM;

	/* Allocate memory for entries */
	img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
				    GFP_KERNEL);
	if (!img_info->mhi_buf)
		goto error_alloc_mhi_buf;

	/* Allocate and populate vector table */
	mhi_buf = img_info->mhi_buf;
	for (i = 0; i < segments; i++, mhi_buf++) {
		size_t vec_size = seg_size;

		/* Vector table is the last entry */
		if (i == segments - 1)
			vec_size = sizeof(struct bhi_vec_entry) * i;

		mhi_buf->len = vec_size;
		mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size,
						  &mhi_buf->dma_addr,
						  GFP_KERNEL);
		if (!mhi_buf->buf)
			goto error_alloc_segment;
	}

	img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
	img_info->entries = segments;
	*image_info = img_info;

	return 0;

error_alloc_segment:
	for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
		mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
				  mhi_buf->dma_addr);

error_alloc_mhi_buf:
	kfree(img_info);

	return -ENOMEM;
}

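/*
 * Note: the firmware blob is scattered across the preallocated segments;
 * each vector entry records the actual number of bytes copied, so the
 * final data segment may be shorter than seg_len.
 */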
static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
			      const struct firmware *firmware,
			      struct image_info *img_info)
{
	size_t remainder = firmware->size;
	size_t to_cpy;
	const u8 *buf = firmware->data;
	int i = 0;
	struct mhi_buf *mhi_buf = img_info->mhi_buf;
	struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;

	while (remainder) {
		to_cpy = min(remainder, mhi_buf->len);
		memcpy(mhi_buf->buf, buf, to_cpy);
		bhi_vec->dma_addr = mhi_buf->dma_addr;
		bhi_vec->size = to_cpy;

		buf += to_cpy;
		remainder -= to_cpy;
		i++;
		bhi_vec++;
		mhi_buf++;
	}
}

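/*
 * Note: overall boot flow implemented below: capture device info over BHI,
 * load SBL via BHI, move the device through RESET->READY, wait for it to
 * enter the SBL execution environment, then (for controllers with
 * fbc_download set) push the full AMSS image via BHIe.
 */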
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
{
	const struct firmware *firmware = NULL;
	struct image_info *image_info;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	const char *fw_name;
	void *buf;
	dma_addr_t dma_addr;
	size_t size;
	int i, ret;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device MHI is not in valid state\n");
		return;
	}

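	/*
	 * Note: the serial number and OEM PK hash read below are cached on
	 * the controller so the MHI core can expose them to userspace later
	 * (e.g. via sysfs); failure to read them is logged but not fatal.
	 */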
	/* Save hardware info from BHI */
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_SERIALNU,
			   &mhi_cntrl->serial_number);
	if (ret)
		dev_err(dev, "Could not capture serial number via BHI\n");

	for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i),
				   &mhi_cntrl->oem_pk_hash[i]);
		if (ret) {
			dev_err(dev, "Could not capture OEM PK HASH via BHI\n");
			break;
		}
	}

	/* If the device is in pass-through, do a reset to ready state transition */
	if (mhi_cntrl->ee == MHI_EE_PTHRU)
		goto fw_load_ee_pthru;

	fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
		mhi_cntrl->edl_image : mhi_cntrl->fw_image;

	if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size ||
						     !mhi_cntrl->seg_len))) {
		dev_err(dev,
			"No firmware image defined or !sbl_size || !seg_len\n");
		return;
	}

	ret = request_firmware(&firmware, fw_name, dev);
	if (ret) {
		dev_err(dev, "Error loading firmware: %d\n", ret);
		return;
	}

	size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;

	/* SBL size provided is the maximum size, not necessarily the image size */
	if (size > firmware->size)
		size = firmware->size;

	buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
	if (!buf) {
		release_firmware(firmware);
		return;
	}

	/* Download the SBL image */
	memcpy(buf, firmware->data, size);
	ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size);
	mhi_free_coherent(mhi_cntrl, size, buf, dma_addr);

	if (!mhi_cntrl->fbc_download || ret || mhi_cntrl->ee == MHI_EE_EDL)
		release_firmware(firmware);

	/* Error or in EDL mode, we're done */
	if (ret) {
		dev_err(dev, "MHI did not load SBL, ret:%d\n", ret);
		return;
	}

	if (mhi_cntrl->ee == MHI_EE_EDL)
		return;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_RESET;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/*
	 * If we're doing fbc, populate the vector tables while
	 * the device is transitioning into MHI READY state
	 */
	if (mhi_cntrl->fbc_download) {
		ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image,
					   firmware->size);
		if (ret)
			goto error_alloc_fw_table;

		/* Load the firmware into the BHIE vector table */
		mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image);
	}

fw_load_ee_pthru:
	/* Transition into the MHI RESET->READY state */
	ret = mhi_ready_state_transition(mhi_cntrl);

	if (!mhi_cntrl->fbc_download)
		return;

	if (ret) {
		dev_err(dev, "MHI did not enter READY state\n");
		goto error_read;
	}

	/* Wait for the SBL event */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->ee == MHI_EE_SBL ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "MHI did not enter SBL\n");
		goto error_read;
	}

	/* Start the full firmware image download */
	image_info = mhi_cntrl->fbc_image;
	ret = mhi_fw_load_amss(mhi_cntrl,
			       /* Vector table is the last entry */
			       &image_info->mhi_buf[image_info->entries - 1]);
	if (ret)
		dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);

	release_firmware(firmware);

	return;

error_read:
	mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
	mhi_cntrl->fbc_image = NULL;

error_alloc_fw_table:
	release_firmware(firmware);
}