/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <endian.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <platform_def.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <drivers/ufs.h>
#include <lib/mmio.h>

#define CDB_ADDR_MASK			127
#define ALIGN_CDB(x)			(((x) + CDB_ADDR_MASK) & ~CDB_ADDR_MASK)
#define ALIGN_8(x)			(((x) + 7) & ~7)

#define UFS_DESC_SIZE			0x400
#define MAX_UFS_DESC_SIZE		0x8000		/* 32 descriptors */

#define MAX_PRDT_SIZE			0x40000		/* 256KB */

static ufs_params_t ufs_params;
static int nutrs;	/* Number of UTP Transfer Request Slots */

/*
 * ufs_uic_error_handler - UIC error interrupts handler
 * @ignore_linereset: set to ignore PA_LAYER_GEN_ERR (UIC error)
 *
 * Returns
 * 0 - ignore error
 * -EIO - fatal error, needs re-init
 * -EAGAIN - non-fatal error, retries are sufficient
 */
static int ufs_uic_error_handler(bool ignore_linereset)
{
	uint32_t data;
	int result = 0;

	data = mmio_read_32(ufs_params.reg_base + UECPA);
	if (data & UFS_UIC_PA_ERROR_MASK) {
		if (data & PA_LAYER_GEN_ERR) {
			if (!ignore_linereset) {
				return -EIO;
			}
		} else {
			result = -EAGAIN;
		}
	}

	data = mmio_read_32(ufs_params.reg_base + UECDL);
	if (data & UFS_UIC_DL_ERROR_MASK) {
		if (data & PA_INIT_ERR) {
			return -EIO;
		}
		result = -EAGAIN;
	}

	/* NL/TL/DME error requires retries */
	data = mmio_read_32(ufs_params.reg_base + UECN);
	if (data & UFS_UIC_NL_ERROR_MASK) {
		result = -EAGAIN;
	}

	data = mmio_read_32(ufs_params.reg_base + UECT);
	if (data & UFS_UIC_TL_ERROR_MASK) {
		result = -EAGAIN;
	}

	data = mmio_read_32(ufs_params.reg_base + UECDME);
	if (data & UFS_UIC_DME_ERROR_MASK) {
		result = -EAGAIN;
	}

	return result;
}

/*
 * ufs_error_handler - error interrupts handler
 * @status: interrupt status
 * @ignore_linereset: set to ignore PA_LAYER_GEN_ERR (UIC error)
 *
 * Returns
 * 0 - ignore error
 * -EIO - fatal error, needs re-init
 * -EAGAIN - non-fatal error, retries are sufficient
 */
static int ufs_error_handler(uint32_t status, bool ignore_linereset)
{
	int result;

	if (status & UFS_INT_UE) {
		result = ufs_uic_error_handler(ignore_linereset);
		if (result != 0) {
			return result;
		}
	}

	/* Return I/O error on fatal error, it is up to the caller to re-init UFS */
	if (status & UFS_INT_FATAL) {
		return -EIO;
	}

	/* retry for non-fatal errors */
	return -EAGAIN;
}

/*
 * ufs_wait_for_int_status - wait for expected interrupt status
 * @expected: expected interrupt status bit
 * @timeout_ms: timeout in milliseconds to poll for
 * @ignore_linereset: set to ignore PA_LAYER_GEN_ERR (UIC error)
 *
 * Returns
 * 0 - received expected interrupt and cleared it
 * -EIO - fatal error, needs re-init
 * -EAGAIN - non-fatal error, caller can retry
 * -ETIMEDOUT - timed out waiting for interrupt status
 */
static int ufs_wait_for_int_status(const uint32_t expected_status,
				   unsigned int timeout_ms,
				   bool ignore_linereset)
{
	uint32_t interrupt_status, interrupts_enabled;
	int result = 0;

	interrupts_enabled = mmio_read_32(ufs_params.reg_base + IE);
	do {
		interrupt_status = mmio_read_32(ufs_params.reg_base + IS) & interrupts_enabled;
		if (interrupt_status & UFS_INT_ERR) {
			mmio_write_32(ufs_params.reg_base + IS, interrupt_status & UFS_INT_ERR);
			result = ufs_error_handler(interrupt_status, ignore_linereset);
			if (result != 0) {
				return result;
			}
		}

		if (interrupt_status & expected_status) {
			break;
		}
		mdelay(1);
	} while (timeout_ms-- > 0);

	if (!(interrupt_status & expected_status)) {
		return -ETIMEDOUT;
	}

	mmio_write_32(ufs_params.reg_base + IS, expected_status);

	return result;
}

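/*
 * ufshc_send_uic_cmd - send a UIC command and wait for its completion
 * @base: UFS host controller register base
 * @cmd: UIC command (opcode and UCMDARG1..3 arguments)
 *
 * Waits for HCS.UCRDY, clears pending interrupt status, programs the
 * UCMDARG/UICCMD registers and then polls for UFS_INT_UCCS.
 *
 * Returns the UIC config result code from UCMDARG2 on success, or a
 * negative errno value (-EINVAL/-EBUSY/-EIO/-EAGAIN/-ETIMEDOUT) on failure.
 */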
int ufshc_send_uic_cmd(uintptr_t base, uic_cmd_t *cmd)
{
	unsigned int data;
	int result, retries;

	if (base == 0 || cmd == NULL)
		return -EINVAL;

	for (retries = 0; retries < 100; retries++) {
		data = mmio_read_32(base + HCS);
		if ((data & HCS_UCRDY) != 0) {
			break;
		}
		mdelay(1);
	}
	if (retries >= 100) {
		return -EBUSY;
	}

	mmio_write_32(base + IS, ~0);
	mmio_write_32(base + UCMDARG1, cmd->arg1);
	mmio_write_32(base + UCMDARG2, cmd->arg2);
	mmio_write_32(base + UCMDARG3, cmd->arg3);
	mmio_write_32(base + UICCMD, cmd->op);

	result = ufs_wait_for_int_status(UFS_INT_UCCS, UIC_CMD_TIMEOUT_MS,
					 cmd->op == DME_SET);
	if (result != 0) {
		return result;
	}

	return mmio_read_32(base + UCMDARG2) & CONFIG_RESULT_CODE_MASK;
}

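/*
 * ufshc_dme_get - read a DME attribute over the UIC interface
 *
 * Issues DME_GET for the given attribute/GenSelectorIndex pair, retrying
 * up to UFS_UIC_COMMAND_RETRIES times on transient failures. The attribute
 * value is returned through @val.
 */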
int ufshc_dme_get(unsigned int attr, unsigned int idx, unsigned int *val)
{
	uintptr_t base;
	int result, retries;
	uic_cmd_t cmd;

	assert(ufs_params.reg_base != 0);

	if (val == NULL)
		return -EINVAL;

	base = ufs_params.reg_base;
	cmd.arg1 = (attr << 16) | GEN_SELECTOR_IDX(idx);
	cmd.arg2 = 0;
	cmd.arg3 = 0;
	cmd.op = DME_GET;

	for (retries = 0; retries < UFS_UIC_COMMAND_RETRIES; ++retries) {
		result = ufshc_send_uic_cmd(base, &cmd);
		if (result == 0)
			break;
		/* -EIO requires UFS re-init */
		if (result == -EIO) {
			return result;
		}
	}
	if (retries >= UFS_UIC_COMMAND_RETRIES)
		return -EIO;

	*val = mmio_read_32(base + UCMDARG3);
	return 0;
}

int ufshc_dme_set(unsigned int attr, unsigned int idx, unsigned int val)
{
	uintptr_t base;
	int result, retries;
	uic_cmd_t cmd;

	assert((ufs_params.reg_base != 0));

	base = ufs_params.reg_base;
	cmd.arg1 = (attr << 16) | GEN_SELECTOR_IDX(idx);
	cmd.arg2 = 0;
	cmd.arg3 = val;
	cmd.op = DME_SET;

	for (retries = 0; retries < UFS_UIC_COMMAND_RETRIES; ++retries) {
		result = ufshc_send_uic_cmd(base, &cmd);
		if (result == 0)
			break;
		/* -EIO requires UFS re-init */
		if (result == -EIO) {
			return result;
		}
	}
	if (retries >= UFS_UIC_COMMAND_RETRIES)
		return -EIO;

	return 0;
}

static int ufshc_hce_enable(uintptr_t base)
{
	unsigned int data;
	int retries;

	/* Enable Host Controller */
	mmio_write_32(base + HCE, HCE_ENABLE);

	/* Wait until the basic initialization sequence has completed */
	for (retries = 0; retries < HCE_ENABLE_INNER_RETRIES; ++retries) {
		data = mmio_read_32(base + HCE);
		if (data & HCE_ENABLE) {
			break;
		}
		udelay(HCE_ENABLE_TIMEOUT_US);
	}
	if (retries >= HCE_ENABLE_INNER_RETRIES) {
		return -ETIMEDOUT;
	}

	return 0;
}

static int ufshc_hce_disable(uintptr_t base)
{
	unsigned int data;
	int timeout;

	/* Disable Host Controller */
	mmio_write_32(base + HCE, HCE_DISABLE);
	timeout = HCE_DISABLE_TIMEOUT_US;
	do {
		data = mmio_read_32(base + HCE);
		if ((data & HCE_ENABLE) == HCE_DISABLE) {
			break;
		}
		udelay(1);
	} while (--timeout > 0);

	if (timeout <= 0) {
		return -ETIMEDOUT;
	}

	return 0;
}


static int ufshc_reset(uintptr_t base)
{
	unsigned int data;
	int retries, result;

	/* disable controller if enabled */
	if (mmio_read_32(base + HCE) & HCE_ENABLE) {
		result = ufshc_hce_disable(base);
		if (result != 0) {
			return -EIO;
		}
	}

	for (retries = 0; retries < HCE_ENABLE_OUTER_RETRIES; ++retries) {
		result = ufshc_hce_enable(base);
		if (result == 0) {
			break;
		}
	}
	if (retries >= HCE_ENABLE_OUTER_RETRIES) {
		return -EIO;
	}

	/* Enable UIC Interrupts alone. We can ignore other interrupts until
	 * link is up as there might be spurious error interrupts during link-up
	 */
	data = UFS_INT_UCCS | UFS_INT_UHES | UFS_INT_UHXS | UFS_INT_UPMS;
	mmio_write_32(base + IE, data);

	return 0;
}

static int ufshc_dme_link_startup(uintptr_t base)
{
	uic_cmd_t cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op = DME_LINKSTARTUP;
	return ufshc_send_uic_cmd(base, &cmd);
}

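/*
 * ufshc_link_startup - bring up the UniPro link
 *
 * Issues DME_LINKSTARTUP and, on failure, resets the host controller
 * before trying again. Spurious ULSS/UE status raised by the line-reset
 * during link-up is cleared here so that it is not reported as an error
 * later on.
 */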
static int ufshc_link_startup(uintptr_t base)
{
	int data, result;
	int retries;

	for (retries = DME_LINKSTARTUP_RETRIES; retries > 0; retries--) {
		result = ufshc_dme_link_startup(base);
		if (result != 0) {
			/* Reset controller before trying again */
			result = ufshc_reset(base);
			if (result != 0) {
				return result;
			}
			continue;
		}
		assert(mmio_read_32(base + HCS) & HCS_DP);
		data = mmio_read_32(base + IS);
		if (data & UFS_INT_ULSS)
			mmio_write_32(base + IS, UFS_INT_ULSS);

		/* clear UE set due to line-reset */
		if (data & UFS_INT_UE) {
			mmio_write_32(base + IS, UFS_INT_UE);
		}
		/* clearing line-reset, UECPA is cleared on read */
		mmio_read_32(base + UECPA);
		return 0;
	}
	return -EIO;
}

/* Read Door Bell register to check if slot zero is available */
static int is_slot_available(void)
{
	if (mmio_read_32(ufs_params.reg_base + UTRLDBR) & 0x1) {
		return -EBUSY;
	}
	return 0;
}

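/*
 * get_utrd - carve one transfer request out of the descriptor memory
 *
 * Only slot 0 is ever used, so the layout inside the descriptor buffer is
 * fixed:
 *   [UTRD header][pad to 128B][command UPIU][response UPIU (8B aligned)][PRDT]
 * The response UPIU offset (RUO) and length (RUL) fields in the UTRD
 * header are expressed in DWORD units.
 */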
static void get_utrd(utp_utrd_t *utrd)
{
	uintptr_t base;
	int result;
	utrd_header_t *hd;

	assert(utrd != NULL);
	result = is_slot_available();
	assert(result == 0);

	/* clear utrd */
	memset((void *)utrd, 0, sizeof(utp_utrd_t));
	base = ufs_params.desc_base;
	/* clear the descriptor */
	memset((void *)base, 0, UFS_DESC_SIZE);

	utrd->header = base;
	utrd->task_tag = 1; /* We always use the first slot */
	/* CDB address should be aligned with 128 bytes */
	utrd->upiu = ALIGN_CDB(utrd->header + sizeof(utrd_header_t));
	utrd->resp_upiu = ALIGN_8(utrd->upiu + sizeof(cmd_upiu_t));
	utrd->size_upiu = utrd->resp_upiu - utrd->upiu;
	utrd->size_resp_upiu = ALIGN_8(sizeof(resp_upiu_t));
	utrd->prdt = utrd->resp_upiu + utrd->size_resp_upiu;

	hd = (utrd_header_t *)utrd->header;
	hd->ucdba = utrd->upiu & UINT32_MAX;
	hd->ucdbau = (utrd->upiu >> 32) & UINT32_MAX;
	/* Both RUL and RUO are based on DWORDs */
	hd->rul = utrd->size_resp_upiu >> 2;
	hd->ruo = utrd->size_upiu >> 2;
	(void)result;
}

/*
 * Prepare UTRD, Command UPIU, Response UPIU.
 */
static int ufs_prepare_cmd(utp_utrd_t *utrd, uint8_t op, uint8_t lun,
			   int lba, uintptr_t buf, size_t length)
{
	utrd_header_t *hd;
	cmd_upiu_t *upiu;
	prdt_t *prdt;
	unsigned int ulba;
	unsigned int lba_cnt;
	uintptr_t desc_limit;
	uintptr_t prdt_end;

	hd = (utrd_header_t *)utrd->header;
	upiu = (cmd_upiu_t *)utrd->upiu;

	hd->i = 1;
	hd->ct = CT_UFS_STORAGE;
	hd->ocs = OCS_MASK;

	upiu->trans_type = CMD_UPIU;
	upiu->task_tag = utrd->task_tag;
	upiu->cdb[0] = op;
	ulba = (unsigned int)lba;
	lba_cnt = (unsigned int)(length >> UFS_BLOCK_SHIFT);
	switch (op) {
	case CDBCMD_TEST_UNIT_READY:
		break;
	case CDBCMD_READ_CAPACITY_10:
		hd->dd = DD_OUT;
		upiu->flags = UPIU_FLAGS_R | UPIU_FLAGS_ATTR_S;
		upiu->lun = lun;
		break;
	case CDBCMD_READ_10:
		hd->dd = DD_OUT;
		upiu->flags = UPIU_FLAGS_R | UPIU_FLAGS_ATTR_S;
		upiu->lun = lun;
		upiu->cdb[1] = RW_WITHOUT_CACHE;
		/* set logical block address */
		upiu->cdb[2] = (ulba >> 24) & 0xff;
		upiu->cdb[3] = (ulba >> 16) & 0xff;
		upiu->cdb[4] = (ulba >> 8) & 0xff;
		upiu->cdb[5] = ulba & 0xff;
		/* set transfer length */
		upiu->cdb[7] = (lba_cnt >> 8) & 0xff;
		upiu->cdb[8] = lba_cnt & 0xff;
		break;
	case CDBCMD_WRITE_10:
		hd->dd = DD_IN;
		upiu->flags = UPIU_FLAGS_W | UPIU_FLAGS_ATTR_S;
		upiu->lun = lun;
		upiu->cdb[1] = RW_WITHOUT_CACHE;
		/* set logical block address */
		upiu->cdb[2] = (ulba >> 24) & 0xff;
		upiu->cdb[3] = (ulba >> 16) & 0xff;
		upiu->cdb[4] = (ulba >> 8) & 0xff;
		upiu->cdb[5] = ulba & 0xff;
		/* set transfer length */
		upiu->cdb[7] = (lba_cnt >> 8) & 0xff;
		upiu->cdb[8] = lba_cnt & 0xff;
		break;
	default:
		assert(0);
		break;
	}
	if (hd->dd == DD_IN) {
		flush_dcache_range(buf, length);
	} else if (hd->dd == DD_OUT) {
		inv_dcache_range(buf, length);
	}

	utrd->prdt_length = 0;
	if (length) {
		upiu->exp_data_trans_len = htobe32(length);
		assert(lba_cnt <= UINT16_MAX);
		prdt = (prdt_t *)utrd->prdt;

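		/*
		 * Build the PRDT: split the transfer into entries of at most
		 * MAX_PRDT_SIZE (256KB) each. The dbc field holds the byte
		 * count minus one, and every entry must fit inside the
		 * descriptor memory supplied by the platform.
		 */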
		desc_limit = ufs_params.desc_base + ufs_params.desc_size;
		while (length > 0) {
			if ((uintptr_t)prdt + sizeof(prdt_t) > desc_limit) {
				ERROR("UFS: Exceeded descriptor limit. Image is too large\n");
				panic();
			}
			prdt->dba = (unsigned int)(buf & UINT32_MAX);
			prdt->dbau = (unsigned int)((buf >> 32) & UINT32_MAX);
			/* prdt->dbc counts from 0 */
			if (length > MAX_PRDT_SIZE) {
				prdt->dbc = MAX_PRDT_SIZE - 1;
				length = length - MAX_PRDT_SIZE;
			} else {
				prdt->dbc = length - 1;
				length = 0;
			}
			buf += MAX_PRDT_SIZE;
			prdt++;
			utrd->prdt_length++;
		}
		hd->prdtl = utrd->prdt_length;
		hd->prdto = (utrd->size_upiu + utrd->size_resp_upiu) >> 2;
	}

	prdt_end = utrd->prdt + utrd->prdt_length * sizeof(prdt_t);
	flush_dcache_range(utrd->header, prdt_end - utrd->header);
	return 0;
}

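/*
 * Prepare UTRD and Query Request UPIU for a descriptor/flag/attribute
 * query. Write-type queries copy the payload (or attribute value) into
 * the data segment of the request UPIU before the descriptor is flushed
 * to memory.
 */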
static int ufs_prepare_query(utp_utrd_t *utrd, uint8_t op, uint8_t idn,
			     uint8_t index, uint8_t sel,
			     uintptr_t buf, size_t length)
{
	utrd_header_t *hd;
	query_upiu_t *query_upiu;

	hd = (utrd_header_t *)utrd->header;
	query_upiu = (query_upiu_t *)utrd->upiu;

	hd->i = 1;
	hd->ct = CT_UFS_STORAGE;
	hd->ocs = OCS_MASK;

	query_upiu->trans_type = QUERY_REQUEST_UPIU;
	query_upiu->task_tag = utrd->task_tag;
	query_upiu->data_segment_len = htobe16(length);
	query_upiu->ts.desc.opcode = op;
	query_upiu->ts.desc.idn = idn;
	query_upiu->ts.desc.index = index;
	query_upiu->ts.desc.selector = sel;
	switch (op) {
	case QUERY_READ_DESC:
		query_upiu->query_func = QUERY_FUNC_STD_READ;
		query_upiu->ts.desc.length = htobe16(length);
		break;
	case QUERY_WRITE_DESC:
		query_upiu->query_func = QUERY_FUNC_STD_WRITE;
		query_upiu->ts.desc.length = htobe16(length);
		memcpy((void *)(utrd->upiu + sizeof(query_upiu_t)),
		       (void *)buf, length);
		break;
	case QUERY_READ_ATTR:
	case QUERY_READ_FLAG:
		query_upiu->query_func = QUERY_FUNC_STD_READ;
		break;
	case QUERY_CLEAR_FLAG:
	case QUERY_SET_FLAG:
		query_upiu->query_func = QUERY_FUNC_STD_WRITE;
		break;
	case QUERY_WRITE_ATTR:
		query_upiu->query_func = QUERY_FUNC_STD_WRITE;
		query_upiu->ts.attr.value = htobe32(*((uint32_t *)buf));
		break;
	default:
		assert(0);
		break;
	}
	flush_dcache_range((uintptr_t)utrd->header, UFS_DESC_SIZE);
	return 0;
}

static void ufs_prepare_nop_out(utp_utrd_t *utrd)
{
	utrd_header_t *hd;
	nop_out_upiu_t *nop_out;

	hd = (utrd_header_t *)utrd->header;
	nop_out = (nop_out_upiu_t *)utrd->upiu;

	hd->i = 1;
	hd->ct = CT_UFS_STORAGE;
	hd->ocs = OCS_MASK;

	nop_out->trans_type = 0;
	nop_out->task_tag = utrd->task_tag;
	flush_dcache_range((uintptr_t)utrd->header, UFS_DESC_SIZE);
}

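/*
 * ufs_send_request - ring the doorbell for the given task tag
 *
 * Clears stale interrupt status, makes sure the UTP Transfer Request List
 * is running (UTRLRSR) and sets the corresponding bit in UTRLDBR. The
 * controller clears that bit again once the request has completed.
 */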
static void ufs_send_request(int task_tag)
{
	unsigned int data;
	int slot;

	slot = task_tag - 1;
	/* clear all interrupts */
	mmio_write_32(ufs_params.reg_base + IS, ~0);

	mmio_write_32(ufs_params.reg_base + UTRLRSR, 1);
	assert(mmio_read_32(ufs_params.reg_base + UTRLRSR) == 1);

	data = UTRIACR_IAEN | UTRIACR_CTR | UTRIACR_IACTH(0x1F) |
	       UTRIACR_IATOVAL(0xFF);
	mmio_write_32(ufs_params.reg_base + UTRIACR, data);
	/* send request */
	mmio_setbits_32(ufs_params.reg_base + UTRLDBR, 1U << slot);
}

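/*
 * ufs_check_resp - wait for completion and validate the response UPIU
 *
 * Polls for UFS_INT_UTRCS, invalidates the descriptor from the cache and
 * checks the overall command status and transaction type. A Unit
 * Attention sense condition is reported as -EAGAIN so the caller can
 * simply retry the command.
 */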
static int ufs_check_resp(utp_utrd_t *utrd, int trans_type, unsigned int timeout_ms)
{
	utrd_header_t *hd;
	resp_upiu_t *resp;
	sense_data_t *sense;
	unsigned int data;
	int slot, result;

	hd = (utrd_header_t *)utrd->header;
	resp = (resp_upiu_t *)utrd->resp_upiu;

	result = ufs_wait_for_int_status(UFS_INT_UTRCS, timeout_ms, false);
	if (result != 0) {
		return result;
	}

	slot = utrd->task_tag - 1;

	data = mmio_read_32(ufs_params.reg_base + UTRLDBR);
	assert((data & (1 << slot)) == 0);
	/*
	 * Invalidate the header after DMA read operation has
	 * completed to avoid cpu referring to the prefetched
	 * data brought in before DMA completion.
	 */
	inv_dcache_range((uintptr_t)hd, UFS_DESC_SIZE);
	assert(hd->ocs == OCS_SUCCESS);
	assert((resp->trans_type & TRANS_TYPE_CODE_MASK) == trans_type);

	sense = &resp->sd.sense;
	if (sense->resp_code == SENSE_DATA_VALID &&
	    sense->sense_key == SENSE_KEY_UNIT_ATTENTION && sense->asc == 0x29 &&
	    sense->ascq == 0) {
		WARN("Unit Attention Condition\n");
		return -EAGAIN;
	}

	(void)resp;
	(void)slot;
	(void)data;
	return 0;
}

static void ufs_send_cmd(utp_utrd_t *utrd, uint8_t cmd_op, uint8_t lun, int lba, uintptr_t buf,
			 size_t length)
{
	int result, i;

	for (i = 0; i < UFS_CMD_RETRIES; ++i) {
		get_utrd(utrd);
		result = ufs_prepare_cmd(utrd, cmd_op, lun, lba, buf, length);
		assert(result == 0);
		ufs_send_request(utrd->task_tag);
		result = ufs_check_resp(utrd, RESPONSE_UPIU, CMD_TIMEOUT_MS);
		if (result == 0 || result == -EIO) {
			break;
		}
	}
	assert(result == 0);
	(void)result;
}

#ifdef UFS_RESP_DEBUG
static void dump_upiu(utp_utrd_t *utrd)
{
	utrd_header_t *hd;
	int i;

	hd = (utrd_header_t *)utrd->header;
	INFO("utrd:0x%x, ruo:0x%x, rul:0x%x, ocs:0x%x, UTRLDBR:0x%x\n",
		(unsigned int)(uintptr_t)utrd, hd->ruo, hd->rul, hd->ocs,
		mmio_read_32(ufs_params.reg_base + UTRLDBR));
	for (i = 0; i < sizeof(utrd_header_t); i += 4) {
		INFO("[%lx]:0x%x\n",
			(uintptr_t)utrd->header + i,
			*(unsigned int *)((uintptr_t)utrd->header + i));
	}

	for (i = 0; i < sizeof(cmd_upiu_t); i += 4) {
		INFO("cmd[%lx]:0x%x\n",
			utrd->upiu + i,
			*(unsigned int *)(utrd->upiu + i));
	}
	for (i = 0; i < sizeof(resp_upiu_t); i += 4) {
		INFO("resp[%lx]:0x%x\n",
			utrd->resp_upiu + i,
			*(unsigned int *)(utrd->resp_upiu + i));
	}
	for (i = 0; i < sizeof(prdt_t); i += 4) {
		INFO("prdt[%lx]:0x%x\n",
			utrd->prdt + i,
			*(unsigned int *)(utrd->prdt + i));
	}
}
#endif

static void ufs_verify_init(void)
{
	utp_utrd_t utrd;
	int result;

	get_utrd(&utrd);
	ufs_prepare_nop_out(&utrd);
	ufs_send_request(utrd.task_tag);
	result = ufs_check_resp(&utrd, NOP_IN_UPIU, NOP_OUT_TIMEOUT_MS);
	assert(result == 0);
	(void)result;
}

static void ufs_verify_ready(void)
{
	utp_utrd_t utrd;
	ufs_send_cmd(&utrd, CDBCMD_TEST_UNIT_READY, 0, 0, 0, 0);
}

static void ufs_query(uint8_t op, uint8_t idn, uint8_t index, uint8_t sel,
		      uintptr_t buf, size_t size)
{
	utp_utrd_t utrd;
	query_resp_upiu_t *resp;
	int result;

	switch (op) {
	case QUERY_READ_FLAG:
	case QUERY_READ_ATTR:
	case QUERY_READ_DESC:
	case QUERY_WRITE_DESC:
	case QUERY_WRITE_ATTR:
		assert(((buf & 3) == 0) && (size != 0));
		break;
	default:
		/* Do nothing in default case */
		break;
	}
	get_utrd(&utrd);
	ufs_prepare_query(&utrd, op, idn, index, sel, buf, size);
	ufs_send_request(utrd.task_tag);
	result = ufs_check_resp(&utrd, QUERY_RESPONSE_UPIU, QUERY_REQ_TIMEOUT_MS);
	assert(result == 0);
	resp = (query_resp_upiu_t *)utrd.resp_upiu;
#ifdef UFS_RESP_DEBUG
	dump_upiu(&utrd);
#endif
	assert(resp->query_resp == QUERY_RESP_SUCCESS);

	switch (op) {
	case QUERY_READ_FLAG:
		*(uint32_t *)buf = (uint32_t)resp->ts.flag.value;
		break;
	case QUERY_READ_DESC:
		memcpy((void *)buf,
		       (void *)(utrd.resp_upiu + sizeof(query_resp_upiu_t)),
		       size);
		break;
	case QUERY_READ_ATTR:
		*(uint32_t *)buf = htobe32(resp->ts.attr.value);
		break;
	default:
		/* Do nothing in default case */
		break;
	}
	(void)result;
}

unsigned int ufs_read_attr(int idn)
{
	unsigned int value;

	ufs_query(QUERY_READ_ATTR, idn, 0, 0,
		  (uintptr_t)&value, sizeof(value));
	return value;
}

void ufs_write_attr(int idn, unsigned int value)
{
	ufs_query(QUERY_WRITE_ATTR, idn, 0, 0,
		  (uintptr_t)&value, sizeof(value));
}

unsigned int ufs_read_flag(int idn)
{
	unsigned int value;

	ufs_query(QUERY_READ_FLAG, idn, 0, 0,
		  (uintptr_t)&value, sizeof(value));
	return value;
}

void ufs_set_flag(int idn)
{
	ufs_query(QUERY_SET_FLAG, idn, 0, 0, 0, 0);
}

void ufs_clear_flag(int idn)
{
	ufs_query(QUERY_CLEAR_FLAG, idn, 0, 0, 0, 0);
}

void ufs_read_desc(int idn, int index, uintptr_t buf, size_t size)
{
	ufs_query(QUERY_READ_DESC, idn, index, 0, buf, size);
}

void ufs_write_desc(int idn, int index, uintptr_t buf, size_t size)
{
	ufs_query(QUERY_WRITE_DESC, idn, index, 0, buf, size);
}

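/*
 * ufs_read_capacity - issue READ CAPACITY(10) for the given LUN
 *
 * The first two 32-bit big-endian words of the returned parameter data
 * hold the last logical block address and the logical block length in
 * bytes. The command is retried while the device reports a Unit
 * Attention condition (typically right after power-on or reset).
 */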
static int ufs_read_capacity(int lun, unsigned int *num, unsigned int *size)
{
	utp_utrd_t utrd;
	resp_upiu_t *resp;
	sense_data_t *sense;
	unsigned char data[CACHE_WRITEBACK_GRANULE << 1];
	uintptr_t buf;
	int retries = UFS_READ_CAPACITY_RETRIES;

	assert((ufs_params.reg_base != 0) &&
	       (ufs_params.desc_base != 0) &&
	       (ufs_params.desc_size >= UFS_DESC_SIZE) &&
	       (num != NULL) && (size != NULL));

	/* align buf address */
	buf = (uintptr_t)data;
	buf = (buf + CACHE_WRITEBACK_GRANULE - 1) &
	      ~(CACHE_WRITEBACK_GRANULE - 1);
	do {
		ufs_send_cmd(&utrd, CDBCMD_READ_CAPACITY_10, lun, 0,
			    buf, READ_CAPACITY_LENGTH);
#ifdef UFS_RESP_DEBUG
		dump_upiu(&utrd);
#endif
		resp = (resp_upiu_t *)utrd.resp_upiu;
		sense = &resp->sd.sense;
		if (!((sense->resp_code == SENSE_DATA_VALID) &&
		    (sense->sense_key == SENSE_KEY_UNIT_ATTENTION) &&
		    (sense->asc == 0x29) && (sense->ascq == 0))) {
			inv_dcache_range(buf, CACHE_WRITEBACK_GRANULE);
			/* last logical block address */
			*num = be32toh(*(unsigned int *)buf);
			if (*num)
				*num += 1;
			/* logical block length in bytes */
			*size = be32toh(*(unsigned int *)(buf + 4));

			return 0;
		}

	} while (retries-- > 0);

	return -ETIMEDOUT;
}

size_t ufs_read_blocks(int lun, int lba, uintptr_t buf, size_t size)
{
	utp_utrd_t utrd;
	resp_upiu_t *resp;

	assert((ufs_params.reg_base != 0) &&
	       (ufs_params.desc_base != 0) &&
	       (ufs_params.desc_size >= UFS_DESC_SIZE));

	ufs_send_cmd(&utrd, CDBCMD_READ_10, lun, lba, buf, size);
#ifdef UFS_RESP_DEBUG
	dump_upiu(&utrd);
#endif
	/*
	 * Invalidate prefetched cache contents before cpu
	 * accesses the buf.
	 */
	inv_dcache_range(buf, size);
	resp = (resp_upiu_t *)utrd.resp_upiu;
	return size - resp->res_trans_cnt;
}

size_t ufs_write_blocks(int lun, int lba, const uintptr_t buf, size_t size)
{
	utp_utrd_t utrd;
	resp_upiu_t *resp;

	assert((ufs_params.reg_base != 0) &&
	       (ufs_params.desc_base != 0) &&
	       (ufs_params.desc_size >= UFS_DESC_SIZE));

	ufs_send_cmd(&utrd, CDBCMD_WRITE_10, lun, lba, buf, size);
#ifdef UFS_RESP_DEBUG
	dump_upiu(&utrd);
#endif
	resp = (resp_upiu_t *)utrd.resp_upiu;
	return size - resp->res_trans_cnt;
}

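/*
 * ufs_set_fdevice_init - trigger device-side initialization
 *
 * Sets the fDeviceInit flag and then polls it until the device clears it
 * (or FDEVICEINIT_TIMEOUT_MS expires), which indicates that device
 * initialization has completed.
 */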
static int ufs_set_fdevice_init(void)
{
	unsigned int result;
	int timeout;

	ufs_set_flag(FLAG_DEVICE_INIT);

	timeout = FDEVICEINIT_TIMEOUT_MS;
	do {
		result = ufs_read_flag(FLAG_DEVICE_INIT);
		if (!result) {
			break;
		}
		mdelay(5);
		timeout -= 5;
	} while (timeout > 0);

	if (result != 0U) {
		return -ETIMEDOUT;
	}

	return 0;
}

static void ufs_enum(void)
{
	unsigned int blk_num, blk_size;
	int i, result;

	mmio_write_32(ufs_params.reg_base + UTRLBA,
		      ufs_params.desc_base & UINT32_MAX);
	mmio_write_32(ufs_params.reg_base + UTRLBAU,
		      (ufs_params.desc_base >> 32) & UINT32_MAX);

	ufs_verify_init();
	ufs_verify_ready();

	result = ufs_set_fdevice_init();
	assert(result == 0);

	blk_num = 0;
	blk_size = 0;

	/* dump available LUNs */
	for (i = 0; i < UFS_MAX_LUNS; i++) {
		result = ufs_read_capacity(i, &blk_num, &blk_size);
		if (result != 0) {
			WARN("UFS LUN%d dump failed\n", i);
		}
		if (blk_num && blk_size) {
			INFO("UFS LUN%d contains %d blocks with %d-byte size\n",
			     i, blk_num, blk_size);
		}
	}

	(void)result;
}

static void ufs_get_device_info(struct ufs_dev_desc *card_data)
{
	uint8_t desc_buf[DESC_DEVICE_MAX_SIZE];

	ufs_query(QUERY_READ_DESC, DESC_TYPE_DEVICE, 0, 0,
				(uintptr_t)desc_buf, DESC_DEVICE_MAX_SIZE);

	/*
	 * Get the vendor (wManufacturerID) and Bank Index, which are stored
	 * in big-endian format in the device descriptor.
	 */
	card_data->wmanufacturerid = (uint16_t)((desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8) |
				     (desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]));
}

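/*
 * ufs_init - reset the host controller, bring up the link and enumerate
 * the device (or re-attach to an already initialized controller when
 * UFS_FLAGS_SKIPINIT is set).
 *
 * Minimal usage sketch from platform code. The symbols used below
 * (plat_ufs_phy_init, PLAT_UFS_BASE, etc.) are illustrative placeholders,
 * not part of this driver:
 *
 *	static const ufs_ops_t plat_ufs_ops = {
 *		.phy_init         = plat_ufs_phy_init,
 *		.phy_set_pwr_mode = plat_ufs_phy_set_pwr_mode,
 *	};
 *	static ufs_params_t plat_ufs_params = {
 *		.reg_base  = PLAT_UFS_BASE,
 *		.desc_base = PLAT_UFS_DESC_BASE,
 *		.desc_size = PLAT_UFS_DESC_SIZE,
 *	};
 *
 *	ufs_init(&plat_ufs_ops, &plat_ufs_params);
 */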
int ufs_init(const ufs_ops_t *ops, ufs_params_t *params)
{
	int result;
	unsigned int data;
	uic_cmd_t cmd;
	struct ufs_dev_desc card = {0};

	assert((params != NULL) &&
	       (params->reg_base != 0) &&
	       (params->desc_base != 0) &&
	       (params->desc_size >= UFS_DESC_SIZE));

	memcpy(&ufs_params, params, sizeof(ufs_params_t));

	/* 0 means 1 slot */
	nutrs = (mmio_read_32(ufs_params.reg_base + CAP) & CAP_NUTRS_MASK) + 1;
	if (nutrs > (ufs_params.desc_size / UFS_DESC_SIZE)) {
		nutrs = ufs_params.desc_size / UFS_DESC_SIZE;
	}

	if (ufs_params.flags & UFS_FLAGS_SKIPINIT) {
		mmio_write_32(ufs_params.reg_base + UTRLBA,
			      ufs_params.desc_base & UINT32_MAX);
		mmio_write_32(ufs_params.reg_base + UTRLBAU,
			      (ufs_params.desc_base >> 32) & UINT32_MAX);

		result = ufshc_dme_get(0x1571, 0, &data);
		assert(result == 0);
		result = ufshc_dme_get(0x41, 0, &data);
		assert(result == 0);
		if (data == 1) {
			/* prepare to exit hibernate mode */
			memset(&cmd, 0, sizeof(uic_cmd_t));
			cmd.op = DME_HIBERNATE_EXIT;
			result = ufshc_send_uic_cmd(ufs_params.reg_base,
						    &cmd);
			assert(result == 0);
			data = mmio_read_32(ufs_params.reg_base + UCMDARG2);
			assert(data == 0);
			do {
				data = mmio_read_32(ufs_params.reg_base + IS);
			} while ((data & UFS_INT_UHXS) == 0);
			mmio_write_32(ufs_params.reg_base + IS, UFS_INT_UHXS);
			data = mmio_read_32(ufs_params.reg_base + HCS);
			assert((data & HCS_UPMCRS_MASK) == HCS_PWR_LOCAL);
		}
		result = ufshc_dme_get(0x1568, 0, &data);
		assert(result == 0);
		assert((data > 0) && (data <= 3));
	} else {
		assert((ops != NULL) && (ops->phy_init != NULL) &&
		       (ops->phy_set_pwr_mode != NULL));

		result = ufshc_reset(ufs_params.reg_base);
		assert(result == 0);
		ops->phy_init(&ufs_params);
		result = ufshc_link_startup(ufs_params.reg_base);
		assert(result == 0);

		/* enable all interrupts */
		data = UFS_INT_UCCS | UFS_INT_UHES | UFS_INT_UHXS | UFS_INT_UPMS;
		data |= UFS_INT_UTRCS | UFS_INT_ERR;
		mmio_write_32(ufs_params.reg_base + IE, data);

		ufs_enum();

		ufs_get_device_info(&card);
		if (card.wmanufacturerid == UFS_VENDOR_SKHYNIX) {
			ufs_params.flags |= UFS_FLAGS_VENDOR_SKHYNIX;
		}

		ops->phy_set_pwr_mode(&ufs_params);
	}

	(void)result;
	return 0;
}