/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <endian.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <platform_def.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <drivers/ufs.h>
#include <lib/mmio.h>

#define CDB_ADDR_MASK			127
#define ALIGN_CDB(x)			(((x) + CDB_ADDR_MASK) & ~CDB_ADDR_MASK)
#define ALIGN_8(x)			(((x) + 7) & ~7)

#define UFS_DESC_SIZE			0x400
#define MAX_UFS_DESC_SIZE		0x8000		/* 32 descriptors */

#define MAX_PRDT_SIZE			0x40000		/* 256KB */

static ufs_params_t ufs_params;
static int nutrs;	/* Number of UTP Transfer Request Slots */

/*
 * ufs_uic_error_handler - UIC error interrupts handler
 * @ignore_linereset: set to ignore PA_LAYER_GEN_ERR (UIC error)
 *
 * Returns
 * 0 - ignore error
 * -EIO - fatal error, needs re-init
 * -EAGAIN - non-fatal error, retries are sufficient
 */
static int ufs_uic_error_handler(bool ignore_linereset)
{
	uint32_t data;
	int result = 0;

	data = mmio_read_32(ufs_params.reg_base + UECPA);
	if (data & UFS_UIC_PA_ERROR_MASK) {
		if (data & PA_LAYER_GEN_ERR) {
			if (!ignore_linereset) {
				return -EIO;
			}
		} else {
			result = -EAGAIN;
		}
	}

	data = mmio_read_32(ufs_params.reg_base + UECDL);
	if (data & UFS_UIC_DL_ERROR_MASK) {
		if (data & PA_INIT_ERR) {
			return -EIO;
		}
		result = -EAGAIN;
	}

	/* NL/TL/DME errors require retries */
	data = mmio_read_32(ufs_params.reg_base + UECN);
	if (data & UFS_UIC_NL_ERROR_MASK) {
		result = -EAGAIN;
	}

	data = mmio_read_32(ufs_params.reg_base + UECT);
	if (data & UFS_UIC_TL_ERROR_MASK) {
		result = -EAGAIN;
	}

	data = mmio_read_32(ufs_params.reg_base + UECDME);
	if (data & UFS_UIC_DME_ERROR_MASK) {
		result = -EAGAIN;
	}

	return result;
}

/*
 * ufs_error_handler - error interrupts handler
 * @status: interrupt status
 * @ignore_linereset: set to ignore PA_LAYER_GEN_ERR (UIC error)
 *
 * Returns
 * 0 - ignore error
 * -EIO - fatal error, needs re-init
 * -EAGAIN - non-fatal error, retries are sufficient
 */
static int ufs_error_handler(uint32_t status, bool ignore_linereset)
{
	int result;

	if (status & UFS_INT_UE) {
		result = ufs_uic_error_handler(ignore_linereset);
		if (result != 0) {
			return result;
		}
	}

	/* Return I/O error on fatal error; it is up to the caller to re-init UFS */
	if (status & UFS_INT_FATAL) {
		return -EIO;
	}

	/* Retry for non-fatal errors */
	return -EAGAIN;
}

/*
 * ufs_wait_for_int_status - wait for expected interrupt status
 * @expected_status: expected interrupt status bit
 * @timeout_ms: timeout in milliseconds to poll for
 * @ignore_linereset: set to ignore PA_LAYER_GEN_ERR (UIC error)
 *
 * Returns
 * 0 - received expected interrupt and cleared it
 * -EIO - fatal error, needs re-init
 * -EAGAIN - non-fatal error, caller can retry
 * -ETIMEDOUT - timed out waiting for interrupt status
 */
static int ufs_wait_for_int_status(const uint32_t expected_status,
				   unsigned int timeout_ms,
				   bool ignore_linereset)
{
	uint32_t interrupt_status, interrupts_enabled;
	int result = 0;

	interrupts_enabled = mmio_read_32(ufs_params.reg_base + IE);
	do {
		interrupt_status = mmio_read_32(ufs_params.reg_base + IS) & interrupts_enabled;
		if (interrupt_status & UFS_INT_ERR) {
			mmio_write_32(ufs_params.reg_base + IS, interrupt_status & UFS_INT_ERR);
			result = ufs_error_handler(interrupt_status, ignore_linereset);
			if (result != 0) {
				return result;
			}
		}

		if (interrupt_status & expected_status) {
			break;
		}
		mdelay(1);
	} while (timeout_ms-- > 0);

	if (!(interrupt_status & expected_status)) {
		return -ETIMEDOUT;
	}

	mmio_write_32(ufs_params.reg_base + IS, expected_status);

	return result;
}

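/*
 * ufshc_send_uic_cmd - issue a UIC command and wait for its completion
 * @base: UFS host controller register base
 * @cmd: UIC command opcode and arguments to issue
 *
 * Waits for HCS.UCRDY, clears pending interrupts, programs UCMDARG1-3 and
 * UICCMD, then waits for UFS_INT_UCCS. Returns the UIC configuration result
 * code on success or a negative errno value on failure.
 */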
int ufshc_send_uic_cmd(uintptr_t base, uic_cmd_t *cmd)
{
	unsigned int data;
	int result, retries;

	if (base == 0 || cmd == NULL)
		return -EINVAL;

	for (retries = 0; retries < 100; retries++) {
		data = mmio_read_32(base + HCS);
		if ((data & HCS_UCRDY) != 0) {
			break;
		}
		mdelay(1);
	}
	if (retries >= 100) {
		return -EBUSY;
	}

	mmio_write_32(base + IS, ~0);
	mmio_write_32(base + UCMDARG1, cmd->arg1);
	mmio_write_32(base + UCMDARG2, cmd->arg2);
	mmio_write_32(base + UCMDARG3, cmd->arg3);
	mmio_write_32(base + UICCMD, cmd->op);

	result = ufs_wait_for_int_status(UFS_INT_UCCS, UIC_CMD_TIMEOUT_MS,
					 cmd->op == DME_SET);
	if (result != 0) {
		return result;
	}

	return mmio_read_32(base + UCMDARG2) & CONFIG_RESULT_CODE_MASK;
}

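/*
 * ufshc_dme_get - read a DME attribute with the UIC DME_GET command
 * @attr: attribute identifier
 * @idx: GenSelectorIndex
 * @val: location to store the attribute value
 *
 * Retries up to UFS_UIC_COMMAND_RETRIES times unless a fatal (-EIO) error
 * is reported.
 */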
int ufshc_dme_get(unsigned int attr, unsigned int idx, unsigned int *val)
{
	uintptr_t base;
	int result, retries;
	uic_cmd_t cmd;

	assert(ufs_params.reg_base != 0);

	if (val == NULL)
		return -EINVAL;

	base = ufs_params.reg_base;
	cmd.arg1 = (attr << 16) | GEN_SELECTOR_IDX(idx);
	cmd.arg2 = 0;
	cmd.arg3 = 0;
	cmd.op = DME_GET;

	for (retries = 0; retries < UFS_UIC_COMMAND_RETRIES; ++retries) {
		result = ufshc_send_uic_cmd(base, &cmd);
		if (result == 0)
			break;
		/* -EIO requires UFS re-init */
		if (result == -EIO) {
			return result;
		}
	}
	if (retries >= UFS_UIC_COMMAND_RETRIES)
		return -EIO;

	*val = mmio_read_32(base + UCMDARG3);
	return 0;
}

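/*
 * ufshc_dme_set - write a DME attribute with the UIC DME_SET command
 * @attr: attribute identifier
 * @idx: GenSelectorIndex
 * @val: value to write
 *
 * Retries up to UFS_UIC_COMMAND_RETRIES times unless a fatal (-EIO) error
 * is reported.
 */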
int ufshc_dme_set(unsigned int attr, unsigned int idx, unsigned int val)
{
	uintptr_t base;
	int result, retries;
	uic_cmd_t cmd;

	assert(ufs_params.reg_base != 0);

	base = ufs_params.reg_base;
	cmd.arg1 = (attr << 16) | GEN_SELECTOR_IDX(idx);
	cmd.arg2 = 0;
	cmd.arg3 = val;
	cmd.op = DME_SET;

	for (retries = 0; retries < UFS_UIC_COMMAND_RETRIES; ++retries) {
		result = ufshc_send_uic_cmd(base, &cmd);
		if (result == 0)
			break;
		/* -EIO requires UFS re-init */
		if (result == -EIO) {
			return result;
		}
	}
	if (retries >= UFS_UIC_COMMAND_RETRIES)
		return -EIO;

	return 0;
}

static int ufshc_hce_enable(uintptr_t base)
{
	unsigned int data;
	int retries;

	/* Enable Host Controller */
	mmio_write_32(base + HCE, HCE_ENABLE);

	/* Wait until the basic initialization sequence has completed */
	for (retries = 0; retries < HCE_ENABLE_INNER_RETRIES; ++retries) {
		data = mmio_read_32(base + HCE);
		if (data & HCE_ENABLE) {
			break;
		}
		udelay(HCE_ENABLE_TIMEOUT_US);
	}
	if (retries >= HCE_ENABLE_INNER_RETRIES) {
		return -ETIMEDOUT;
	}

	return 0;
}

static int ufshc_hce_disable(uintptr_t base)
{
	unsigned int data;
	int timeout;

	/* Disable Host Controller */
	mmio_write_32(base + HCE, HCE_DISABLE);
	timeout = HCE_DISABLE_TIMEOUT_US;
	do {
		data = mmio_read_32(base + HCE);
		if ((data & HCE_ENABLE) == HCE_DISABLE) {
			break;
		}
		udelay(1);
	} while (--timeout > 0);

	if (timeout <= 0) {
		return -ETIMEDOUT;
	}

	return 0;
}

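/*
 * ufshc_reset - bring the host controller to a known enabled state
 * @base: UFS host controller register base
 *
 * Disables the controller if it is currently enabled, re-enables it with
 * retries, and unmasks only the UIC interrupts; the remaining interrupts
 * stay masked until the link is up to avoid spurious errors during link-up.
 */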
static int ufshc_reset(uintptr_t base)
{
	unsigned int data;
	int retries, result;

	/* Disable the controller if it is already enabled */
	if (mmio_read_32(base + HCE) & HCE_ENABLE) {
		result = ufshc_hce_disable(base);
		if (result != 0) {
			return -EIO;
		}
	}

	for (retries = 0; retries < HCE_ENABLE_OUTER_RETRIES; ++retries) {
		result = ufshc_hce_enable(base);
		if (result == 0) {
			break;
		}
	}
	if (retries >= HCE_ENABLE_OUTER_RETRIES) {
		return -EIO;
	}

	/*
	 * Enable UIC interrupts alone. We can ignore other interrupts until
	 * the link is up, as there might be spurious error interrupts during
	 * link-up.
	 */
	data = UFS_INT_UCCS | UFS_INT_UHES | UFS_INT_UHXS | UFS_INT_UPMS;
	mmio_write_32(base + IE, data);

	return 0;
}

static int ufshc_dme_link_startup(uintptr_t base)
{
	uic_cmd_t cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op = DME_LINKSTARTUP;
	return ufshc_send_uic_cmd(base, &cmd);
}

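/*
 * ufshc_link_startup - establish the UniPro link with DME_LINKSTARTUP
 * @base: UFS host controller register base
 *
 * Retries link startup up to DME_LINKSTARTUP_RETRIES times, resetting the
 * controller between attempts, then clears the link-startup status and any
 * line-reset induced UIC error before returning.
 */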
static int ufshc_link_startup(uintptr_t base)
{
	int data, result;
	int retries;

	for (retries = DME_LINKSTARTUP_RETRIES; retries > 0; retries--) {
		result = ufshc_dme_link_startup(base);
		if (result != 0) {
			/* Reset controller before trying again */
			result = ufshc_reset(base);
			if (result != 0) {
				return result;
			}
			continue;
		}
		assert(mmio_read_32(base + HCS) & HCS_DP);
		data = mmio_read_32(base + IS);
		if (data & UFS_INT_ULSS)
			mmio_write_32(base + IS, UFS_INT_ULSS);

		/* clear UE set due to line-reset */
		if (data & UFS_INT_UE) {
			mmio_write_32(base + IS, UFS_INT_UE);
		}
		/* clear line-reset; UECPA is cleared on read */
		mmio_read_32(base + UECPA);
		return 0;
	}
	return -EIO;
}

/* Read the doorbell register to check whether slot zero is available */
static int is_slot_available(void)
{
	if (mmio_read_32(ufs_params.reg_base + UTRLDBR) & 0x1) {
		return -EBUSY;
	}
	return 0;
}

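/*
 * get_utrd - claim slot zero and lay out a UTP Transfer Request Descriptor
 * @utrd: bookkeeping structure to fill in
 *
 * Clears the descriptor memory and computes the offsets and sizes of the
 * command UPIU, response UPIU and PRDT, honouring the 128-byte alignment
 * required for the CDB address.
 */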
static void get_utrd(utp_utrd_t *utrd)
{
	uintptr_t base;
	int result;
	utrd_header_t *hd;

	assert(utrd != NULL);
	result = is_slot_available();
	assert(result == 0);

	/* clear utrd */
	memset((void *)utrd, 0, sizeof(utp_utrd_t));
	base = ufs_params.desc_base;
	/* clear the descriptor */
	memset((void *)base, 0, UFS_DESC_SIZE);

	utrd->header = base;
	utrd->task_tag = 1; /* We always use the first slot */
	/* The CDB address must be aligned to 128 bytes */
	utrd->upiu = ALIGN_CDB(utrd->header + sizeof(utrd_header_t));
	utrd->resp_upiu = ALIGN_8(utrd->upiu + sizeof(cmd_upiu_t));
	utrd->size_upiu = utrd->resp_upiu - utrd->upiu;
	utrd->size_resp_upiu = ALIGN_8(sizeof(resp_upiu_t));
	utrd->prdt = utrd->resp_upiu + utrd->size_resp_upiu;

	hd = (utrd_header_t *)utrd->header;
	hd->ucdba = utrd->upiu & UINT32_MAX;
	hd->ucdbau = (utrd->upiu >> 32) & UINT32_MAX;
	/* Both RUL and RUO are expressed in DWORD units */
	hd->rul = utrd->size_resp_upiu >> 2;
	hd->ruo = utrd->size_upiu >> 2;
	(void)result;
}

/*
 * Prepare UTRD, Command UPIU, Response UPIU.
 */
static int ufs_prepare_cmd(utp_utrd_t *utrd, uint8_t op, uint8_t lun,
			   int lba, uintptr_t buf, size_t length)
{
	utrd_header_t *hd;
	cmd_upiu_t *upiu;
	prdt_t *prdt;
	unsigned int ulba;
	unsigned int lba_cnt;
	uintptr_t desc_limit;
	uintptr_t prdt_end;

	hd = (utrd_header_t *)utrd->header;
	upiu = (cmd_upiu_t *)utrd->upiu;

	hd->i = 1;
	hd->ct = CT_UFS_STORAGE;
	hd->ocs = OCS_MASK;

	upiu->trans_type = CMD_UPIU;
	upiu->task_tag = utrd->task_tag;
	upiu->cdb[0] = op;
	ulba = (unsigned int)lba;
	lba_cnt = (unsigned int)(length >> UFS_BLOCK_SHIFT);
	switch (op) {
	case CDBCMD_TEST_UNIT_READY:
		break;
	case CDBCMD_READ_CAPACITY_10:
		hd->dd = DD_OUT;
		upiu->flags = UPIU_FLAGS_R | UPIU_FLAGS_ATTR_S;
		upiu->lun = lun;
		break;
	case CDBCMD_READ_10:
		hd->dd = DD_OUT;
		upiu->flags = UPIU_FLAGS_R | UPIU_FLAGS_ATTR_S;
		upiu->lun = lun;
		upiu->cdb[1] = RW_WITHOUT_CACHE;
		/* set logical block address */
		upiu->cdb[2] = (ulba >> 24) & 0xff;
		upiu->cdb[3] = (ulba >> 16) & 0xff;
		upiu->cdb[4] = (ulba >> 8) & 0xff;
		upiu->cdb[5] = ulba & 0xff;
		/* set transfer length */
		upiu->cdb[7] = (lba_cnt >> 8) & 0xff;
		upiu->cdb[8] = lba_cnt & 0xff;
		break;
	case CDBCMD_WRITE_10:
		hd->dd = DD_IN;
		upiu->flags = UPIU_FLAGS_W | UPIU_FLAGS_ATTR_S;
		upiu->lun = lun;
		upiu->cdb[1] = RW_WITHOUT_CACHE;
		/* set logical block address */
		upiu->cdb[2] = (ulba >> 24) & 0xff;
		upiu->cdb[3] = (ulba >> 16) & 0xff;
		upiu->cdb[4] = (ulba >> 8) & 0xff;
		upiu->cdb[5] = ulba & 0xff;
		/* set transfer length */
		upiu->cdb[7] = (lba_cnt >> 8) & 0xff;
		upiu->cdb[8] = lba_cnt & 0xff;
		break;
	default:
		assert(0);
		break;
	}
	if (hd->dd == DD_IN) {
		flush_dcache_range(buf, length);
	} else if (hd->dd == DD_OUT) {
		inv_dcache_range(buf, length);
	}

	utrd->prdt_length = 0;
	if (length) {
		upiu->exp_data_trans_len = htobe32(length);
		assert(lba_cnt <= UINT16_MAX);
		prdt = (prdt_t *)utrd->prdt;

		desc_limit = ufs_params.desc_base + ufs_params.desc_size;
		while (length > 0) {
			if ((uintptr_t)prdt + sizeof(prdt_t) > desc_limit) {
				ERROR("UFS: Exceeded descriptor limit. Image is too large\n");
				panic();
			}
			prdt->dba = (unsigned int)(buf & UINT32_MAX);
			prdt->dbau = (unsigned int)((buf >> 32) & UINT32_MAX);
			/* prdt->dbc counts from 0 */
			if (length > MAX_PRDT_SIZE) {
				prdt->dbc = MAX_PRDT_SIZE - 1;
				length = length - MAX_PRDT_SIZE;
			} else {
				prdt->dbc = length - 1;
				length = 0;
			}
			buf += MAX_PRDT_SIZE;
			prdt++;
			utrd->prdt_length++;
		}
		hd->prdtl = utrd->prdt_length;
		hd->prdto = (utrd->size_upiu + utrd->size_resp_upiu) >> 2;
	}

	prdt_end = utrd->prdt + utrd->prdt_length * sizeof(prdt_t);
	flush_dcache_range(utrd->header, prdt_end - utrd->header);
	return 0;
}

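/*
 * Prepare UTRD and Query Request UPIU for a descriptor, attribute or flag
 * query operation.
 */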
static int ufs_prepare_query(utp_utrd_t *utrd, uint8_t op, uint8_t idn,
			     uint8_t index, uint8_t sel,
			     uintptr_t buf, size_t length)
{
	utrd_header_t *hd;
	query_upiu_t *query_upiu;

	hd = (utrd_header_t *)utrd->header;
	query_upiu = (query_upiu_t *)utrd->upiu;

	hd->i = 1;
	hd->ct = CT_UFS_STORAGE;
	hd->ocs = OCS_MASK;

	query_upiu->trans_type = QUERY_REQUEST_UPIU;
	query_upiu->task_tag = utrd->task_tag;
	query_upiu->ts.desc.opcode = op;
	query_upiu->ts.desc.idn = idn;
	query_upiu->ts.desc.index = index;
	query_upiu->ts.desc.selector = sel;
	switch (op) {
	case QUERY_READ_DESC:
		query_upiu->query_func = QUERY_FUNC_STD_READ;
		query_upiu->ts.desc.length = htobe16(length);
		break;
	case QUERY_WRITE_DESC:
		query_upiu->query_func = QUERY_FUNC_STD_WRITE;
		query_upiu->ts.desc.length = htobe16(length);
		memcpy((void *)(utrd->upiu + sizeof(query_upiu_t)),
		       (void *)buf, length);
		break;
	case QUERY_READ_ATTR:
	case QUERY_READ_FLAG:
		query_upiu->query_func = QUERY_FUNC_STD_READ;
		break;
	case QUERY_CLEAR_FLAG:
	case QUERY_SET_FLAG:
		query_upiu->query_func = QUERY_FUNC_STD_WRITE;
		break;
	case QUERY_WRITE_ATTR:
		query_upiu->query_func = QUERY_FUNC_STD_WRITE;
		query_upiu->ts.attr.value = htobe32(*((uint32_t *)buf));
		break;
	default:
		assert(0);
		break;
	}
	flush_dcache_range((uintptr_t)utrd->header, UFS_DESC_SIZE);
	return 0;
}

static void ufs_prepare_nop_out(utp_utrd_t *utrd)
{
	utrd_header_t *hd;
	nop_out_upiu_t *nop_out;

	hd = (utrd_header_t *)utrd->header;
	nop_out = (nop_out_upiu_t *)utrd->upiu;

	hd->i = 1;
	hd->ct = CT_UFS_STORAGE;
	hd->ocs = OCS_MASK;

	nop_out->trans_type = 0;
	nop_out->task_tag = utrd->task_tag;
	flush_dcache_range((uintptr_t)utrd->header, UFS_DESC_SIZE);
}

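/*
 * Ring the doorbell for the given task tag: clear pending interrupts,
 * enable the transfer request list, program interrupt aggregation and set
 * the corresponding UTRLDBR bit.
 */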
static void ufs_send_request(int task_tag)
{
	unsigned int data;
	int slot;

	slot = task_tag - 1;
	/* clear all interrupts */
	mmio_write_32(ufs_params.reg_base + IS, ~0);

	mmio_write_32(ufs_params.reg_base + UTRLRSR, 1);
	assert(mmio_read_32(ufs_params.reg_base + UTRLRSR) == 1);

	data = UTRIACR_IAEN | UTRIACR_CTR | UTRIACR_IACTH(0x1F) |
	       UTRIACR_IATOVAL(0xFF);
	mmio_write_32(ufs_params.reg_base + UTRIACR, data);
	/* send request */
	mmio_setbits_32(ufs_params.reg_base + UTRLDBR, 1 << slot);
}

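/*
 * Wait for completion of the outstanding transfer request, invalidate the
 * descriptor from the data cache and sanity-check the response UPIU.
 * Returns 0 on success, -EAGAIN on a Unit Attention condition so that the
 * caller can retry, or a negative errno value on other failures.
 */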
static int ufs_check_resp(utp_utrd_t *utrd, int trans_type, unsigned int timeout_ms)
{
	utrd_header_t *hd;
	resp_upiu_t *resp;
	sense_data_t *sense;
	unsigned int data;
	int slot, result;

	hd = (utrd_header_t *)utrd->header;
	resp = (resp_upiu_t *)utrd->resp_upiu;

	result = ufs_wait_for_int_status(UFS_INT_UTRCS, timeout_ms, false);
	if (result != 0) {
		return result;
	}

	slot = utrd->task_tag - 1;

	data = mmio_read_32(ufs_params.reg_base + UTRLDBR);
	assert((data & (1 << slot)) == 0);
	/*
	 * Invalidate the header after the DMA read operation has completed,
	 * so that the CPU does not use prefetched data brought in before
	 * DMA completion.
	 */
	inv_dcache_range((uintptr_t)hd, UFS_DESC_SIZE);
	assert(hd->ocs == OCS_SUCCESS);
	assert((resp->trans_type & TRANS_TYPE_CODE_MASK) == trans_type);

	sense = &resp->sd.sense;
	if (sense->resp_code == SENSE_DATA_VALID &&
	    sense->sense_key == SENSE_KEY_UNIT_ATTENTION && sense->asc == 0x29 &&
	    sense->ascq == 0) {
		WARN("Unit Attention Condition\n");
		return -EAGAIN;
	}

	(void)resp;
	(void)slot;
	(void)data;
	return 0;
}

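/*
 * Build and issue a SCSI command UPIU in slot zero, retrying up to
 * UFS_CMD_RETRIES times unless a fatal error is reported.
 */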
static void ufs_send_cmd(utp_utrd_t *utrd, uint8_t cmd_op, uint8_t lun, int lba, uintptr_t buf,
			 size_t length)
{
	int result, i;

	for (i = 0; i < UFS_CMD_RETRIES; ++i) {
		get_utrd(utrd);
		result = ufs_prepare_cmd(utrd, cmd_op, lun, lba, buf, length);
		assert(result == 0);
		ufs_send_request(utrd->task_tag);
		result = ufs_check_resp(utrd, RESPONSE_UPIU, CMD_TIMEOUT_MS);
		if (result == 0 || result == -EIO) {
			break;
		}
	}
	assert(result == 0);
	(void)result;
}

#ifdef UFS_RESP_DEBUG
static void dump_upiu(utp_utrd_t *utrd)
{
	utrd_header_t *hd;
	int i;

	hd = (utrd_header_t *)utrd->header;
	INFO("utrd:0x%x, ruo:0x%x, rul:0x%x, ocs:0x%x, UTRLDBR:0x%x\n",
		(unsigned int)(uintptr_t)utrd, hd->ruo, hd->rul, hd->ocs,
		mmio_read_32(ufs_params.reg_base + UTRLDBR));
	for (i = 0; i < sizeof(utrd_header_t); i += 4) {
		INFO("[%lx]:0x%x\n",
			(uintptr_t)utrd->header + i,
			*(unsigned int *)((uintptr_t)utrd->header + i));
	}

	for (i = 0; i < sizeof(cmd_upiu_t); i += 4) {
		INFO("cmd[%lx]:0x%x\n",
			utrd->upiu + i,
			*(unsigned int *)(utrd->upiu + i));
	}
	for (i = 0; i < sizeof(resp_upiu_t); i += 4) {
		INFO("resp[%lx]:0x%x\n",
			utrd->resp_upiu + i,
			*(unsigned int *)(utrd->resp_upiu + i));
	}
	for (i = 0; i < sizeof(prdt_t); i += 4) {
		INFO("prdt[%lx]:0x%x\n",
			utrd->prdt + i,
			*(unsigned int *)(utrd->prdt + i));
	}
}
#endif

/* Verify that the device responds to a NOP OUT UPIU after initialization */
static void ufs_verify_init(void)
{
	utp_utrd_t utrd;
	int result;

	get_utrd(&utrd);
	ufs_prepare_nop_out(&utrd);
	ufs_send_request(utrd.task_tag);
	result = ufs_check_resp(&utrd, NOP_IN_UPIU, NOP_OUT_TIMEOUT_MS);
	assert(result == 0);
	(void)result;
}

/* Issue TEST UNIT READY to confirm that the device can accept commands */
static void ufs_verify_ready(void)
{
	utp_utrd_t utrd;

	ufs_send_cmd(&utrd, CDBCMD_TEST_UNIT_READY, 0, 0, 0, 0);
}

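/*
 * ufs_query - issue a query request and copy back the response payload
 * @op: query opcode (QUERY_READ_*, QUERY_WRITE_*, QUERY_SET/CLEAR_FLAG)
 * @idn: descriptor, attribute or flag identifier
 * @index: index within the identifier
 * @sel: selector
 * @buf: data buffer for the payload, where applicable
 * @size: payload size in bytes
 */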
static void ufs_query(uint8_t op, uint8_t idn, uint8_t index, uint8_t sel,
		      uintptr_t buf, size_t size)
{
	utp_utrd_t utrd;
	query_resp_upiu_t *resp;
	int result;

	switch (op) {
	case QUERY_READ_FLAG:
	case QUERY_READ_ATTR:
	case QUERY_READ_DESC:
	case QUERY_WRITE_DESC:
	case QUERY_WRITE_ATTR:
		assert(((buf & 3) == 0) && (size != 0));
		break;
	default:
		/* Do nothing in default case */
		break;
	}
	get_utrd(&utrd);
	ufs_prepare_query(&utrd, op, idn, index, sel, buf, size);
	ufs_send_request(utrd.task_tag);
	result = ufs_check_resp(&utrd, QUERY_RESPONSE_UPIU, QUERY_REQ_TIMEOUT_MS);
	assert(result == 0);
	resp = (query_resp_upiu_t *)utrd.resp_upiu;
#ifdef UFS_RESP_DEBUG
	dump_upiu(&utrd);
#endif
	assert(resp->query_resp == QUERY_RESP_SUCCESS);

	switch (op) {
	case QUERY_READ_FLAG:
		*(uint32_t *)buf = (uint32_t)resp->ts.flag.value;
		break;
	case QUERY_READ_DESC:
		memcpy((void *)buf,
		       (void *)(utrd.resp_upiu + sizeof(query_resp_upiu_t)),
		       size);
		break;
	case QUERY_READ_ATTR:
		*(uint32_t *)buf = htobe32(resp->ts.attr.value);
		break;
	default:
		/* Do nothing in default case */
		break;
	}
	(void)result;
}

unsigned int ufs_read_attr(int idn)
{
	unsigned int value;

	ufs_query(QUERY_READ_ATTR, idn, 0, 0,
		  (uintptr_t)&value, sizeof(value));
	return value;
}

void ufs_write_attr(int idn, unsigned int value)
{
	ufs_query(QUERY_WRITE_ATTR, idn, 0, 0,
		  (uintptr_t)&value, sizeof(value));
}

unsigned int ufs_read_flag(int idn)
{
	unsigned int value;

	ufs_query(QUERY_READ_FLAG, idn, 0, 0,
		  (uintptr_t)&value, sizeof(value));
	return value;
}

void ufs_set_flag(int idn)
{
	ufs_query(QUERY_SET_FLAG, idn, 0, 0, 0, 0);
}

void ufs_clear_flag(int idn)
{
	ufs_query(QUERY_CLEAR_FLAG, idn, 0, 0, 0, 0);
}

void ufs_read_desc(int idn, int index, uintptr_t buf, size_t size)
{
	ufs_query(QUERY_READ_DESC, idn, index, 0, buf, size);
}

void ufs_write_desc(int idn, int index, uintptr_t buf, size_t size)
{
	ufs_query(QUERY_WRITE_DESC, idn, index, 0, buf, size);
}

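/*
 * Issue READ CAPACITY (10) on the given LUN and return the number of
 * logical blocks and the block size in bytes, retrying while the device
 * reports a Unit Attention condition.
 */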
static int ufs_read_capacity(int lun, unsigned int *num, unsigned int *size)
{
	utp_utrd_t utrd;
	resp_upiu_t *resp;
	sense_data_t *sense;
	unsigned char data[CACHE_WRITEBACK_GRANULE << 1];
	uintptr_t buf;
	int retries = UFS_READ_CAPACITY_RETRIES;

	assert((ufs_params.reg_base != 0) &&
	       (ufs_params.desc_base != 0) &&
	       (ufs_params.desc_size >= UFS_DESC_SIZE) &&
	       (num != NULL) && (size != NULL));

	/* align buf address */
	buf = (uintptr_t)data;
	buf = (buf + CACHE_WRITEBACK_GRANULE - 1) &
	      ~(CACHE_WRITEBACK_GRANULE - 1);
	do {
		ufs_send_cmd(&utrd, CDBCMD_READ_CAPACITY_10, lun, 0,
			    buf, READ_CAPACITY_LENGTH);
#ifdef UFS_RESP_DEBUG
		dump_upiu(&utrd);
#endif
		resp = (resp_upiu_t *)utrd.resp_upiu;
		sense = &resp->sd.sense;
		if (!((sense->resp_code == SENSE_DATA_VALID) &&
		    (sense->sense_key == SENSE_KEY_UNIT_ATTENTION) &&
		    (sense->asc == 0x29) && (sense->ascq == 0))) {
			inv_dcache_range(buf, CACHE_WRITEBACK_GRANULE);
			/* last logical block address */
			*num = be32toh(*(unsigned int *)buf);
			if (*num)
				*num += 1;
			/* logical block length in bytes */
			*size = be32toh(*(unsigned int *)(buf + 4));

			return 0;
		}

	} while (retries-- > 0);

	return -ETIMEDOUT;
}

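/*
 * Read 'size' bytes starting at logical block 'lba' of the given LUN into
 * 'buf'. Returns the number of bytes transferred, i.e. the requested size
 * minus the residual transfer count reported by the device.
 */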
size_t ufs_read_blocks(int lun, int lba, uintptr_t buf, size_t size)
{
	utp_utrd_t utrd;
	resp_upiu_t *resp;

	assert((ufs_params.reg_base != 0) &&
	       (ufs_params.desc_base != 0) &&
	       (ufs_params.desc_size >= UFS_DESC_SIZE));

	ufs_send_cmd(&utrd, CDBCMD_READ_10, lun, lba, buf, size);
#ifdef UFS_RESP_DEBUG
	dump_upiu(&utrd);
#endif
	/*
	 * Invalidate prefetched cache contents before the CPU accesses the
	 * buffer.
	 */
	inv_dcache_range(buf, size);
	resp = (resp_upiu_t *)utrd.resp_upiu;
	return size - resp->res_trans_cnt;
}

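/*
 * Write 'size' bytes from 'buf' to the given LUN starting at logical block
 * 'lba'. Returns the number of bytes transferred, i.e. the requested size
 * minus the residual transfer count reported by the device.
 */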
size_t ufs_write_blocks(int lun, int lba, const uintptr_t buf, size_t size)
{
	utp_utrd_t utrd;
	resp_upiu_t *resp;

	assert((ufs_params.reg_base != 0) &&
	       (ufs_params.desc_base != 0) &&
	       (ufs_params.desc_size >= UFS_DESC_SIZE));

	ufs_send_cmd(&utrd, CDBCMD_WRITE_10, lun, lba, buf, size);
#ifdef UFS_RESP_DEBUG
	dump_upiu(&utrd);
#endif
	resp = (resp_upiu_t *)utrd.resp_upiu;
	return size - resp->res_trans_cnt;
}

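/*
 * Set the fDeviceInit flag and poll until the device clears it, indicating
 * that device initialization has completed.
 */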
static int ufs_set_fdevice_init(void)
{
	unsigned int result;
	int timeout;

	ufs_set_flag(FLAG_DEVICE_INIT);

	timeout = FDEVICEINIT_TIMEOUT_MS;
	do {
		result = ufs_read_flag(FLAG_DEVICE_INIT);
		if (!result) {
			break;
		}
		mdelay(5);
		timeout -= 5;
	} while (timeout > 0);

	if (result != 0U) {
		return -ETIMEDOUT;
	}

	return 0;
}

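/*
 * Program the transfer request list base, verify the device with NOP OUT
 * and TEST UNIT READY, complete device initialization (fDeviceInit) and
 * log the capacity of each LUN that reports one.
 */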
static void ufs_enum(void)
{
	unsigned int blk_num, blk_size;
	int i, result;

	mmio_write_32(ufs_params.reg_base + UTRLBA,
		      ufs_params.desc_base & UINT32_MAX);
	mmio_write_32(ufs_params.reg_base + UTRLBAU,
		      (ufs_params.desc_base >> 32) & UINT32_MAX);

	ufs_verify_init();
	ufs_verify_ready();

	result = ufs_set_fdevice_init();
	assert(result == 0);

	blk_num = 0;
	blk_size = 0;

	/* dump available LUNs */
	for (i = 0; i < UFS_MAX_LUNS; i++) {
		result = ufs_read_capacity(i, &blk_num, &blk_size);
		if (result != 0) {
			WARN("UFS LUN%d dump failed\n", i);
		}
		if (blk_num && blk_size) {
			INFO("UFS LUN%d contains %d blocks with %d-byte size\n",
			     i, blk_num, blk_size);
		}
	}

	(void)result;
}

static void ufs_get_device_info(struct ufs_dev_desc *card_data)
{
	uint8_t desc_buf[DESC_DEVICE_MAX_SIZE];

	ufs_query(QUERY_READ_DESC, DESC_TYPE_DEVICE, 0, 0,
				(uintptr_t)desc_buf, DESC_DEVICE_MAX_SIZE);

	/*
	 * The vendor identifier (wManufacturerID) is stored in big-endian
	 * format in the device descriptor.
	 */
	card_data->wmanufacturerid = (uint16_t)((desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8) |
				     (desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]));
}

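/*
 * ufs_init - initialize the UFS host controller and enumerate the device
 * @ops: platform hooks for PHY initialization and power mode setup
 * @params: register base, descriptor memory and flags for this controller
 *
 * When UFS_FLAGS_SKIPINIT is set, only the transfer request list base is
 * reprogrammed and the link is brought out of hibernate if needed;
 * otherwise the controller is reset, the link is started up and the LUNs
 * are enumerated.
 *
 * Illustrative usage sketch from a platform port (PLAT_UFS_BASE,
 * PLAT_UFS_DESC_BASE and plat_ufs_ops are hypothetical placeholders, not
 * part of this driver):
 *
 *	static ufs_params_t plat_params = {
 *		.reg_base	= PLAT_UFS_BASE,
 *		.desc_base	= PLAT_UFS_DESC_BASE,
 *		.desc_size	= UFS_DESC_SIZE,
 *	};
 *
 *	ufs_init(&plat_ufs_ops, &plat_params);
 *	ufs_read_blocks(0, 0, (uintptr_t)buffer, 1U << UFS_BLOCK_SHIFT);
 */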
int ufs_init(const ufs_ops_t *ops, ufs_params_t *params)
{
	int result;
	unsigned int data;
	uic_cmd_t cmd;
	struct ufs_dev_desc card = {0};

	assert((params != NULL) &&
	       (params->reg_base != 0) &&
	       (params->desc_base != 0) &&
	       (params->desc_size >= UFS_DESC_SIZE));

	memcpy(&ufs_params, params, sizeof(ufs_params_t));

	/* 0 means 1 slot */
	nutrs = (mmio_read_32(ufs_params.reg_base + CAP) & CAP_NUTRS_MASK) + 1;
	if (nutrs > (ufs_params.desc_size / UFS_DESC_SIZE)) {
		nutrs = ufs_params.desc_size / UFS_DESC_SIZE;
	}

	if (ufs_params.flags & UFS_FLAGS_SKIPINIT) {
		mmio_write_32(ufs_params.reg_base + UTRLBA,
			      ufs_params.desc_base & UINT32_MAX);
		mmio_write_32(ufs_params.reg_base + UTRLBAU,
			      (ufs_params.desc_base >> 32) & UINT32_MAX);

		result = ufshc_dme_get(0x1571, 0, &data);
		assert(result == 0);
		result = ufshc_dme_get(0x41, 0, &data);
		assert(result == 0);
		if (data == 1) {
			/* prepare to exit hibernate mode */
			memset(&cmd, 0, sizeof(uic_cmd_t));
			cmd.op = DME_HIBERNATE_EXIT;
			result = ufshc_send_uic_cmd(ufs_params.reg_base,
						    &cmd);
			assert(result == 0);
			data = mmio_read_32(ufs_params.reg_base + UCMDARG2);
			assert(data == 0);
			do {
				data = mmio_read_32(ufs_params.reg_base + IS);
			} while ((data & UFS_INT_UHXS) == 0);
			mmio_write_32(ufs_params.reg_base + IS, UFS_INT_UHXS);
			data = mmio_read_32(ufs_params.reg_base + HCS);
			assert((data & HCS_UPMCRS_MASK) == HCS_PWR_LOCAL);
		}
		result = ufshc_dme_get(0x1568, 0, &data);
		assert(result == 0);
		assert((data > 0) && (data <= 3));
	} else {
		assert((ops != NULL) && (ops->phy_init != NULL) &&
		       (ops->phy_set_pwr_mode != NULL));

		result = ufshc_reset(ufs_params.reg_base);
		assert(result == 0);
		ops->phy_init(&ufs_params);
		result = ufshc_link_startup(ufs_params.reg_base);
		assert(result == 0);

		/* enable all interrupts */
		data = UFS_INT_UCCS | UFS_INT_UHES | UFS_INT_UHXS | UFS_INT_UPMS;
		data |= UFS_INT_UTRCS | UFS_INT_ERR;
		mmio_write_32(ufs_params.reg_base + IE, data);

		ufs_enum();

		ufs_get_device_info(&card);
		if (card.wmanufacturerid == UFS_VENDOR_SKHYNIX) {
			ufs_params.flags |= UFS_FLAGS_VENDOR_SKHYNIX;
		}

		ops->phy_set_pwr_mode(&ufs_params);
	}

	(void)result;
	return 0;
}