/*
 * Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author:
 *   Wei WANG (wei_wang@realsil.com.cn)
 *   Micky Ching (micky_ching@realsil.com.cn)
 */

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>

#include "rtsx.h"

/***********************************************************************
 * Scatter-gather transfer buffer access routines
 ***********************************************************************/

/*
 * Copy a buffer of length buflen to/from the srb's transfer buffer.
 * (Note: for scatter-gather transfers (scsi_sg_count(srb) > 0),
 * scsi_sglist(srb) points to a list of s-g entries and we ignore
 * scsi_bufflen(srb).  For non-scatter-gather transfers, scsi_sglist(srb)
 * points to the transfer buffer itself and scsi_bufflen(srb) is the
 * buffer's length.)  Update the *index and *offset variables so that
 * the next copy will pick up from where this one left off.
 */

unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
				       unsigned int buflen,
				       struct scsi_cmnd *srb,
				       unsigned int *index,
				       unsigned int *offset,
				       enum xfer_buf_dir dir)
{
	unsigned int cnt;

	/* If not using scatter-gather, just transfer the data directly. */
	if (scsi_sg_count(srb) == 0) {
		unsigned char *sgbuffer;

		if (*offset >= scsi_bufflen(srb))
			return 0;
		cnt = min(buflen, scsi_bufflen(srb) - *offset);

		sgbuffer = (unsigned char *)scsi_sglist(srb) + *offset;

		if (dir == TO_XFER_BUF)
			memcpy(sgbuffer, buffer, cnt);
		else
			memcpy(buffer, sgbuffer, cnt);
		*offset += cnt;

	/*
	 * Using scatter-gather.  We have to go through the list one entry
	 * at a time.  Each s-g entry contains some number of pages, and
	 * each page has to be kmap()'ed separately.
	 */
	} else {
		struct scatterlist *sg =
				(struct scatterlist *)scsi_sglist(srb)
				+ *index;

		/*
		 * This loop handles a single s-g list entry, which may
		 * include multiple pages.  Find the initial page structure
		 * and the starting offset within the page, and update
		 * the *offset and *index values for the next loop.
		 */
		cnt = 0;
		while (cnt < buflen && *index < scsi_sg_count(srb)) {
			struct page *page = sg_page(sg) +
					((sg->offset + *offset) >> PAGE_SHIFT);
			unsigned int poff = (sg->offset + *offset) &
					    (PAGE_SIZE - 1);
			unsigned int sglen = sg->length - *offset;

			if (sglen > buflen - cnt) {
				/* Transfer ends within this s-g entry */
				sglen = buflen - cnt;
				*offset += sglen;
			} else {
				/* Transfer continues to next s-g entry */
				*offset = 0;
				++*index;
				++sg;
			}

			while (sglen > 0) {
				unsigned int plen = min(sglen, (unsigned int)
						PAGE_SIZE - poff);
				unsigned char *ptr = kmap(page);

				if (dir == TO_XFER_BUF)
					memcpy(ptr + poff, buffer + cnt, plen);
				else
					memcpy(buffer + cnt, ptr + poff, plen);
				kunmap(page);

				/* Start at the beginning of the next page */
				poff = 0;
				++page;
				cnt += plen;
				sglen -= plen;
			}
		}
	}

	/* Return the amount actually transferred */
	return cnt;
}

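/*
 * A typical caller (a sketch only; data, total and CHUNK are illustrative
 * names, not taken from this driver) walks a large payload through the
 * transfer buffer in chunks, letting index/offset carry the position
 * between calls:
 *
 *	unsigned int index = 0, offset = 0, done = 0;
 *
 *	while (done < total) {
 *		unsigned int n = rtsx_stor_access_xfer_buf(data + done,
 *				min(total - done, CHUNK), srb,
 *				&index, &offset, TO_XFER_BUF);
 *		if (n == 0)
 *			break;
 *		done += n;
 *	}
 */
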
/*
 * Store the contents of buffer into srb's transfer buffer and set the
 * SCSI residue.
 */
void rtsx_stor_set_xfer_buf(unsigned char *buffer,
			    unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  TO_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}

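/*
 * Fetch the contents of srb's transfer buffer into buffer and set the
 * SCSI residue; the mirror image of rtsx_stor_set_xfer_buf().
 */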
void rtsx_stor_get_xfer_buf(unsigned char *buffer,
			    unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  FROM_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}

/***********************************************************************
 * Transport routines
 ***********************************************************************/

/*
 * Invoke the transport and basic error-handling/recovery methods
 *
 * This is used to send the message to the device and receive the response.
 */
void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int result;

	result = rtsx_scsi_handler(srb, chip);

	/*
	 * If the command gets aborted by the higher layers, we need to
	 * short-circuit all other processing.
	 */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
		dev_dbg(rtsx_dev(chip), "-- command was aborted\n");
		srb->result = DID_ABORT << 16;
		goto handle_errors;
	}

	/* if there is a transport error, reset and don't auto-sense */
	if (result == TRANSPORT_ERROR) {
		dev_dbg(rtsx_dev(chip), "-- transport indicates error, resetting\n");
		srb->result = DID_ERROR << 16;
		goto handle_errors;
	}

	srb->result = SAM_STAT_GOOD;

	/*
	 * If we have a failure, we're going to do a REQUEST_SENSE
	 * automatically.  Note that we differentiate between a command
	 * "failure" and an "error" in the transport mechanism.
	 */
	if (result == TRANSPORT_FAILED) {
		/* set the result so the higher layers expect this data */
		srb->result = SAM_STAT_CHECK_CONDITION;
		memcpy(srb->sense_buffer,
		       (unsigned char *)&chip->sense_buffer[SCSI_LUN(srb)],
		       sizeof(struct sense_data_t));
	}

	return;

handle_errors:
	return;
}

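/*
 * Queue one host command.  As encoded below, each queued command is a
 * 32-bit little-endian word:
 *
 *	bits 31:30	cmd_type
 *	bits 29:16	register address (14 bits)
 *	bits 15:8	mask
 *	bits 7:0	data
 *
 * Commands accumulate in chip->host_cmds_ptr until one of the
 * rtsx_send_cmd*() helpers hands the buffer to the hardware.
 */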
void rtsx_add_cmd(struct rtsx_chip *chip,
		  u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
{
	__le32 *cb = (__le32 *)(chip->host_cmds_ptr);
	u32 val = 0;

	val |= (u32)(cmd_type & 0x03) << 30;
	val |= (u32)(reg_addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	spin_lock_irq(&chip->rtsx->reg_lock);
	if (chip->ci < (HOST_CMDS_BUF_LEN / 4))
		cb[(chip->ci)++] = cpu_to_le32(val);

	spin_unlock_irq(&chip->rtsx->reg_lock);
}

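/*
 * Start executing the queued command buffer without waiting for the
 * result.  In the HCBCTLR value written below, bit 31 triggers the
 * transfer, bit 30 enables hardware auto response, and bits 23:0 give
 * the command buffer length in bytes (four bytes per queued command).
 */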
void rtsx_send_cmd_no_wait(struct rtsx_chip *chip)
{
	u32 val = BIT(31);

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);
}

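/*
 * Like rtsx_send_cmd_no_wait(), but sleep until the completion is
 * signalled from interrupt context (or the timeout, in milliseconds,
 * expires).  Returns 0 on success or a negative errno on failure.
 */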
int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u32 val = BIT(31);
	long timeleft;
	int err = 0;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;
	rtsx->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
	rtsx->trans_state = STATE_TRANS_CMD;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto finish_send_cmd;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

finish_send_cmd:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

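/*
 * Append one entry to the host scatter-gather table.  As encoded below,
 * each entry is a 64-bit little-endian descriptor:
 *
 *	bits 63:32	DMA address
 *	bits 31:12	length
 *	bits 7:0	option flags (RTSX_SG_*)
 *
 * Segments longer than 0x80000 bytes are split across several
 * descriptors; RTSX_SG_END, if requested, is kept only on the last one.
 */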
static inline void rtsx_add_sg_tbl(
	struct rtsx_chip *chip, u32 addr, u32 len, u8 option)
{
	__le64 *sgb = (__le64 *)(chip->host_sg_tbl_ptr);
	u64 val = 0;
	u32 temp_len = 0;
	u8  temp_opt = 0;

	do {
		if (len > 0x80000) {
			temp_len = 0x80000;
			temp_opt = option & (~RTSX_SG_END);
		} else {
			temp_len = len;
			temp_opt = option;
		}
		val = ((u64)addr << 32) | ((u64)temp_len << 12) | temp_opt;

		if (chip->sgi < (HOST_SG_TBL_BUF_LEN / 8))
			sgb[(chip->sgi)++] = cpu_to_le64(val);

		len -= temp_len;
		addr += temp_len;
	} while (len);
}

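/*
 * ADMA transfer covering (possibly) only part of a scatterlist: *index
 * selects the starting s-g entry and *offset the byte offset within it,
 * and both are updated on return so that a follow-up call resumes where
 * this one stopped.  size is the number of bytes to move in this round.
 */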
static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
					     struct scatterlist *sg, int num_sg,
					     unsigned int *index,
					     unsigned int *offset, int size,
					     enum dma_data_direction dma_dir,
					     int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int sg_cnt, i, resid;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;
	u32 val = TRIG_DMA;

	if (!sg || (num_sg <= 0) || !offset || !index)
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	sg_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	resid = size;
	sg_ptr = sg;
	chip->sgi = 0;
	/*
	 * Usually the next entry will be sg + 1, but if this sg element
	 * is part of a chained scatterlist, it could jump to the start of
	 * a new scatterlist array.  So here we use sg_next to move to
	 * the proper sg.
	 */
	for (i = 0; i < *index; i++)
		sg_ptr = sg_next(sg_ptr);
	for (i = *index; i < sg_cnt; i++) {
		dma_addr_t addr;
		unsigned int len;
		u8 option;

		addr = sg_dma_address(sg_ptr);
		len = sg_dma_len(sg_ptr);

		dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n",
			(unsigned int)addr, len);
		dev_dbg(rtsx_dev(chip), "*index = %d, *offset = %d\n",
			*index, *offset);

		addr += *offset;

		if ((len - *offset) > resid) {
			*offset += resid;
			len = resid;
			resid = 0;
		} else {
			resid -= (len - *offset);
			len -= *offset;
			*offset = 0;
			*index = *index + 1;
		}
		if ((i == (sg_cnt - 1)) || !resid)
			option = RTSX_SG_VALID | RTSX_SG_END | RTSX_SG_TRANS_DATA;
		else
			option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;

		rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

		if (!resid)
			break;

		sg_ptr = sg_next(sg_ptr);
	}

	dev_dbg(rtsx_dev(chip), "SG table count = %d\n", chip->sgi);

	val |= (u32)(dir & 0x01) << 29;
	val |= ADMA_MODE;

	spin_lock_irq(&rtsx->reg_lock);

	init_completion(&trans_done);

	rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
			__func__, __LINE__);
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
		spin_unlock_irq(&rtsx->reg_lock);
		goto out;
	}
	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

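/*
 * Full-scatterlist ADMA transfer.  The host SG table holds at most
 * HOST_SG_TBL_BUF_LEN / 8 descriptors, so longer scatterlists are fed
 * to the hardware in several rounds: one table load and one DMA kick
 * per iteration of the outer loop below.
 */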
static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
				     struct scatterlist *sg, int num_sg,
				     enum dma_data_direction dma_dir,
				     int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int buf_cnt, i;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;

	if (!sg || (num_sg <= 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	buf_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	sg_ptr = sg;

	for (i = 0; i <= buf_cnt / (HOST_SG_TBL_BUF_LEN / 8); i++) {
		u32 val = TRIG_DMA;
		int sg_cnt, j;

		if (i == buf_cnt / (HOST_SG_TBL_BUF_LEN / 8))
			sg_cnt = buf_cnt % (HOST_SG_TBL_BUF_LEN / 8);
		else
			sg_cnt = HOST_SG_TBL_BUF_LEN / 8;

		chip->sgi = 0;
		for (j = 0; j < sg_cnt; j++) {
			dma_addr_t addr = sg_dma_address(sg_ptr);
			unsigned int len = sg_dma_len(sg_ptr);
			u8 option;

			dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n",
				(unsigned int)addr, len);

			if (j == (sg_cnt - 1))
				option = RTSX_SG_VALID | RTSX_SG_END | RTSX_SG_TRANS_DATA;
			else
				option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;

			rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

			sg_ptr = sg_next(sg_ptr);
		}

		dev_dbg(rtsx_dev(chip), "SG table count = %d\n", chip->sgi);

		val |= (u32)(dir & 0x01) << 29;
		val |= ADMA_MODE;

		spin_lock_irq(&rtsx->reg_lock);

		init_completion(&trans_done);

		rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
		rtsx_writel(chip, RTSX_HDBCTLR, val);

		spin_unlock_irq(&rtsx->reg_lock);

		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}

		spin_lock_irq(&rtsx->reg_lock);
		if (rtsx->trans_result == TRANS_RESULT_FAIL) {
			err = -EIO;
			spin_unlock_irq(&rtsx->reg_lock);
			goto out;
		}
		spin_unlock_irq(&rtsx->reg_lock);

		/* sg_ptr was already advanced by sg_next() in the loop above */
	}

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

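/*
 * DMA a single contiguous buffer to or from the card, mapping it with
 * dma_map_single() for the duration of the transfer.
 */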
static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
			     size_t len, enum dma_data_direction dma_dir,
			     int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	dma_addr_t addr;
	u8 dir;
	int err = 0;
	u32 val = BIT(31);
	long timeleft;

	if (!buf || (len <= 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	addr = dma_map_single(&rtsx->pci->dev, buf, len, dma_dir);
	if (dma_mapping_error(&rtsx->pci->dev, addr))
		return -ENOMEM;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	val |= (u32)(dir & 0x01) << 29;
	val |= (u32)(len & 0x00FFFFFF);

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	init_completion(&trans_done);

	rtsx->trans_state = STATE_TRANS_BUF;
	rtsx->trans_result = TRANS_NOT_READY;

	rtsx_writel(chip, RTSX_HDBAR, addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
			__func__, __LINE__);
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_single(&rtsx->pci->dev, addr, len, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

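/*
 * Front end used by the SCSI layer glue: dispatch to the partial
 * scatter-gather path when use_sg is non-zero (use_sg is the number of
 * s-g entries, and buf then points to the scatterlist) or to the plain
 * buffer path otherwise.  On failure, a detected delink is cleared and
 * the cards are reinitialized.
 */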
int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
			       void *buf, size_t len, int use_sg,
			       unsigned int *index, unsigned int *offset,
			       enum dma_data_direction dma_dir, int timeout)
{
	int err = 0;

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg) {
		struct scatterlist *sg = buf;

		err = rtsx_transfer_sglist_adma_partial(chip, card, sg, use_sg,
							index, offset, (int)len,
							dma_dir, timeout);
	} else {
		err = rtsx_transfer_buf(chip, card,
					buf, len, dma_dir, timeout);
	}
	if (err < 0) {
		if (RTSX_TST_DELINK(chip)) {
			RTSX_CLR_DELINK(chip);
			chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
			rtsx_reinit_cards(chip, 1);
		}
	}

	return err;
}

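/*
 * Same dispatch as rtsx_transfer_data_partial(), but always transfers
 * the whole scatterlist or buffer.
 */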
int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
		       int use_sg, enum dma_data_direction dma_dir, int timeout)
{
	int err = 0;

	dev_dbg(rtsx_dev(chip), "use_sg = %d\n", use_sg);

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg) {
		err = rtsx_transfer_sglist_adma(chip, card, buf,
						use_sg, dma_dir, timeout);
	} else {
		err = rtsx_transfer_buf(chip, card, buf, len, dma_dir, timeout);
	}

	if (err < 0) {
		if (RTSX_TST_DELINK(chip)) {
			RTSX_CLR_DELINK(chip);
			chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
			rtsx_reinit_cards(chip, 1);
		}
	}

	return err;
}