/*
 *  ms_block.c - Sony MemoryStick (legacy) storage support
 *
 *  Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Minor portions of the driver were copied from mspro_block.c which is
 * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
 *
 */
#define DRIVER_NAME "ms_block"
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/memstick.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/bitmap.h>
#include <linux/scatterlist.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include "ms_block.h"

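/*
 * Driver tunables (presumably exposed as module parameters elsewhere in the
 * file): 'debug' enables extra consistency checks such as
 * msb_validate_used_block_bitmap(), 'cache_flush_timeout' is the write-cache
 * flush delay in milliseconds (see msb_cache_write()), and 'verify_writes'
 * forces a read-back verification after every block write.
 */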
static int debug;
static int cache_flush_timeout = 1000;
static bool verify_writes;

/*
 * Copies a section of 'sg_from', starting at offset 'offset' and of length
 * 'len', to another scatterlist of up to 'to_nents' entries
 */
static size_t msb_sg_copy(struct scatterlist *sg_from,
	struct scatterlist *sg_to, int to_nents, size_t offset, size_t len)
{
	size_t copied = 0;

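	/* First skip 'offset' bytes, then copy the initial (possibly partial)
	 * chunk of the first remaining entry
	 */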
	while (offset > 0) {
		if (offset >= sg_from->length) {
			if (sg_is_last(sg_from))
				return 0;

			offset -= sg_from->length;
			sg_from = sg_next(sg_from);
			continue;
		}

		copied = min(len, sg_from->length - offset);
		sg_set_page(sg_to, sg_page(sg_from),
			copied, sg_from->offset + offset);

		len -= copied;
		offset = 0;

		if (sg_is_last(sg_from) || !len)
			goto out;

		sg_to = sg_next(sg_to);
		to_nents--;
		sg_from = sg_next(sg_from);
	}

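	/* Copy whole entries while more than one entry's worth of data
	 * remains and destination entries are available
	 */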
	while (len > sg_from->length && to_nents--) {
		len -= sg_from->length;
		copied += sg_from->length;

		sg_set_page(sg_to, sg_page(sg_from),
				sg_from->length, sg_from->offset);

		if (sg_is_last(sg_from) || !len)
			goto out;

		sg_from = sg_next(sg_from);
		sg_to = sg_next(sg_to);
	}

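	/* Copy the final, possibly partial, chunk */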
	if (len && to_nents) {
		sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset);
		copied += len;
	}
out:
	sg_mark_end(sg_to);
	return copied;
}

/*
 * Compares a section of 'sg', starting at offset 'offset' and of length
 * 'len', to a linear buffer of length 'len' at address 'buffer'.
 * Returns 0 if equal and -1 otherwise
 */
static int msb_sg_compare_to_buffer(struct scatterlist *sg,
					size_t offset, u8 *buffer, size_t len)
{
	int retval = 0, cmplen;
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sg, sg_nents(sg),
					SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	while (sg_miter_next(&miter) && len > 0) {
		if (offset >= miter.length) {
			offset -= miter.length;
			continue;
		}

		cmplen = min(miter.length - offset, len);
		retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0;
		if (retval)
			break;

		buffer += cmplen;
		len -= cmplen;
		offset = 0;
	}

	if (!retval && len)
		retval = -1;

	sg_miter_stop(&miter);
	return retval;
}

/*
 * Get the zone in which the block with logical address 'lba' lives.
 * Flash is broken into zones.
 * Each zone consists of 512 eraseblocks, of which 494 are usable in the
 * first zone and 496 in all following zones.
 * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-989, etc...
 * (e.g. lba 989 is the last block of zone 1, and lba 990 opens zone 2)
 */
static int msb_get_zone_from_lba(int lba)
{
	if (lba < 494)
		return 0;
	return ((lba - 494) / 496) + 1;
}

/* Get zone of physical block. Trivial */
static int msb_get_zone_from_pba(int pba)
{
	return pba / MS_BLOCKS_IN_ZONE;
}

/* Debug test to validate free block counts */
static int msb_validate_used_block_bitmap(struct msb_data *msb)
{
	int total_free_blocks = 0;
	int i;

	if (!debug)
		return 0;

	for (i = 0; i < msb->zone_count; i++)
		total_free_blocks += msb->free_block_count[i];

	if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
					msb->block_count) == total_free_blocks)
		return 0;

	pr_err("BUG: free block counts don't match the bitmap");
	msb->read_only = true;
	return -EINVAL;
}

/* Mark physical block as used */
static void msb_mark_block_used(struct msb_data *msb, int pba)
{
	int zone = msb_get_zone_from_pba(pba);

	if (test_bit(pba, msb->used_blocks_bitmap)) {
		pr_err("BUG: attempt to mark already used pba %d as used", pba);
		msb->read_only = true;
		return;
	}

	if (msb_validate_used_block_bitmap(msb))
		return;

	/* No races because all IO is single threaded */
	__set_bit(pba, msb->used_blocks_bitmap);
	msb->free_block_count[zone]--;
}

/* Mark physical block as free */
static void msb_mark_block_unused(struct msb_data *msb, int pba)
{
	int zone = msb_get_zone_from_pba(pba);

	if (!test_bit(pba, msb->used_blocks_bitmap)) {
		pr_err("BUG: attempt to mark already unused pba %d as unused", pba);
		msb->read_only = true;
		return;
	}

	if (msb_validate_used_block_bitmap(msb))
		return;

	/* No races because all IO is single threaded */
	__clear_bit(pba, msb->used_blocks_bitmap);
	msb->free_block_count[zone]++;
}

/* Invalidate current register window */
static void msb_invalidate_reg_window(struct msb_data *msb)
{
	msb->reg_addr.w_offset = offsetof(struct ms_register, id);
	msb->reg_addr.w_length = sizeof(struct ms_id_register);
	msb->reg_addr.r_offset = offsetof(struct ms_register, id);
	msb->reg_addr.r_length = sizeof(struct ms_id_register);
	msb->addr_valid = false;
}

/* Run a state machine to completion */
static int msb_run_state_machine(struct msb_data *msb, int (*state_func)
		(struct memstick_dev *card, struct memstick_request **req))
{
	struct memstick_dev *card = msb->card;

	WARN_ON(msb->state != -1);
	msb->int_polling = false;
	msb->state = 0;
	msb->exit_error = 0;

	memset(&card->current_mrq, 0, sizeof(card->current_mrq));

	card->next_request = state_func;
	memstick_new_req(card->host);
	wait_for_completion(&card->mrq_complete);

	WARN_ON(msb->state != -1);
	return msb->exit_error;
}

static int h_msb_default_bad(struct memstick_dev *card,
						struct memstick_request **mrq);

/* State machine handlers call this to exit */
static int msb_exit_state_machine(struct msb_data *msb, int error)
{
	WARN_ON(msb->state == -1);

	msb->state = -1;
	msb->exit_error = error;
	msb->card->next_request = h_msb_default_bad;

	/* Invalidate reg window on errors */
	if (error)
		msb_invalidate_reg_window(msb);

	complete(&msb->card->mrq_complete);

	/* The error return value stops the memstick request cycle */
	return -ENXIO;
}

/*
 * Read the INT register. Returns 1 if a GET_INT TPC was queued, 0 if the
 * value is already available in mrq->data[0]
 */
static int msb_read_int_reg(struct msb_data *msb, long timeout)
{
	struct memstick_request *mrq = &msb->card->current_mrq;

	WARN_ON(msb->state == -1);

	if (!msb->int_polling) {
		msb->int_timeout = jiffies +
			msecs_to_jiffies(timeout == -1 ? 500 : timeout);
		msb->int_polling = true;
	} else if (time_after(jiffies, msb->int_timeout)) {
		mrq->data[0] = MEMSTICK_INT_CMDNAK;
		return 0;
	}

	if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
				mrq->need_card_int && !mrq->error) {
		mrq->data[0] = mrq->int_reg;
		mrq->need_card_int = false;
		return 0;
	} else {
		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
		return 1;
	}
}

/*
 * Read a card register. Returns 0 if the register window had to be updated
 * first (the caller must stay in the same state and retry), 1 if the actual
 * read was queued
 */
static int msb_read_regs(struct msb_data *msb, int offset, int len)
{
	struct memstick_request *req = &msb->card->current_mrq;

	if (msb->reg_addr.r_offset != offset ||
	    msb->reg_addr.r_length != len || !msb->addr_valid) {

		msb->reg_addr.r_offset = offset;
		msb->reg_addr.r_length = len;
		msb->addr_valid = true;

		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
			&msb->reg_addr, sizeof(msb->reg_addr));
		return 0;
	}

	memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
	return 1;
}

/*
 * Write a card register. Same return convention as msb_read_regs(): 0 means
 * a register-window update was queued and the caller must retry
 */
static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
{
	struct memstick_request *req = &msb->card->current_mrq;

	if (msb->reg_addr.w_offset != offset ||
		msb->reg_addr.w_length != len || !msb->addr_valid) {

		msb->reg_addr.w_offset = offset;
		msb->reg_addr.w_length = len;
		msb->addr_valid = true;

		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
			&msb->reg_addr, sizeof(msb->reg_addr));
		return 0;
	}

	memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
	return 1;
}

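/*
 * The h_msb_* functions below are the states of the IO state machines.
 * On each step a handler either fills card->current_mrq with the next
 * request and returns 0 (it is then called again with the completed
 * request), or calls msb_exit_state_machine() to finish and wake up
 * msb_run_state_machine().
 */
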
/* Handler for absence of IO */
static int h_msb_default_bad(struct memstick_dev *card,
						struct memstick_request **mrq)
{
	return -ENXIO;
}

/*
 * This function is a handler for reads of one page from the device.
 * Writes output to msb->current_sg, takes the sector address from
 * msb->regs.param.
 * Can also be used to read extra data only. Set params accordingly.
 */
static int h_msb_read_page(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct scatterlist sg[2];
	u8 command, intreg;

	if (mrq->error) {
		dbg("read_page, unknown error");
		return msb_exit_state_machine(msb, mrq->error);
	}
again:
	switch (msb->state) {
	case MSB_RP_SEND_BLOCK_ADDRESS:
		/*
		 * msb_write_regs sometimes "fails" because it needs to update
		 * the reg window, and thus it returns a request for that.
		 * Then we stay in this state and retry.
		 */
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			(unsigned char *)&msb->regs.param))
			return 0;

		msb->state = MSB_RP_SEND_READ_COMMAND;
		return 0;

	case MSB_RP_SEND_READ_COMMAND:
		command = MS_CMD_BLOCK_READ;
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		msb->state = MSB_RP_SEND_INT_REQ;
		return 0;

	case MSB_RP_SEND_INT_REQ:
		msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
		/*
		 * If we don't actually need to send the INT read request
		 * (only needed in serial mode), just fall through.
		 */
		if (msb_read_int_reg(msb, -1))
			return 0;
		/* fallthrough */

	case MSB_RP_RECEIVE_INT_REQ_RESULT:
		intreg = mrq->data[0];
		msb->regs.status.interrupt = intreg;

		if (intreg & MEMSTICK_INT_CMDNAK)
			return msb_exit_state_machine(msb, -EIO);

		if (!(intreg & MEMSTICK_INT_CED)) {
			msb->state = MSB_RP_SEND_INT_REQ;
			goto again;
		}

		msb->int_polling = false;
		msb->state = (intreg & MEMSTICK_INT_ERR) ?
			MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
		goto again;

	case MSB_RP_SEND_READ_STATUS_REG:
		/* Read the status register to understand the source of the INT_ERR */
		if (!msb_read_regs(msb,
			offsetof(struct ms_register, status),
			sizeof(struct ms_status_register)))
			return 0;

		msb->state = MSB_RP_RECEIVE_STATUS_REG;
		return 0;

	case MSB_RP_RECEIVE_STATUS_REG:
		msb->regs.status = *(struct ms_status_register *)mrq->data;
		msb->state = MSB_RP_SEND_OOB_READ;
		/* fallthrough */

	case MSB_RP_SEND_OOB_READ:
		if (!msb_read_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register)))
			return 0;

		msb->state = MSB_RP_RECEIVE_OOB_READ;
		return 0;

	case MSB_RP_RECEIVE_OOB_READ:
		msb->regs.extra_data =
			*(struct ms_extra_data_register *) mrq->data;
		msb->state = MSB_RP_SEND_READ_DATA;
		/* fallthrough */

	case MSB_RP_SEND_READ_DATA:
		/* Skip this state if we only read the OOB */
		if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
			msb->state = MSB_RP_RECEIVE_READ_DATA;
			goto again;
		}

		sg_init_table(sg, ARRAY_SIZE(sg));
		msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
			msb->current_sg_offset,
			msb->page_size);

		memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
		msb->state = MSB_RP_RECEIVE_READ_DATA;
		return 0;

	case MSB_RP_RECEIVE_READ_DATA:
		if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
			msb->current_sg_offset += msb->page_size;
			return msb_exit_state_machine(msb, 0);
		}

		if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
			dbg("read_page: uncorrectable error");
			return msb_exit_state_machine(msb, -EBADMSG);
		}

		if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
			dbg("read_page: correctable error");
			msb->current_sg_offset += msb->page_size;
			return msb_exit_state_machine(msb, -EUCLEAN);
		} else {
			dbg("read_page: INT error, but no status error bits");
			return msb_exit_state_machine(msb, -EIO);
		}
	}

	BUG();
}

/*
 * Handler of writes of exactly one block.
 * Takes the address from msb->regs.param.
 * Writes the same extra data to all pages, taken from msb->regs.extra_data.
 * Returns -EBADMSG if the write fails due to an uncorrectable error, or -EIO
 * if the device refuses to take the command or some other error occurs
 */
static int h_msb_write_block(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct scatterlist sg[2];
	u8 intreg, command;

	if (mrq->error)
		return msb_exit_state_machine(msb, mrq->error);

again:
	switch (msb->state) {

	/*
	 * HACK: JMicron handling of TPCs between 8 and
	 * sizeof(memstick_request.data) is broken due to a hardware
	 * bug in PIO mode that is used for these TPCs.
	 * Therefore split the write.
	 */

	case MSB_WB_SEND_WRITE_PARAMS:
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			&msb->regs.param))
			return 0;

		msb->state = MSB_WB_SEND_WRITE_OOB;
		return 0;

	case MSB_WB_SEND_WRITE_OOB:
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register),
			&msb->regs.extra_data))
			return 0;
		msb->state = MSB_WB_SEND_WRITE_COMMAND;
		return 0;

	case MSB_WB_SEND_WRITE_COMMAND:
		command = MS_CMD_BLOCK_WRITE;
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		msb->state = MSB_WB_SEND_INT_REQ;
		return 0;

	case MSB_WB_SEND_INT_REQ:
		msb->state = MSB_WB_RECEIVE_INT_REQ;
		if (msb_read_int_reg(msb, -1))
			return 0;
		/* fallthrough */

	case MSB_WB_RECEIVE_INT_REQ:
		intreg = mrq->data[0];
		msb->regs.status.interrupt = intreg;

		/* errors mean out of here, and fast... */
		if (intreg & MEMSTICK_INT_CMDNAK)
			return msb_exit_state_machine(msb, -EIO);

		if (intreg & MEMSTICK_INT_ERR)
			return msb_exit_state_machine(msb, -EBADMSG);

		/* for the last page we need to poll CED */
		if (msb->current_page == msb->pages_in_block) {
			if (intreg & MEMSTICK_INT_CED)
				return msb_exit_state_machine(msb, 0);
			msb->state = MSB_WB_SEND_INT_REQ;
			goto again;
		}

		/* for a non-last page we need BREQ before writing the next chunk */
		if (!(intreg & MEMSTICK_INT_BREQ)) {
			msb->state = MSB_WB_SEND_INT_REQ;
			goto again;
		}

		msb->int_polling = false;
		msb->state = MSB_WB_SEND_WRITE_DATA;
		/* fallthrough */

	case MSB_WB_SEND_WRITE_DATA:
		sg_init_table(sg, ARRAY_SIZE(sg));

		if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
			msb->current_sg_offset,
			msb->page_size) < msb->page_size)
			return msb_exit_state_machine(msb, -EIO);

		memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
		mrq->need_card_int = 1;
		msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
		return 0;

	case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
		msb->current_page++;
		msb->current_sg_offset += msb->page_size;
		msb->state = MSB_WB_SEND_INT_REQ;
		goto again;
	default:
		BUG();
	}

	return 0;
}

/*
 * This function is used to send simple IO requests to the device, that
 * consist of a register write + command
 */
static int h_msb_send_command(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	u8 intreg;

	if (mrq->error) {
		dbg("send_command: unknown error");
		return msb_exit_state_machine(msb, mrq->error);
	}
again:
	switch (msb->state) {

	/* HACK: see h_msb_write_block */
	case MSB_SC_SEND_WRITE_PARAMS: /* write param register */
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			&msb->regs.param))
			return 0;
		msb->state = MSB_SC_SEND_WRITE_OOB;
		return 0;

	case MSB_SC_SEND_WRITE_OOB:
		if (!msb->command_need_oob) {
			msb->state = MSB_SC_SEND_COMMAND;
			goto again;
		}

		if (!msb_write_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register),
			&msb->regs.extra_data))
			return 0;

		msb->state = MSB_SC_SEND_COMMAND;
		return 0;

	case MSB_SC_SEND_COMMAND:
		memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
		msb->state = MSB_SC_SEND_INT_REQ;
		return 0;

	case MSB_SC_SEND_INT_REQ:
		msb->state = MSB_SC_RECEIVE_INT_REQ;
		if (msb_read_int_reg(msb, -1))
			return 0;
		/* fallthrough */

	case MSB_SC_RECEIVE_INT_REQ:
		intreg = mrq->data[0];

		if (intreg & MEMSTICK_INT_CMDNAK)
			return msb_exit_state_machine(msb, -EIO);
		if (intreg & MEMSTICK_INT_ERR)
			return msb_exit_state_machine(msb, -EBADMSG);

		if (!(intreg & MEMSTICK_INT_CED)) {
			msb->state = MSB_SC_SEND_INT_REQ;
			goto again;
		}

		return msb_exit_state_machine(msb, 0);
	}

	BUG();
}

/* Small handler for card reset */
static int h_msb_reset(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	u8 command = MS_CMD_RESET;
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;

	if (mrq->error)
		return msb_exit_state_machine(msb, mrq->error);

	switch (msb->state) {
	case MSB_RS_SEND:
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		mrq->need_card_int = 0;
		msb->state = MSB_RS_CONFIRM;
		return 0;
	case MSB_RS_CONFIRM:
		return msb_exit_state_machine(msb, 0);
	}
	BUG();
}

/* This handler is used to do the serial->parallel switch */
static int h_msb_parallel_switch(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct memstick_host *host = card->host;

	if (mrq->error) {
		dbg("parallel_switch: error");
		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
		return msb_exit_state_machine(msb, mrq->error);
	}

	switch (msb->state) {
	case MSB_PS_SEND_SWITCH_COMMAND:
		/* Set the parallel interface on memstick side */
		msb->regs.param.system |= MEMSTICK_SYS_PAM;

		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			1,
			(unsigned char *)&msb->regs.param))
			return 0;

		msb->state = MSB_PS_SWICH_HOST;
		return 0;

	case MSB_PS_SWICH_HOST:
		/*
		 * Set the parallel interface on our side + send a dummy
		 * request to see if the card responds
		 */
		host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
		msb->state = MSB_PS_CONFIRM;
		return 0;

	case MSB_PS_CONFIRM:
		return msb_exit_state_machine(msb, 0);
	}

	BUG();
}

static int msb_switch_to_parallel(struct msb_data *msb);

/* Reset the card, to guard against hw errors being treated as bad blocks */
static int msb_reset(struct msb_data *msb, bool full)
{
	bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
	struct memstick_dev *card = msb->card;
	struct memstick_host *host = card->host;
	int error;

	/* Reset the card */
	msb->regs.param.system = MEMSTICK_SYS_BAMD;

	if (full) {
		error = host->set_param(host,
					MEMSTICK_POWER, MEMSTICK_POWER_OFF);
		if (error)
			goto out_error;

		msb_invalidate_reg_window(msb);

		error = host->set_param(host,
					MEMSTICK_POWER, MEMSTICK_POWER_ON);
		if (error)
			goto out_error;

		error = host->set_param(host,
					MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
		if (error) {
out_error:
			dbg("Failed to reset the host controller");
			msb->read_only = true;
			return -EFAULT;
		}
	}

	error = msb_run_state_machine(msb, h_msb_reset);
	if (error) {
		dbg("Failed to reset the card");
		msb->read_only = true;
		return -ENODEV;
	}

	/* Set parallel mode */
	if (was_parallel)
		msb_switch_to_parallel(msb);
	return 0;
}

/* Attempts to switch interface to parallel mode */
static int msb_switch_to_parallel(struct msb_data *msb)
{
	int error;

	error = msb_run_state_machine(msb, h_msb_parallel_switch);
	if (error) {
		pr_err("Switch to parallel failed");
		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
		msb_reset(msb, true);
		return -EFAULT;
	}

	msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
	return 0;
}

/* Changes the overwrite flag on a page */
static int msb_set_overwrite_flag(struct msb_data *msb,
						u16 pba, u8 page, u8 flag)
{
	if (msb->read_only)
		return -EROFS;

	msb->regs.param.block_address = cpu_to_be16(pba);
	msb->regs.param.page_address = page;
	msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
	msb->regs.extra_data.overwrite_flag = flag;
	msb->command_value = MS_CMD_BLOCK_WRITE;
	msb->command_need_oob = true;

	dbg_verbose("changing overwrite flag to %02x for pba %d, page %d",
							flag, pba, page);
	return msb_run_state_machine(msb, h_msb_send_command);
}

static int msb_mark_bad(struct msb_data *msb, int pba)
{
	pr_notice("marking pba %d as bad", pba);
	msb_reset(msb, true);
	return msb_set_overwrite_flag(
			msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
}

static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
{
	dbg("marking page %d of pba %d as bad", page, pba);
	msb_reset(msb, true);
	return msb_set_overwrite_flag(msb,
		pba, page, ~MEMSTICK_OVERWRITE_PGST0);
}

/* Erases one physical block */
static int msb_erase_block(struct msb_data *msb, u16 pba)
{
	int error, try;

	if (msb->read_only)
		return -EROFS;

	dbg_verbose("erasing pba %d", pba);

	for (try = 1; try < 3; try++) {
		msb->regs.param.block_address = cpu_to_be16(pba);
		msb->regs.param.page_address = 0;
		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
		msb->command_value = MS_CMD_BLOCK_ERASE;
		msb->command_need_oob = false;

		error = msb_run_state_machine(msb, h_msb_send_command);
		if (!error || msb_reset(msb, true))
			break;
	}

	if (error) {
		pr_err("erase failed, marking pba %d as bad", pba);
		msb_mark_bad(msb, pba);
		/* don't hand out, or trust, a block that failed to erase */
		return error;
	}

	dbg_verbose("erase success, marking pba %d as unused", pba);
	msb_mark_block_unused(msb, pba);
	__set_bit(pba, msb->erased_blocks_bitmap);
	return error;
}

/* Reads one page from the device */
static int msb_read_page(struct msb_data *msb,
	u16 pba, u8 page, struct ms_extra_data_register *extra,
					struct scatterlist *sg, int offset)
{
	int try, error;

	if (pba == MS_BLOCK_INVALID) {
		unsigned long flags;
		struct sg_mapping_iter miter;
		size_t len = msb->page_size;

		dbg_verbose("read unmapped sector. returning 0xFF");

		local_irq_save(flags);
		sg_miter_start(&miter, sg, sg_nents(sg),
				SG_MITER_ATOMIC | SG_MITER_TO_SG);

		while (sg_miter_next(&miter) && len > 0) {

			int chunklen;

			if (offset && offset >= miter.length) {
				offset -= miter.length;
				continue;
			}

			chunklen = min(miter.length - offset, len);
			memset(miter.addr + offset, 0xFF, chunklen);
			len -= chunklen;
			offset = 0;
		}

		sg_miter_stop(&miter);
		local_irq_restore(flags);

		if (offset)
			return -EFAULT;

		if (extra)
			memset(extra, 0xFF, sizeof(*extra));
		return 0;
	}

	if (pba >= msb->block_count) {
		pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba);
		return -EINVAL;
	}

	for (try = 1; try < 3; try++) {
		msb->regs.param.block_address = cpu_to_be16(pba);
		msb->regs.param.page_address = page;
		msb->regs.param.cp = MEMSTICK_CP_PAGE;

		msb->current_sg = sg;
		msb->current_sg_offset = offset;
		error = msb_run_state_machine(msb, h_msb_read_page);

		if (error == -EUCLEAN) {
			pr_notice("correctable error on pba %d, page %d",
				pba, page);
			error = 0;
		}

		if (!error && extra)
			*extra = msb->regs.extra_data;

		if (!error || msb_reset(msb, true))
			break;
	}

	/* Mark bad pages */
	if (error == -EBADMSG) {
		pr_err("uncorrectable error on read of pba %d, page %d",
			pba, page);

		if (msb->regs.extra_data.overwrite_flag &
					MEMSTICK_OVERWRITE_PGST0)
			msb_mark_page_bad(msb, pba, page);
		return -EBADMSG;
	}

	if (error)
		pr_err("read of pba %d, page %d failed with error %d",
			pba, page, error);
	return error;
}

/* Reads the oob of a page only */
static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
	struct ms_extra_data_register *extra)
{
	int error;

	BUG_ON(!extra);
	msb->regs.param.block_address = cpu_to_be16(pba);
	msb->regs.param.page_address = page;
	msb->regs.param.cp = MEMSTICK_CP_EXTRA;

	if (pba >= msb->block_count) {
		pr_err("BUG: attempt to read beyond the end of card at pba %d", pba);
		return -EINVAL;
	}

	error = msb_run_state_machine(msb, h_msb_read_page);
	*extra = msb->regs.extra_data;

	if (error == -EUCLEAN) {
		pr_notice("correctable error on pba %d, page %d",
			pba, page);
		return 0;
	}

	return error;
}

/* Reads a block and compares it with data contained in scatterlist orig_sg */
static int msb_verify_block(struct msb_data *msb, u16 pba,
				struct scatterlist *orig_sg, int offset)
{
	struct scatterlist sg;
	int page = 0, error;

	sg_init_one(&sg, msb->block_buffer, msb->block_size);

	while (page < msb->pages_in_block) {
		error = msb_read_page(msb, pba, page,
				NULL, &sg, page * msb->page_size);
		if (error)
			return error;
		page++;
	}

	if (msb_sg_compare_to_buffer(orig_sg, offset,
				msb->block_buffer, msb->block_size))
		return -EIO;
	return 0;
}

/* Writes exactly one block + oob */
static int msb_write_block(struct msb_data *msb,
			u16 pba, u32 lba, struct scatterlist *sg, int offset)
{
	int error, current_try = 1;

	BUG_ON(sg->length < msb->page_size);

	if (msb->read_only)
		return -EROFS;

	if (pba == MS_BLOCK_INVALID) {
		pr_err("BUG: write: attempt to write MS_BLOCK_INVALID block");
		return -EINVAL;
	}

	if (pba >= msb->block_count || lba >= msb->logical_block_count) {
		pr_err("BUG: write: attempt to write beyond the end of device");
		return -EINVAL;
	}

	if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
		pr_err("BUG: write: lba zone mismatch");
		return -EINVAL;
	}

	if (pba == msb->boot_block_locations[0] ||
		pba == msb->boot_block_locations[1]) {
		pr_err("BUG: write: attempt to write to boot blocks!");
		return -EINVAL;
	}

	while (1) {

		if (msb->read_only)
			return -EROFS;

		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
		msb->regs.param.page_address = 0;
		msb->regs.param.block_address = cpu_to_be16(pba);

		msb->regs.extra_data.management_flag = 0xFF;
		msb->regs.extra_data.overwrite_flag = 0xF8;
		msb->regs.extra_data.logical_address = cpu_to_be16(lba);

		msb->current_sg = sg;
		msb->current_sg_offset = offset;
		msb->current_page = 0;

		error = msb_run_state_machine(msb, h_msb_write_block);

		/*
		 * The block we just wrote is assumed to be erased, since its
		 * pba was erased before. If it wasn't actually erased, the
		 * write will appear to succeed but will merely clear the bits
		 * that were set in the block; thus verify that what we have
		 * written matches what we expect.
		 * We do trust the blocks that we erased ourselves.
		 */
		if (!error && (verify_writes ||
				!test_bit(pba, msb->erased_blocks_bitmap)))
			error = msb_verify_block(msb, pba, sg, offset);

		if (!error)
			break;

		if (current_try > 1 || msb_reset(msb, true))
			break;

		pr_err("write failed, trying to erase the pba %d", pba);
		error = msb_erase_block(msb, pba);
		if (error)
			break;

		current_try++;
	}
	return error;
}

/* Finds a free block for write replacement */
static u16 msb_get_free_block(struct msb_data *msb, int zone)
{
	u16 pos;
	int pba = zone * MS_BLOCKS_IN_ZONE;
	int i;

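	/*
	 * Pick a random free block within the zone so that writes are
	 * spread over the zone (a simple form of wear leveling)
	 */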
	get_random_bytes(&pos, sizeof(pos));

	if (!msb->free_block_count[zone]) {
		pr_err("no free blocks in zone %d to use for a write (media is worn out), switching to RO mode", zone);
		msb->read_only = true;
		return MS_BLOCK_INVALID;
	}

	pos %= msb->free_block_count[zone];

	dbg_verbose("have %d choices for a free block, selected randomly: %d",
		msb->free_block_count[zone], pos);

	pba = find_next_zero_bit(msb->used_blocks_bitmap,
							msb->block_count, pba);
	for (i = 0; i < pos; ++i)
		pba = find_next_zero_bit(msb->used_blocks_bitmap,
						msb->block_count, pba + 1);

	dbg_verbose("result of the free blocks scan: pba %d", pba);

	if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
		pr_err("BUG: can't get a free block");
		msb->read_only = true;
		return MS_BLOCK_INVALID;
	}

	msb_mark_block_used(msb, pba);
	return pba;
}

static int msb_update_block(struct msb_data *msb, u16 lba,
	struct scatterlist *sg, int offset)
{
	u16 pba, new_pba;
	int error, try;

	pba = msb->lba_to_pba_table[lba];
	dbg_verbose("start of a block update at lba %d, pba %d", lba, pba);

	if (pba != MS_BLOCK_INVALID) {
		dbg_verbose("setting the update flag on the block");
		msb_set_overwrite_flag(msb, pba, 0,
				0xFF & ~MEMSTICK_OVERWRITE_UDST);
	}

	for (try = 0; try < 3; try++) {
		new_pba = msb_get_free_block(msb,
			msb_get_zone_from_lba(lba));

		if (new_pba == MS_BLOCK_INVALID) {
			error = -EIO;
			goto out;
		}

		dbg_verbose("block update: writing updated block to the pba %d",
								new_pba);
		error = msb_write_block(msb, new_pba, lba, sg, offset);
		if (error == -EBADMSG) {
			msb_mark_bad(msb, new_pba);
			continue;
		}

		if (error)
			goto out;

		dbg_verbose("block update: erasing the old block");
		msb_erase_block(msb, pba);
		msb->lba_to_pba_table[lba] = new_pba;
		return 0;
	}
out:
	if (error) {
		pr_err("block update error after %d tries, switching to r/o mode", try);
		msb->read_only = true;
	}
	return error;
}

/* Converts endianness in the boot block for easy use */
static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
{
	p->header.block_id = be16_to_cpu(p->header.block_id);
	p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
	p->entry.disabled_block.start_addr
		= be32_to_cpu(p->entry.disabled_block.start_addr);
	p->entry.disabled_block.data_size
		= be32_to_cpu(p->entry.disabled_block.data_size);
	p->entry.cis_idi.start_addr
		= be32_to_cpu(p->entry.cis_idi.start_addr);
	p->entry.cis_idi.data_size
		= be32_to_cpu(p->entry.cis_idi.data_size);
	p->attr.block_size = be16_to_cpu(p->attr.block_size);
	p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
	p->attr.number_of_effective_blocks
		= be16_to_cpu(p->attr.number_of_effective_blocks);
	p->attr.page_size = be16_to_cpu(p->attr.page_size);
	p->attr.memory_manufacturer_code
		= be16_to_cpu(p->attr.memory_manufacturer_code);
	p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
	p->attr.implemented_capacity
		= be16_to_cpu(p->attr.implemented_capacity);
	p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
	p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
}

static int msb_read_boot_blocks(struct msb_data *msb)
{
	int pba = 0;
	struct scatterlist sg;
	struct ms_extra_data_register extra;
	struct ms_boot_page *page;

	msb->boot_block_locations[0] = MS_BLOCK_INVALID;
	msb->boot_block_locations[1] = MS_BLOCK_INVALID;
	msb->boot_block_count = 0;

	dbg_verbose("Start of a scan for the boot blocks");

	if (!msb->boot_page) {
		page = kmalloc_array(2, sizeof(struct ms_boot_page),
				     GFP_KERNEL);
		if (!page)
			return -ENOMEM;

		msb->boot_page = page;
	} else {
		page = msb->boot_page;
	}

	msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;

	for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) {

		sg_init_one(&sg, page, sizeof(*page));
		if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
			dbg("boot scan: can't read pba %d", pba);
			continue;
		}

		if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
			dbg("management flag doesn't indicate boot block %d",
									pba);
			continue;
		}

		if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
			dbg("the pba at %d doesn't contain boot block ID", pba);
			continue;
		}

		msb_fix_boot_page_endianness(page);
		msb->boot_block_locations[msb->boot_block_count] = pba;

		page++;
		msb->boot_block_count++;

		if (msb->boot_block_count == 2)
			break;
	}

	if (!msb->boot_block_count) {
		pr_err("media doesn't contain master page, aborting");
		return -EIO;
	}

	dbg_verbose("End of scan for boot blocks");
	return 0;
}

static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
{
	struct ms_boot_page *boot_block;
	struct scatterlist sg;
	u16 *buffer = NULL;
	int offset = 0;
	int i, error = 0;
	int data_size, data_offset, page, page_offset, size_to_read;
	u16 pba;

	BUG_ON(block_nr > 1);
	boot_block = &msb->boot_page[block_nr];
	pba = msb->boot_block_locations[block_nr];

	if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
		return -EINVAL;

	data_size = boot_block->entry.disabled_block.data_size;
	data_offset = sizeof(struct ms_boot_page) +
			boot_block->entry.disabled_block.start_addr;
	if (!data_size)
		return 0;

	page = data_offset / msb->page_size;
	page_offset = data_offset % msb->page_size;
	size_to_read =
		DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
			msb->page_size;

	dbg("reading bad block of boot block at pba %d, offset %d len %d",
		pba, data_offset, data_size);

	buffer = kzalloc(size_to_read, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Read the buffer */
	sg_init_one(&sg, buffer, size_to_read);

	while (offset < size_to_read) {
		error = msb_read_page(msb, pba, page, NULL, &sg, offset);
		if (error)
			goto out;

		page++;
		offset += msb->page_size;

		if (page == msb->pages_in_block) {
			pr_err("bad block table extends beyond the boot block");
			break;
		}
	}

	/*
	 * Process the bad block table. The table starts at byte 'page_offset'
	 * within the buffer and is an array of big-endian u16 entries.
	 */
	for (i = 0; i < data_size / sizeof(u16); i++) {

		u16 bad_block =
			be16_to_cpu(buffer[page_offset / sizeof(u16) + i]);

		if (bad_block >= msb->block_count) {
			dbg("bad block table contains invalid block %d",
								bad_block);
			continue;
		}

		if (test_bit(bad_block, msb->used_blocks_bitmap)) {
			dbg("duplicate bad block %d in the table",
				bad_block);
			continue;
		}

		dbg("block %d is marked as factory bad", bad_block);
		msb_mark_block_used(msb, bad_block);
	}
out:
	kfree(buffer);
	return error;
}

static int msb_ftl_initialize(struct msb_data *msb)
{
	int i;

	if (msb->ftl_initialized)
		return 0;

	msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
	msb->logical_block_count = msb->zone_count * 496 - 2;

	msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
	msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
	msb->lba_to_pba_table =
		kmalloc_array(msb->logical_block_count, sizeof(u16),
			      GFP_KERNEL);

	if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
						!msb->erased_blocks_bitmap) {
		kfree(msb->used_blocks_bitmap);
		kfree(msb->lba_to_pba_table);
		kfree(msb->erased_blocks_bitmap);
		return -ENOMEM;
	}

	for (i = 0; i < msb->zone_count; i++)
		msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;

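	/*
	 * memset() fills bytes, so this relies on MS_BLOCK_INVALID being
	 * 0xFFFF: its low byte (0xFF), replicated over both bytes of each
	 * u16 entry, yields MS_BLOCK_INVALID again
	 */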
	memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
			msb->logical_block_count * sizeof(u16));

	dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
		msb->zone_count, msb->logical_block_count);

	msb->ftl_initialized = true;
	return 0;
}

static int msb_ftl_scan(struct msb_data *msb)
{
	u16 pba, lba, other_block;
	u8 overwrite_flag, management_flag, other_overwrite_flag;
	int error;
	struct ms_extra_data_register extra;
	u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);

	if (!overwrite_flags)
		return -ENOMEM;

	dbg("Start of media scanning");
	for (pba = 0; pba < msb->block_count; pba++) {

		if (pba == msb->boot_block_locations[0] ||
			pba == msb->boot_block_locations[1]) {
			dbg_verbose("pba %05d -> [boot block]", pba);
			msb_mark_block_used(msb, pba);
			continue;
		}

		if (test_bit(pba, msb->used_blocks_bitmap)) {
			dbg_verbose("pba %05d -> [factory bad]", pba);
			continue;
		}

		memset(&extra, 0, sizeof(extra));
		error = msb_read_oob(msb, pba, 0, &extra);

		/* can't trust the page if we can't read the oob */
		if (error == -EBADMSG) {
			pr_notice("oob of pba %d damaged, will try to erase it", pba);
			msb_mark_block_used(msb, pba);
			msb_erase_block(msb, pba);
			continue;
		} else if (error) {
			pr_err("unknown error %d on read of oob of pba %d - aborting",
				error, pba);

			kfree(overwrite_flags);
			return error;
		}

		lba = be16_to_cpu(extra.logical_address);
		management_flag = extra.management_flag;
		overwrite_flag = extra.overwrite_flag;
		overwrite_flags[pba] = overwrite_flag;

		/* Skip bad blocks */
		if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
			dbg("pba %05d -> [BAD]", pba);
			msb_mark_block_used(msb, pba);
			continue;
		}

		/* Skip system/drm blocks */
		if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
			MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
			dbg("pba %05d -> [reserved management flag %02x]",
							pba, management_flag);
			msb_mark_block_used(msb, pba);
			continue;
		}

		/* Erase temporary tables */
		if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
			dbg("pba %05d -> [temp table] - will erase", pba);

			msb_mark_block_used(msb, pba);
			msb_erase_block(msb, pba);
			continue;
		}

		if (lba == MS_BLOCK_INVALID) {
			dbg_verbose("pba %05d -> [free]", pba);
			continue;
		}

		msb_mark_block_used(msb, pba);

		/* Block has an LBA that doesn't match its zone */
		if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
			pr_notice("pba %05d -> [bad lba %05d] - will erase",
								pba, lba);
			msb_erase_block(msb, pba);
			continue;
		}

		/* No collisions - great */
		if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
			dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
			msb->lba_to_pba_table[lba] = pba;
			continue;
		}

		other_block = msb->lba_to_pba_table[lba];
		other_overwrite_flag = overwrite_flags[other_block];

		pr_notice("Collision between pba %d and pba %d",
			pba, other_block);

		if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
			pr_notice("pba %d is marked as stable, use it", pba);
			msb_erase_block(msb, other_block);
			msb->lba_to_pba_table[lba] = pba;
			continue;
		}

		if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
			pr_notice("pba %d is marked as stable, use it",
								other_block);
			msb_erase_block(msb, pba);
			continue;
		}

		pr_notice("collision between blocks %d and %d, without stable flag set on both, erasing pba %d",
				pba, other_block, other_block);

		msb_erase_block(msb, other_block);
		msb->lba_to_pba_table[lba] = pba;
	}

	dbg("End of media scanning");
	kfree(overwrite_flags);
	return 0;
}

static void msb_cache_flush_timer(struct timer_list *t)
{
	struct msb_data *msb = from_timer(msb, t, cache_flush_timer);

	msb->need_flush_cache = true;
	queue_work(msb->io_queue, &msb->io_work);
}

static void msb_cache_discard(struct msb_data *msb)
{
	if (msb->cache_block_lba == MS_BLOCK_INVALID)
		return;

	del_timer_sync(&msb->cache_flush_timer);

	dbg_verbose("Discarding the write cache");
	msb->cache_block_lba = MS_BLOCK_INVALID;
	bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
}

static int msb_cache_init(struct msb_data *msb)
{
	timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0);

	if (!msb->cache)
		msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
	if (!msb->cache)
		return -ENOMEM;

	msb_cache_discard(msb);
	return 0;
}

static int msb_cache_flush(struct msb_data *msb)
{
	struct scatterlist sg;
	struct ms_extra_data_register extra;
	int page, offset, error;
	u16 pba, lba;

	if (msb->read_only)
		return -EROFS;

	if (msb->cache_block_lba == MS_BLOCK_INVALID)
		return 0;

	lba = msb->cache_block_lba;
	pba = msb->lba_to_pba_table[lba];

	dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
						pba, msb->cache_block_lba);

	sg_init_one(&sg, msb->cache, msb->block_size);

	/* Read all missing pages in cache */
	for (page = 0; page < msb->pages_in_block; page++) {

		if (test_bit(page, &msb->valid_cache_bitmap))
			continue;

		offset = page * msb->page_size;

		dbg_verbose("reading non-present sector %d of cache block %d",
			page, lba);
		error = msb_read_page(msb, pba, page, &extra, &sg, offset);

		/* Bad pages are copied with 00 page status */
		if (error == -EBADMSG) {
			pr_err("read error on sector %d, contents probably damaged", page);
			continue;
		}

		if (error)
			return error;

		if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
							MEMSTICK_OV_PG_NORMAL) {
			dbg("page %d is marked as bad", page);
			continue;
		}

		set_bit(page, &msb->valid_cache_bitmap);
	}

	/* Write the cache now */
	error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
	pba = msb->lba_to_pba_table[msb->cache_block_lba];

	/* Mark invalid pages */
	if (!error) {
		for (page = 0; page < msb->pages_in_block; page++) {

			if (test_bit(page, &msb->valid_cache_bitmap))
				continue;

			dbg("marking page %d as containing damaged data",
				page);
			msb_set_overwrite_flag(msb,
				pba, page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
		}
	}

	msb_cache_discard(msb);
	return error;
}

static int msb_cache_write(struct msb_data *msb, int lba,
	int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
{
	int error;
	struct scatterlist sg_tmp[10];

	if (msb->read_only)
		return -EROFS;

	if (msb->cache_block_lba == MS_BLOCK_INVALID ||
						lba != msb->cache_block_lba)
		if (add_to_cache_only)
			return 0;

	/* If we need to write a different block */
	if (msb->cache_block_lba != MS_BLOCK_INVALID &&
						lba != msb->cache_block_lba) {
		dbg_verbose("first flush the cache");
		error = msb_cache_flush(msb);
		if (error)
			return error;
	}

	if (msb->cache_block_lba == MS_BLOCK_INVALID) {
		msb->cache_block_lba = lba;
		mod_timer(&msb->cache_flush_timer,
			jiffies + msecs_to_jiffies(cache_flush_timeout));
	}

	dbg_verbose("Write of LBA %d page %d to cache", lba, page);

	sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
	msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);

	sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
		msb->cache + page * msb->page_size, msb->page_size);

	set_bit(page, &msb->valid_cache_bitmap);
	return 0;
}

static int msb_cache_read(struct msb_data *msb, int lba,
				int page, struct scatterlist *sg, int offset)
{
	int pba = msb->lba_to_pba_table[lba];
	struct scatterlist sg_tmp[10];
	int error = 0;

	if (lba == msb->cache_block_lba &&
			test_bit(page, &msb->valid_cache_bitmap)) {

		dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
							lba, pba, page);

		sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
		msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp),
			offset, msb->page_size);
		sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
			msb->cache + msb->page_size * page,
							msb->page_size);
	} else {
		dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
							lba, pba, page);

		error = msb_read_page(msb, pba, page, NULL, sg, offset);
		if (error)
			return error;

		msb_cache_write(msb, lba, page, true, sg, offset);
	}
	return error;
}

/*
 * Emulated geometry table
 * This table content isn't that important,
 * One could put here different values, provided that they still
 * cover the whole disk.
 * The 64 MB entry is what Windows reports for my 64M memstick
 */

static const struct chs_entry chs_table[] = {
/*        size sectors cylinders  heads */
	{ 4,    16,    247,       2  },
	{ 8,    16,    495,       2  },
	{ 16,   16,    495,       4  },
	{ 32,   16,    991,       4  },
	{ 64,   16,    991,       8  },
	{128,   16,    991,       16 },
	{ 0 }
};

/* Load information about the card */
static int msb_init_card(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_host *host = card->host;
	struct ms_boot_page *boot_block;
	int error = 0, i, raw_size_in_megs;

	msb->caps = 0;

	if (card->id.class == MEMSTICK_CLASS_ROM)
		msb->read_only = true;

	msb->state = -1;
	error = msb_reset(msb, false);
	if (error)
		return error;

	/*
	 * Due to a bug in the JMicron driver written by Alex Dubov,
	 * its serial mode barely works,
	 * so we switch to parallel mode right away
	 */
	if (host->caps & MEMSTICK_CAP_PAR4)
		msb_switch_to_parallel(msb);

	msb->page_size = sizeof(struct ms_boot_page);

	/* Read the boot page */
	error = msb_read_boot_blocks(msb);
	if (error)
		return -EIO;

	boot_block = &msb->boot_page[0];

	/* Save interesting attributes from the boot page */
	msb->block_count = boot_block->attr.number_of_blocks;
	msb->page_size = boot_block->attr.page_size;

	msb->pages_in_block = boot_block->attr.block_size * 2;
	msb->block_size = msb->page_size * msb->pages_in_block;

	if (msb->page_size > PAGE_SIZE) {
		/* this isn't supported by Linux at all, anyway */
		dbg("device page %d size isn't supported", msb->page_size);
		return -EINVAL;
	}

	msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
	if (!msb->block_buffer)
		return -ENOMEM;

	raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;

	for (i = 0; chs_table[i].size; i++) {

		if (chs_table[i].size != raw_size_in_megs)
			continue;

		msb->geometry.cylinders = chs_table[i].cyl;
		msb->geometry.heads = chs_table[i].head;
		msb->geometry.sectors = chs_table[i].sec;
		break;
	}

	if (boot_block->attr.transfer_supporting == 1)
		msb->caps |= MEMSTICK_CAP_PAR4;

	if (boot_block->attr.device_type & 0x03)
		msb->read_only = true;

	dbg("Total block count = %d", msb->block_count);
	dbg("Each block consists of %d pages", msb->pages_in_block);
	dbg("Page size = %d bytes", msb->page_size);
	dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
	dbg("Read only: %d", msb->read_only);

#if 0
	/* Now we can switch the interface */
	if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
		msb_switch_to_parallel(msb);
#endif

	error = msb_cache_init(msb);
	if (error)
		return error;

	error = msb_ftl_initialize(msb);
	if (error)
		return error;

	/* Read the bad block table */
	error = msb_read_bad_block_table(msb, 0);

	if (error && error != -ENOMEM) {
		dbg("failed to read bad block table from primary boot block, trying from backup");
		error = msb_read_bad_block_table(msb, 1);
	}

	if (error)
		return error;

	/* *drum roll* Scan the media */
	error = msb_ftl_scan(msb);
	if (error) {
		pr_err("Scan of media failed");
		return error;
	}

	return 0;
}

static int msb_do_write_request(struct msb_data *msb, int lba,
	int page, struct scatterlist *sg, size_t len, int *successfully_written)
{
	int error = 0;
	off_t offset = 0;
	*successfully_written = 0;

	while (offset < len) {
		if (page == 0 && len - offset >= msb->block_size) {

			if (msb->cache_block_lba == lba)
				msb_cache_discard(msb);

			dbg_verbose("Writing whole lba %d", lba);
			error = msb_update_block(msb, lba, sg, offset);
			if (error)
				return error;

			offset += msb->block_size;
			*successfully_written += msb->block_size;
			lba++;
			continue;
		}

		error = msb_cache_write(msb, lba, page, false, sg, offset);
		if (error)
			return error;

		offset += msb->page_size;
		*successfully_written += msb->page_size;

		page++;
		if (page == msb->pages_in_block) {
			page = 0;
			lba++;
		}
	}
	return 0;
}

msb_do_read_request(struct msb_data * msb,int lba,int page,struct scatterlist * sg,int len,int * sucessfuly_read)1846 static int msb_do_read_request(struct msb_data *msb, int lba,
1847 		int page, struct scatterlist *sg, int len, int *sucessfuly_read)
1848 {
1849 	int error = 0;
1850 	int offset = 0;
1851 	*sucessfuly_read = 0;
1852 
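	/*
	 * Reads go page by page through msb_cache_read(), which serves a
	 * page from the write cache when the cache happens to hold this
	 * block, and otherwise reads it straight from the media.
	 */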
	while (offset < len) {
		error = msb_cache_read(msb, lba, page, sg, offset);
		if (error)
			return error;

		offset += msb->page_size;
		*successfully_read += msb->page_size;

		page++;
		if (page == msb->pages_in_block) {
			page = 0;
			lba++;
		}
	}
	return 0;
}

static void msb_io_work(struct work_struct *work)
{
	struct msb_data *msb = container_of(work, struct msb_data, io_work);
	int page, error, len;
	sector_t lba;
	unsigned long flags;
	struct scatterlist *sg = msb->prealloc_sg;

	dbg_verbose("IO: work started");

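	/*
	 * Main IO loop: a pending cache flush takes priority; otherwise
	 * pull the next request off the block queue and service it, looping
	 * until the queue runs dry. q_lock guards msb->req and the flush
	 * flag.
	 */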
	while (1) {
		spin_lock_irqsave(&msb->q_lock, flags);

		if (msb->need_flush_cache) {
			msb->need_flush_cache = false;
			spin_unlock_irqrestore(&msb->q_lock, flags);
			msb_cache_flush(msb);
			continue;
		}

		if (!msb->req) {
			msb->req = blk_fetch_request(msb->queue);
			if (!msb->req) {
				dbg_verbose("IO: no more requests, exiting");
				spin_unlock_irqrestore(&msb->q_lock, flags);
				return;
			}
		}

		spin_unlock_irqrestore(&msb->q_lock, flags);

		/* If the card was removed meanwhile */
		if (!msb->req)
			return;

		/* process the request */
		dbg_verbose("IO: processing new request");
		blk_rq_map_sg(msb->queue, msb->req, sg);

		lba = blk_rq_pos(msb->req);

		sector_div(lba, msb->page_size / 512);
		page = sector_div(lba, msb->pages_in_block);
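		/*
		 * The two sector_div() calls above turn the 512-byte sector
		 * position into a (logical block, page) pair. E.g. with
		 * 2048-byte pages and 32 pages per block, sector 1000 is
		 * page index 1000 / 4 = 250, i.e. page 250 % 32 = 26 of
		 * logical block 250 / 32 = 7.
		 */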

		if (rq_data_dir(msb->req) == READ)
			error = msb_do_read_request(msb, lba, page, sg,
				blk_rq_bytes(msb->req), &len);
		else
			error = msb_do_write_request(msb, lba, page, sg,
				blk_rq_bytes(msb->req), &len);

		spin_lock_irqsave(&msb->q_lock, flags);

		if (len && !__blk_end_request(msb->req, BLK_STS_OK, len))
			msb->req = NULL;

		if (error && msb->req) {
			blk_status_t ret = errno_to_blk_status(error);

			dbg_verbose("IO: ending one sector of the request with error");
			if (!__blk_end_request(msb->req, ret, msb->page_size))
				msb->req = NULL;
		}

		if (msb->req)
			dbg_verbose("IO: request still pending");

		spin_unlock_irqrestore(&msb->q_lock, flags);
	}
}

static DEFINE_IDR(msb_disk_idr); /* set of used disk numbers */
static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */

static int msb_bd_open(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	struct msb_data *msb = disk->private_data;

	dbg_verbose("block device open");

	mutex_lock(&msb_disk_lock);

	if (msb && msb->card)
		msb->usage_count++;

	mutex_unlock(&msb_disk_lock);
	return 0;
}

static void msb_data_clear(struct msb_data *msb)
{
	kfree(msb->boot_page);
	kfree(msb->used_blocks_bitmap);
	kfree(msb->lba_to_pba_table);
	kfree(msb->cache);
	msb->card = NULL;
}

static int msb_disk_release(struct gendisk *disk)
{
	struct msb_data *msb = disk->private_data;

	dbg_verbose("block device release");
	mutex_lock(&msb_disk_lock);

	if (msb) {
		if (msb->usage_count)
			msb->usage_count--;

		if (!msb->usage_count) {
			disk->private_data = NULL;
			idr_remove(&msb_disk_idr, msb->disk_id);
			put_disk(disk);
			kfree(msb);
		}
	}
	mutex_unlock(&msb_disk_lock);
	return 0;
}

static void msb_bd_release(struct gendisk *disk, fmode_t mode)
{
	msb_disk_release(disk);
}

static int msb_bd_getgeo(struct block_device *bdev,
				 struct hd_geometry *geo)
{
	struct msb_data *msb = bdev->bd_disk->private_data;

	*geo = msb->geometry;
	return 0;
}

static void msb_submit_req(struct request_queue *q)
{
	struct memstick_dev *card = q->queuedata;
	struct msb_data *msb = memstick_get_drvdata(card);
	struct request *req = NULL;

	dbg_verbose("Submit request");

	if (msb->card_dead) {
		dbg("Refusing requests on removed card");

		WARN_ON(!msb->io_queue_stopped);

		while ((req = blk_fetch_request(q)) != NULL)
			__blk_end_request_all(req, BLK_STS_IOERR);
		return;
	}

	if (msb->req)
		return;

	if (!msb->io_queue_stopped)
		queue_work(msb->io_queue, &msb->io_work);
}

static int msb_check_card(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);

	return (msb->card_dead == 0);
}

static void msb_stop(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	dbg("Stopping all msblock IO");

	spin_lock_irqsave(&msb->q_lock, flags);
	blk_stop_queue(msb->queue);
	msb->io_queue_stopped = true;
	spin_unlock_irqrestore(&msb->q_lock, flags);

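	/*
	 * Teardown order matters here: the queue was stopped under the lock
	 * above; next kill the periodic cache flush timer, drain the IO
	 * workqueue, and only then hand any half-processed request back to
	 * the block queue.
	 */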
	del_timer_sync(&msb->cache_flush_timer);
	flush_workqueue(msb->io_queue);

	if (msb->req) {
		spin_lock_irqsave(&msb->q_lock, flags);
		blk_requeue_request(msb->queue, msb->req);
		msb->req = NULL;
		spin_unlock_irqrestore(&msb->q_lock, flags);
	}
}

static void msb_start(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	dbg("Resuming IO from msblock");

	msb_invalidate_reg_window(msb);

	spin_lock_irqsave(&msb->q_lock, flags);
	if (!msb->io_queue_stopped || msb->card_dead) {
		spin_unlock_irqrestore(&msb->q_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&msb->q_lock, flags);

	/* Kick a cache flush anyway, it's harmless */
	msb->need_flush_cache = true;
	msb->io_queue_stopped = false;

	spin_lock_irqsave(&msb->q_lock, flags);
	blk_start_queue(msb->queue);
	spin_unlock_irqrestore(&msb->q_lock, flags);

	queue_work(msb->io_queue, &msb->io_work);
}

static const struct block_device_operations msb_bdops = {
	.open    = msb_bd_open,
	.release = msb_bd_release,
	.getgeo  = msb_bd_getgeo,
	.owner   = THIS_MODULE
};

/* Registers the block device */
static int msb_init_disk(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	int rc;
	unsigned long capacity;

	mutex_lock(&msb_disk_lock);
	msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
	mutex_unlock(&msb_disk_lock);

	if (msb->disk_id < 0)
		return msb->disk_id;

	msb->disk = alloc_disk(0);
	if (!msb->disk) {
		rc = -ENOMEM;
		goto out_release_id;
	}

	msb->queue = blk_init_queue(msb_submit_req, &msb->q_lock);
	if (!msb->queue) {
		rc = -ENOMEM;
		goto out_put_disk;
	}

	msb->queue->queuedata = card;

	blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
	blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
	blk_queue_max_segment_size(msb->queue,
				   MS_BLOCK_MAX_PAGES * msb->page_size);
	blk_queue_logical_block_size(msb->queue, msb->page_size);
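	/*
	 * Advertising the device page size as the logical block size means
	 * the block layer only issues page-aligned requests whose length is
	 * a multiple of a page, so the read/write paths never have to split
	 * a page.
	 */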

	sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
	msb->disk->fops = &msb_bdops;
	msb->disk->private_data = msb;
	msb->disk->queue = msb->queue;
	msb->disk->flags |= GENHD_FL_EXT_DEVT;

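	/*
	 * Capacity in 512-byte sectors: pages per block * number of logical
	 * blocks * sectors per page. Purely illustrative example: 2048-byte
	 * pages, 32 pages per block and 1000 logical blocks give
	 * 32 * 1000 * 4 = 128000 sectors (~62.5 MiB).
	 */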
	capacity = msb->pages_in_block * msb->logical_block_count;
	capacity *= (msb->page_size / 512);
	set_capacity(msb->disk, capacity);
	dbg("Set total disk size to %lu sectors", capacity);

	msb->usage_count = 1;
	msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
	if (!msb->io_queue) {
		rc = -ENOMEM;
		goto out_cleanup_queue;
	}

	INIT_WORK(&msb->io_work, msb_io_work);
	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);

	if (msb->read_only)
		set_disk_ro(msb->disk, 1);

	msb_start(card);
	device_add_disk(&card->dev, msb->disk);
	dbg("Disk added");
	return 0;

out_cleanup_queue:
	blk_cleanup_queue(msb->queue);
out_put_disk:
	put_disk(msb->disk);
out_release_id:
	mutex_lock(&msb_disk_lock);
	idr_remove(&msb_disk_idr, msb->disk_id);
	mutex_unlock(&msb_disk_lock);
	return rc;
}

static int msb_probe(struct memstick_dev *card)
{
	struct msb_data *msb;
	int rc = 0;

	msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
	if (!msb)
		return -ENOMEM;
	memstick_set_drvdata(card, msb);
	msb->card = card;
	spin_lock_init(&msb->q_lock);

	rc = msb_init_card(card);
	if (rc)
		goto out_free;

	rc = msb_init_disk(card);
	if (!rc) {
		card->check = msb_check_card;
		card->stop = msb_stop;
		card->start = msb_start;
		return 0;
	}
out_free:
	memstick_set_drvdata(card, NULL);
	msb_data_clear(msb);
	kfree(msb);
	return rc;
}

static void msb_remove(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	if (!msb->io_queue_stopped)
		msb_stop(card);

	dbg("Removing the disk device");

	/* Take care of unhandled + new requests from now on */
	spin_lock_irqsave(&msb->q_lock, flags);
	msb->card_dead = true;
	blk_start_queue(msb->queue);
	spin_unlock_irqrestore(&msb->q_lock, flags);

	/* Remove the disk */
	del_gendisk(msb->disk);
	blk_cleanup_queue(msb->queue);
	msb->queue = NULL;

	mutex_lock(&msb_disk_lock);
	msb_data_clear(msb);
	mutex_unlock(&msb_disk_lock);

	msb_disk_release(msb->disk);
	memstick_set_drvdata(card, NULL);
}

#ifdef CONFIG_PM

static int msb_suspend(struct memstick_dev *card, pm_message_t state)
{
	msb_stop(card);
	return 0;
}

static int msb_resume(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct msb_data *new_msb = NULL;
	bool card_dead = true;

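	/*
	 * Without CONFIG_MEMSTICK_UNSAFE_RESUME we must assume the media
	 * was swapped while suspended, so the card is marked dead.
	 * Otherwise, re-probe the card into a scratch msb_data and compare
	 * its boot page, FTL table and used-block bitmap against the saved
	 * state; only if everything matches is it safe to keep the old
	 * state and resume IO.
	 */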
#ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
	msb->card_dead = true;
	return 0;
#endif
	mutex_lock(&card->host->lock);

	new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
	if (!new_msb)
		goto out;

	new_msb->card = card;
	memstick_set_drvdata(card, new_msb);
	spin_lock_init(&new_msb->q_lock);
	/* initialize the scratch instance's sg table, not the live one */
	sg_init_table(new_msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);

	if (msb_init_card(card))
		goto out;

	if (msb->block_size != new_msb->block_size)
		goto out;

	if (memcmp(msb->boot_page, new_msb->boot_page,
					sizeof(struct ms_boot_page)))
		goto out;

	if (msb->logical_block_count != new_msb->logical_block_count ||
		memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
						msb->logical_block_count))
		goto out;

	if (msb->block_count != new_msb->block_count ||
		memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
							msb->block_count / 8))
		goto out;

	card_dead = false;
out:
	if (card_dead)
		dbg("Card was removed/replaced during suspend");

	msb->card_dead = card_dead;
	memstick_set_drvdata(card, msb);

	if (new_msb) {
		msb_data_clear(new_msb);
		kfree(new_msb);
	}

	msb_start(card);
	mutex_unlock(&card->host->lock);
	return 0;
}
#else

#define msb_suspend NULL
#define msb_resume NULL

#endif /* CONFIG_PM */

static struct memstick_device_id msb_id_tbl[] = {
	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_FLASH},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_ROM},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_RO},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_WP},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
	 MEMSTICK_CLASS_DUO},
	{}
};
MODULE_DEVICE_TABLE(memstick, msb_id_tbl);

static struct memstick_driver msb_driver = {
	.driver = {
		.name  = DRIVER_NAME,
		.owner = THIS_MODULE
	},
	.id_table = msb_id_tbl,
	.probe    = msb_probe,
	.remove   = msb_remove,
	.suspend  = msb_suspend,
	.resume   = msb_resume
};

static int __init msb_init(void)
{
	int rc = memstick_register_driver(&msb_driver);

	if (rc)
		pr_err("failed to register memstick driver (error %d)\n", rc);

	return rc;
}

static void __exit msb_exit(void)
{
	memstick_unregister_driver(&msb_driver);
	idr_destroy(&msb_disk_idr);
}

module_init(msb_init);
module_exit(msb_exit);

module_param(cache_flush_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cache_flush_timeout,
				"Cache flush timeout in msec (1000 default)");
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");

module_param(verify_writes, bool, S_IRUGO);
MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky");
MODULE_DESCRIPTION("Sony MemoryStick block device driver");