1 /*
2  * Copyright (C) 2016 CNEX Labs
3  * Initial release: Javier Gonzalez <javier@cnexlabs.com>
4  *                  Matias Bjorling <matias@cnexlabs.com>
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License version
8  * 2 as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License for more details.
14  *
15  * pblk-core.c - pblk's core functionality
16  *
17  */
18 
19 #include "pblk.h"
20 
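/* Workqueue callback that marks a grown bad block in the device bad block
 * table; the ppa to mark is passed through line_ws->priv. Failures are only
 * logged.
 */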
21 static void pblk_line_mark_bb(struct work_struct *work)
22 {
23 	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
24 									ws);
25 	struct pblk *pblk = line_ws->pblk;
26 	struct nvm_tgt_dev *dev = pblk->dev;
27 	struct ppa_addr *ppa = line_ws->priv;
28 	int ret;
29 
30 	ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
31 	if (ret) {
32 		struct pblk_line *line;
33 		int pos;
34 
35 		line = &pblk->lines[pblk_ppa_to_line(*ppa)];
36 		pos = pblk_ppa_to_pos(&dev->geo, *ppa);
37 
38 		pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
39 				line->id, pos);
40 	}
41 
42 	kfree(ppa);
43 	mempool_free(line_ws, &pblk->gen_ws_pool);
44 }
45 
46 static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
47 			 struct ppa_addr ppa_addr)
48 {
49 	struct nvm_tgt_dev *dev = pblk->dev;
50 	struct nvm_geo *geo = &dev->geo;
51 	struct ppa_addr *ppa;
52 	int pos = pblk_ppa_to_pos(geo, ppa_addr);
53 
54 	pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
55 	atomic_long_inc(&pblk->erase_failed);
56 
57 	atomic_dec(&line->blk_in_line);
58 	if (test_and_set_bit(pos, line->blk_bitmap))
59 		pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
60 							line->id, pos);
61 
62 	/* Not necessary to mark bad blocks on 2.0 spec. */
63 	if (geo->version == NVM_OCSSD_SPEC_20)
64 		return;
65 
66 	ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
67 	if (!ppa)
68 		return;
69 
70 	*ppa = ppa_addr;
71 	pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
72 						GFP_ATOMIC, pblk->bb_wq);
73 }
74 
75 static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
76 {
77 	struct nvm_tgt_dev *dev = pblk->dev;
78 	struct nvm_geo *geo = &dev->geo;
79 	struct nvm_chk_meta *chunk;
80 	struct pblk_line *line;
81 	int pos;
82 
83 	line = &pblk->lines[pblk_ppa_to_line(rqd->ppa_addr)];
84 	pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
85 	chunk = &line->chks[pos];
86 
87 	atomic_dec(&line->left_seblks);
88 
89 	if (rqd->error) {
90 		chunk->state = NVM_CHK_ST_OFFLINE;
91 		pblk_mark_bb(pblk, line, rqd->ppa_addr);
92 	} else {
93 		chunk->state = NVM_CHK_ST_FREE;
94 	}
95 
96 	atomic_dec(&pblk->inflight_io);
97 }
98 
99 /* Erase completion assumes that only one block is erased at a time */
100 static void pblk_end_io_erase(struct nvm_rq *rqd)
101 {
102 	struct pblk *pblk = rqd->private;
103 
104 	__pblk_end_io_erase(pblk, rqd);
105 	mempool_free(rqd, &pblk->e_rq_pool);
106 }
107 
108 /*
109  * Get information for all chunks from the device.
110  *
111  * The caller is responsible for freeing the returned structure
112  */
113 struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk)
114 {
115 	struct nvm_tgt_dev *dev = pblk->dev;
116 	struct nvm_geo *geo = &dev->geo;
117 	struct nvm_chk_meta *meta;
118 	struct ppa_addr ppa;
119 	unsigned long len;
120 	int ret;
121 
122 	ppa.ppa = 0;
123 
124 	len = geo->all_chunks * sizeof(*meta);
125 	meta = kzalloc(len, GFP_KERNEL);
126 	if (!meta)
127 		return ERR_PTR(-ENOMEM);
128 
129 	ret = nvm_get_chunk_meta(dev, meta, ppa, geo->all_chunks);
130 	if (ret) {
131 		kfree(meta);
132 		return ERR_PTR(-EIO);
133 	}
134 
135 	return meta;
136 }
137 
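/* Return a pointer to the chunk metadata entry for @ppa within the array
 * returned by pblk_chunk_get_info(), indexed by group, parallel unit and
 * chunk.
 */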
138 struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
139 					      struct nvm_chk_meta *meta,
140 					      struct ppa_addr ppa)
141 {
142 	struct nvm_tgt_dev *dev = pblk->dev;
143 	struct nvm_geo *geo = &dev->geo;
144 	int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
145 	int lun_off = ppa.m.pu * geo->num_chk;
146 	int chk_off = ppa.m.chk;
147 
148 	return meta + ch_off + lun_off + chk_off;
149 }
150 
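/* Invalidate the sector at @paddr on @line: set its invalid bitmap bit,
 * decrement the valid sector count and, if the line is closed, move it to
 * the GC list that matches its new valid sector count.
 */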
151 void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
152 			   u64 paddr)
153 {
154 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
155 	struct list_head *move_list = NULL;
156 
157 	/* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
158 	 * table is modified with reclaimed sectors, a check is done to ensure
159 	 * that newer updates are not overwritten.
160 	 */
161 	spin_lock(&line->lock);
162 	WARN_ON(line->state == PBLK_LINESTATE_FREE);
163 
164 	if (test_and_set_bit(paddr, line->invalid_bitmap)) {
165 		WARN_ONCE(1, "pblk: double invalidate\n");
166 		spin_unlock(&line->lock);
167 		return;
168 	}
169 	le32_add_cpu(line->vsc, -1);
170 
171 	if (line->state == PBLK_LINESTATE_CLOSED)
172 		move_list = pblk_line_gc_list(pblk, line);
173 	spin_unlock(&line->lock);
174 
175 	if (move_list) {
176 		spin_lock(&l_mg->gc_lock);
177 		spin_lock(&line->lock);
178 		/* Prevent moving a line that has just been chosen for GC */
179 		if (line->state == PBLK_LINESTATE_GC) {
180 			spin_unlock(&line->lock);
181 			spin_unlock(&l_mg->gc_lock);
182 			return;
183 		}
184 		spin_unlock(&line->lock);
185 
186 		list_move_tail(&line->list, move_list);
187 		spin_unlock(&l_mg->gc_lock);
188 	}
189 }
190 
191 void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
192 {
193 	struct pblk_line *line;
194 	u64 paddr;
195 	int line_id;
196 
197 #ifdef CONFIG_NVM_PBLK_DEBUG
198 	/* Callers must ensure that the ppa points to a device address */
199 	BUG_ON(pblk_addr_in_cache(ppa));
200 	BUG_ON(pblk_ppa_empty(ppa));
201 #endif
202 
203 	line_id = pblk_ppa_to_line(ppa);
204 	line = &pblk->lines[line_id];
205 	paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
206 
207 	__pblk_map_invalidate(pblk, line, paddr);
208 }
209 
210 static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
211 				  unsigned int nr_secs)
212 {
213 	sector_t lba;
214 
215 	spin_lock(&pblk->trans_lock);
216 	for (lba = slba; lba < slba + nr_secs; lba++) {
217 		struct ppa_addr ppa;
218 
219 		ppa = pblk_trans_map_get(pblk, lba);
220 
221 		if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
222 			pblk_map_invalidate(pblk, ppa);
223 
224 		pblk_ppa_set_empty(&ppa);
225 		pblk_trans_map_set(pblk, lba, ppa);
226 	}
227 	spin_unlock(&pblk->trans_lock);
228 }
229 
230 /* Caller must guarantee that the request is a valid type */
231 struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
232 {
233 	mempool_t *pool;
234 	struct nvm_rq *rqd;
235 	int rq_size;
236 
237 	switch (type) {
238 	case PBLK_WRITE:
239 	case PBLK_WRITE_INT:
240 		pool = &pblk->w_rq_pool;
241 		rq_size = pblk_w_rq_size;
242 		break;
243 	case PBLK_READ:
244 		pool = &pblk->r_rq_pool;
245 		rq_size = pblk_g_rq_size;
246 		break;
247 	default:
248 		pool = &pblk->e_rq_pool;
249 		rq_size = pblk_g_rq_size;
250 	}
251 
252 	rqd = mempool_alloc(pool, GFP_KERNEL);
253 	memset(rqd, 0, rq_size);
254 
255 	return rqd;
256 }
257 
258 /* Typically used on completion path. Cannot guarantee request consistency */
259 void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
260 {
261 	struct nvm_tgt_dev *dev = pblk->dev;
262 	mempool_t *pool;
263 
264 	switch (type) {
265 	case PBLK_WRITE:
266 		kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
267 		/* fall through */
268 	case PBLK_WRITE_INT:
269 		pool = &pblk->w_rq_pool;
270 		break;
271 	case PBLK_READ:
272 		pool = &pblk->r_rq_pool;
273 		break;
274 	case PBLK_ERASE:
275 		pool = &pblk->e_rq_pool;
276 		break;
277 	default:
278 		pblk_err(pblk, "trying to free unknown rqd type\n");
279 		return;
280 	}
281 
282 	if (rqd->meta_list)
283 		nvm_dev_dma_free(dev->parent, rqd->meta_list,
284 				rqd->dma_meta_list);
285 	mempool_free(rqd, pool);
286 }
287 
288 void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
289 			 int nr_pages)
290 {
291 	struct bio_vec bv;
292 	int i;
293 
294 	WARN_ON(off + nr_pages != bio->bi_vcnt);
295 
296 	for (i = off; i < nr_pages + off; i++) {
297 		bv = bio->bi_io_vec[i];
298 		mempool_free(bv.bv_page, &pblk->page_bio_pool);
299 	}
300 }
301 
302 int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
303 		       int nr_pages)
304 {
305 	struct request_queue *q = pblk->dev->q;
306 	struct page *page;
307 	int i, ret;
308 
309 	for (i = 0; i < nr_pages; i++) {
310 		page = mempool_alloc(&pblk->page_bio_pool, flags);
311 
312 		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
313 		if (ret != PBLK_EXPOSED_PAGE_SIZE) {
314 			pblk_err(pblk, "could not add page to bio\n");
315 			mempool_free(page, &pblk->page_bio_pool);
316 			goto err;
317 		}
318 	}
319 
320 	return 0;
321 err:
322 	pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
323 	return -1;
324 }
325 
326 void pblk_write_kick(struct pblk *pblk)
327 {
328 	wake_up_process(pblk->writer_ts);
329 	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
330 }
331 
332 void pblk_write_timer_fn(struct timer_list *t)
333 {
334 	struct pblk *pblk = from_timer(pblk, t, wtimer);
335 
336 	/* kick the write thread every tick to flush outstanding data */
337 	pblk_write_kick(pblk);
338 }
339 
340 void pblk_write_should_kick(struct pblk *pblk)
341 {
342 	unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);
343 
344 	if (secs_avail >= pblk->min_write_pgs)
345 		pblk_write_kick(pblk);
346 }
347 
348 static void pblk_wait_for_meta(struct pblk *pblk)
349 {
350 	do {
351 		if (!atomic_read(&pblk->inflight_io))
352 			break;
353 
354 		schedule();
355 	} while (1);
356 }
357 
358 static void pblk_flush_writer(struct pblk *pblk)
359 {
360 	pblk_rb_flush(&pblk->rwb);
361 	do {
362 		if (!pblk_rb_sync_count(&pblk->rwb))
363 			break;
364 
365 		pblk_write_kick(pblk);
366 		schedule();
367 	} while (1);
368 }
369 
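/* Pick the GC list a line belongs to based on its valid sector count and
 * write-error status. Returns the list to move the line to when its gc_group
 * changes, or NULL if it stays where it is. Caller holds line->lock.
 */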
370 struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
371 {
372 	struct pblk_line_meta *lm = &pblk->lm;
373 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
374 	struct list_head *move_list = NULL;
375 	int vsc = le32_to_cpu(*line->vsc);
376 
377 	lockdep_assert_held(&line->lock);
378 
379 	if (line->w_err_gc->has_write_err) {
380 		if (line->gc_group != PBLK_LINEGC_WERR) {
381 			line->gc_group = PBLK_LINEGC_WERR;
382 			move_list = &l_mg->gc_werr_list;
383 			pblk_rl_werr_line_in(&pblk->rl);
384 		}
385 	} else if (!vsc) {
386 		if (line->gc_group != PBLK_LINEGC_FULL) {
387 			line->gc_group = PBLK_LINEGC_FULL;
388 			move_list = &l_mg->gc_full_list;
389 		}
390 	} else if (vsc < lm->high_thrs) {
391 		if (line->gc_group != PBLK_LINEGC_HIGH) {
392 			line->gc_group = PBLK_LINEGC_HIGH;
393 			move_list = &l_mg->gc_high_list;
394 		}
395 	} else if (vsc < lm->mid_thrs) {
396 		if (line->gc_group != PBLK_LINEGC_MID) {
397 			line->gc_group = PBLK_LINEGC_MID;
398 			move_list = &l_mg->gc_mid_list;
399 		}
400 	} else if (vsc < line->sec_in_line) {
401 		if (line->gc_group != PBLK_LINEGC_LOW) {
402 			line->gc_group = PBLK_LINEGC_LOW;
403 			move_list = &l_mg->gc_low_list;
404 		}
405 	} else if (vsc == line->sec_in_line) {
406 		if (line->gc_group != PBLK_LINEGC_EMPTY) {
407 			line->gc_group = PBLK_LINEGC_EMPTY;
408 			move_list = &l_mg->gc_empty_list;
409 		}
410 	} else {
411 		line->state = PBLK_LINESTATE_CORRUPT;
412 		line->gc_group = PBLK_LINEGC_NONE;
413 		move_list =  &l_mg->corrupt_list;
414 		pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
415 						line->id, vsc,
416 						line->sec_in_line,
417 						lm->high_thrs, lm->mid_thrs);
418 	}
419 
420 	return move_list;
421 }
422 
423 void pblk_discard(struct pblk *pblk, struct bio *bio)
424 {
425 	sector_t slba = pblk_get_lba(bio);
426 	sector_t nr_secs = pblk_get_secs(bio);
427 
428 	pblk_invalidate_range(pblk, slba, nr_secs);
429 }
430 
431 void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
432 {
433 	atomic_long_inc(&pblk->write_failed);
434 #ifdef CONFIG_NVM_PBLK_DEBUG
435 	pblk_print_failed_rqd(pblk, rqd, rqd->error);
436 #endif
437 }
438 
439 void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
440 {
441 	/* Empty page read is not necessarily an error (e.g., L2P recovery) */
442 	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
443 		atomic_long_inc(&pblk->read_empty);
444 		return;
445 	}
446 
447 	switch (rqd->error) {
448 	case NVM_RSP_WARN_HIGHECC:
449 		atomic_long_inc(&pblk->read_high_ecc);
450 		break;
451 	case NVM_RSP_ERR_FAILECC:
452 	case NVM_RSP_ERR_FAILCRC:
453 		atomic_long_inc(&pblk->read_failed);
454 		break;
455 	default:
456 		pblk_err(pblk, "unknown read error:%d\n", rqd->error);
457 	}
458 #ifdef CONFIG_NVM_PBLK_DEBUG
459 	pblk_print_failed_rqd(pblk, rqd, rqd->error);
460 #endif
461 }
462 
463 void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
464 {
465 	pblk->sec_per_write = sec_per_write;
466 }
467 
468 int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
469 {
470 	struct nvm_tgt_dev *dev = pblk->dev;
471 
472 	atomic_inc(&pblk->inflight_io);
473 
474 #ifdef CONFIG_NVM_PBLK_DEBUG
475 	if (pblk_check_io(pblk, rqd))
476 		return NVM_IO_ERR;
477 #endif
478 
479 	return nvm_submit_io(dev, rqd);
480 }
481 
482 int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
483 {
484 	struct nvm_tgt_dev *dev = pblk->dev;
485 
486 	atomic_inc(&pblk->inflight_io);
487 
488 #ifdef CONFIG_NVM_PBLK_DEBUG
489 	if (pblk_check_io(pblk, rqd))
490 		return NVM_IO_ERR;
491 #endif
492 
493 	return nvm_submit_io_sync(dev, rqd);
494 }
495 
496 static void pblk_bio_map_addr_endio(struct bio *bio)
497 {
498 	bio_put(bio);
499 }
500 
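/* Build a bio over a metadata buffer. Buffers allocated with kmalloc
 * (PBLK_KMALLOC_META) are mapped directly with bio_map_kern(); vmalloc'ed
 * buffers are added page by page via vmalloc_to_page().
 */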
501 struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
502 			      unsigned int nr_secs, unsigned int len,
503 			      int alloc_type, gfp_t gfp_mask)
504 {
505 	struct nvm_tgt_dev *dev = pblk->dev;
506 	void *kaddr = data;
507 	struct page *page;
508 	struct bio *bio;
509 	int i, ret;
510 
511 	if (alloc_type == PBLK_KMALLOC_META)
512 		return bio_map_kern(dev->q, kaddr, len, gfp_mask);
513 
514 	bio = bio_kmalloc(gfp_mask, nr_secs);
515 	if (!bio)
516 		return ERR_PTR(-ENOMEM);
517 
518 	for (i = 0; i < nr_secs; i++) {
519 		page = vmalloc_to_page(kaddr);
520 		if (!page) {
521 			pblk_err(pblk, "could not map vmalloc bio\n");
522 			bio_put(bio);
523 			bio = ERR_PTR(-ENOMEM);
524 			goto out;
525 		}
526 
527 		ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
528 		if (ret != PAGE_SIZE) {
529 			pblk_err(pblk, "could not add page to bio\n");
530 			bio_put(bio);
531 			bio = ERR_PTR(-ENOMEM);
532 			goto out;
533 		}
534 
535 		kaddr += PAGE_SIZE;
536 	}
537 
538 	bio->bi_end_io = pblk_bio_map_addr_endio;
539 out:
540 	return bio;
541 }
542 
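/* Compute how many sectors to submit in the next write: up to sec_per_write,
 * rounded down to a multiple of min_write_pgs. If fewer than min_write_pgs
 * sectors are available, a minimal write is only issued when a flush is
 * pending.
 */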
543 int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
544 		   unsigned long secs_to_flush)
545 {
546 	int max = pblk->sec_per_write;
547 	int min = pblk->min_write_pgs;
548 	int secs_to_sync = 0;
549 
550 	if (secs_avail >= max)
551 		secs_to_sync = max;
552 	else if (secs_avail >= min)
553 		secs_to_sync = min * (secs_avail / min);
554 	else if (secs_to_flush)
555 		secs_to_sync = min;
556 
557 	return secs_to_sync;
558 }
559 
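/* Roll back the last @nr_secs sectors allocated on @line, clearing their map
 * bitmap bits and rewinding the line's current sector pointer.
 */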
560 void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
561 {
562 	u64 addr;
563 	int i;
564 
565 	spin_lock(&line->lock);
566 	addr = find_next_zero_bit(line->map_bitmap,
567 					pblk->lm.sec_per_line, line->cur_sec);
568 	line->cur_sec = addr - nr_secs;
569 
570 	for (i = 0; i < nr_secs; i++, line->cur_sec--)
571 		WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
572 	spin_unlock(&line->lock);
573 }
574 
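/* Allocate @nr_secs consecutive sectors in the line's map bitmap starting at
 * the current write pointer and return the first paddr. Caller holds
 * line->lock.
 */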
575 u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
576 {
577 	u64 addr;
578 	int i;
579 
580 	lockdep_assert_held(&line->lock);
581 
582 	/* logic error: ppa out-of-bounds. Prevent generating bad address */
583 	if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
584 		WARN(1, "pblk: page allocation out of bounds\n");
585 		nr_secs = pblk->lm.sec_per_line - line->cur_sec;
586 	}
587 
588 	line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
589 					pblk->lm.sec_per_line, line->cur_sec);
590 	for (i = 0; i < nr_secs; i++, line->cur_sec++)
591 		WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));
592 
593 	return addr;
594 }
595 
596 u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
597 {
598 	u64 addr;
599 
600 	/* Lock needed in case a write fails and a recovery needs to remap
601 	 * failed write buffer entries
602 	 */
603 	spin_lock(&line->lock);
604 	addr = __pblk_alloc_page(pblk, line, nr_secs);
605 	line->left_msecs -= nr_secs;
606 	WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
607 	spin_unlock(&line->lock);
608 
609 	return addr;
610 }
611 
612 u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
613 {
614 	u64 paddr;
615 
616 	spin_lock(&line->lock);
617 	paddr = find_next_zero_bit(line->map_bitmap,
618 					pblk->lm.sec_per_line, line->cur_sec);
619 	spin_unlock(&line->lock);
620 
621 	return paddr;
622 }
623 
624 /*
625  * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
626  * taking the per LUN semaphore.
627  */
628 static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
629 				     void *emeta_buf, u64 paddr, int dir)
630 {
631 	struct nvm_tgt_dev *dev = pblk->dev;
632 	struct nvm_geo *geo = &dev->geo;
633 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
634 	struct pblk_line_meta *lm = &pblk->lm;
635 	void *ppa_list, *meta_list;
636 	struct bio *bio;
637 	struct nvm_rq rqd;
638 	dma_addr_t dma_ppa_list, dma_meta_list;
639 	int min = pblk->min_write_pgs;
640 	int left_ppas = lm->emeta_sec[0];
641 	int id = line->id;
642 	int rq_ppas, rq_len;
643 	int cmd_op, bio_op;
644 	int i, j;
645 	int ret;
646 
647 	if (dir == PBLK_WRITE) {
648 		bio_op = REQ_OP_WRITE;
649 		cmd_op = NVM_OP_PWRITE;
650 	} else if (dir == PBLK_READ) {
651 		bio_op = REQ_OP_READ;
652 		cmd_op = NVM_OP_PREAD;
653 	} else
654 		return -EINVAL;
655 
656 	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
657 							&dma_meta_list);
658 	if (!meta_list)
659 		return -ENOMEM;
660 
661 	ppa_list = meta_list + pblk_dma_meta_size;
662 	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
663 
664 next_rq:
665 	memset(&rqd, 0, sizeof(struct nvm_rq));
666 
667 	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
668 	rq_len = rq_ppas * geo->csecs;
669 
670 	bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
671 					l_mg->emeta_alloc_type, GFP_KERNEL);
672 	if (IS_ERR(bio)) {
673 		ret = PTR_ERR(bio);
674 		goto free_rqd_dma;
675 	}
676 
677 	bio->bi_iter.bi_sector = 0; /* internal bio */
678 	bio_set_op_attrs(bio, bio_op, 0);
679 
680 	rqd.bio = bio;
681 	rqd.meta_list = meta_list;
682 	rqd.ppa_list = ppa_list;
683 	rqd.dma_meta_list = dma_meta_list;
684 	rqd.dma_ppa_list = dma_ppa_list;
685 	rqd.opcode = cmd_op;
686 	rqd.nr_ppas = rq_ppas;
687 
688 	if (dir == PBLK_WRITE) {
689 		struct pblk_sec_meta *meta_list = rqd.meta_list;
690 
691 		rqd.flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
692 		for (i = 0; i < rqd.nr_ppas; ) {
693 			spin_lock(&line->lock);
694 			paddr = __pblk_alloc_page(pblk, line, min);
695 			spin_unlock(&line->lock);
696 			for (j = 0; j < min; j++, i++, paddr++) {
697 				meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
698 				rqd.ppa_list[i] =
699 					addr_to_gen_ppa(pblk, paddr, id);
700 			}
701 		}
702 	} else {
703 		for (i = 0; i < rqd.nr_ppas; ) {
704 			struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
705 			int pos = pblk_ppa_to_pos(geo, ppa);
706 			int read_type = PBLK_READ_RANDOM;
707 
708 			if (pblk_io_aligned(pblk, rq_ppas))
709 				read_type = PBLK_READ_SEQUENTIAL;
710 			rqd.flags = pblk_set_read_mode(pblk, read_type);
711 
712 			while (test_bit(pos, line->blk_bitmap)) {
713 				paddr += min;
714 				if (pblk_boundary_paddr_checks(pblk, paddr)) {
715 					pblk_err(pblk, "corrupt emeta line:%d\n",
716 								line->id);
717 					bio_put(bio);
718 					ret = -EINTR;
719 					goto free_rqd_dma;
720 				}
721 
722 				ppa = addr_to_gen_ppa(pblk, paddr, id);
723 				pos = pblk_ppa_to_pos(geo, ppa);
724 			}
725 
726 			if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
727 				pblk_err(pblk, "corrupt emeta line:%d\n",
728 								line->id);
729 				bio_put(bio);
730 				ret = -EINTR;
731 				goto free_rqd_dma;
732 			}
733 
734 			for (j = 0; j < min; j++, i++, paddr++)
735 				rqd.ppa_list[i] =
736 					addr_to_gen_ppa(pblk, paddr, line->id);
737 		}
738 	}
739 
740 	ret = pblk_submit_io_sync(pblk, &rqd);
741 	if (ret) {
742 		pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
743 		bio_put(bio);
744 		goto free_rqd_dma;
745 	}
746 
747 	atomic_dec(&pblk->inflight_io);
748 
749 	if (rqd.error) {
750 		if (dir == PBLK_WRITE)
751 			pblk_log_write_err(pblk, &rqd);
752 		else
753 			pblk_log_read_err(pblk, &rqd);
754 	}
755 
756 	emeta_buf += rq_len;
757 	left_ppas -= rq_ppas;
758 	if (left_ppas)
759 		goto next_rq;
760 free_rqd_dma:
761 	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
762 	return ret;
763 }
764 
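/* smeta is written at the start of the first good block in the line; return
 * its start paddr.
 */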
765 u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
766 {
767 	struct nvm_tgt_dev *dev = pblk->dev;
768 	struct nvm_geo *geo = &dev->geo;
769 	struct pblk_line_meta *lm = &pblk->lm;
770 	int bit;
771 
772 	/* This usually only happens on bad lines */
773 	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
774 	if (bit >= lm->blk_per_line)
775 		return -1;
776 
777 	return bit * geo->ws_opt;
778 }
779 
780 static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
781 				     u64 paddr, int dir)
782 {
783 	struct nvm_tgt_dev *dev = pblk->dev;
784 	struct pblk_line_meta *lm = &pblk->lm;
785 	struct bio *bio;
786 	struct nvm_rq rqd;
787 	__le64 *lba_list = NULL;
788 	int i, ret;
789 	int cmd_op, bio_op;
790 	int flags;
791 
792 	if (dir == PBLK_WRITE) {
793 		bio_op = REQ_OP_WRITE;
794 		cmd_op = NVM_OP_PWRITE;
795 		flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
796 		lba_list = emeta_to_lbas(pblk, line->emeta->buf);
797 	} else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
798 		bio_op = REQ_OP_READ;
799 		cmd_op = NVM_OP_PREAD;
800 		flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
801 	} else
802 		return -EINVAL;
803 
804 	memset(&rqd, 0, sizeof(struct nvm_rq));
805 
806 	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
807 							&rqd.dma_meta_list);
808 	if (!rqd.meta_list)
809 		return -ENOMEM;
810 
811 	rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
812 	rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
813 
814 	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
815 	if (IS_ERR(bio)) {
816 		ret = PTR_ERR(bio);
817 		goto free_ppa_list;
818 	}
819 
820 	bio->bi_iter.bi_sector = 0; /* internal bio */
821 	bio_set_op_attrs(bio, bio_op, 0);
822 
823 	rqd.bio = bio;
824 	rqd.opcode = cmd_op;
825 	rqd.flags = flags;
826 	rqd.nr_ppas = lm->smeta_sec;
827 
828 	for (i = 0; i < lm->smeta_sec; i++, paddr++) {
829 		struct pblk_sec_meta *meta_list = rqd.meta_list;
830 
831 		rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
832 
833 		if (dir == PBLK_WRITE) {
834 			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
835 
836 			meta_list[i].lba = lba_list[paddr] = addr_empty;
837 		}
838 	}
839 
840 	/*
841 	 * This I/O is sent by the write thread when a line is replaced. Since
842 	 * the write thread is the only one sending write and erase commands,
843 	 * there is no need to take the LUN semaphore.
844 	 */
845 	ret = pblk_submit_io_sync(pblk, &rqd);
846 	if (ret) {
847 		pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
848 		bio_put(bio);
849 		goto free_ppa_list;
850 	}
851 
852 	atomic_dec(&pblk->inflight_io);
853 
854 	if (rqd.error) {
855 		if (dir == PBLK_WRITE) {
856 			pblk_log_write_err(pblk, &rqd);
857 			ret = 1;
858 		} else if (dir == PBLK_READ)
859 			pblk_log_read_err(pblk, &rqd);
860 	}
861 
862 free_ppa_list:
863 	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
864 
865 	return ret;
866 }
867 
868 int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
869 {
870 	u64 bpaddr = pblk_line_smeta_start(pblk, line);
871 
872 	return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ_RECOV);
873 }
874 
875 int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
876 			 void *emeta_buf)
877 {
878 	return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
879 						line->emeta_ssec, PBLK_READ);
880 }
881 
882 static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
883 			    struct ppa_addr ppa)
884 {
885 	rqd->opcode = NVM_OP_ERASE;
886 	rqd->ppa_addr = ppa;
887 	rqd->nr_ppas = 1;
888 	rqd->flags = pblk_set_progr_mode(pblk, PBLK_ERASE);
889 	rqd->bio = NULL;
890 }
891 
892 static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
893 {
894 	struct nvm_rq rqd;
895 	int ret = 0;
896 
897 	memset(&rqd, 0, sizeof(struct nvm_rq));
898 
899 	pblk_setup_e_rq(pblk, &rqd, ppa);
900 
901 	/* The write thread schedules erases so that it minimizes disturbances
902 	 * with writes. Thus, there is no need to take the LUN semaphore.
903 	 */
904 	ret = pblk_submit_io_sync(pblk, &rqd);
905 	if (ret) {
906 		struct nvm_tgt_dev *dev = pblk->dev;
907 		struct nvm_geo *geo = &dev->geo;
908 
909 		pblk_err(pblk, "could not sync erase line:%d,blk:%d\n",
910 					pblk_ppa_to_line(ppa),
911 					pblk_ppa_to_pos(geo, ppa));
912 
913 		rqd.error = ret;
914 		goto out;
915 	}
916 
917 out:
918 	rqd.private = pblk;
919 	__pblk_end_io_erase(pblk, &rqd);
920 
921 	return ret;
922 }
923 
924 int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
925 {
926 	struct pblk_line_meta *lm = &pblk->lm;
927 	struct ppa_addr ppa;
928 	int ret, bit = -1;
929 
930 	/* Erase only good blocks, one at a time */
931 	do {
932 		spin_lock(&line->lock);
933 		bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
934 								bit + 1);
935 		if (bit >= lm->blk_per_line) {
936 			spin_unlock(&line->lock);
937 			break;
938 		}
939 
940 		ppa = pblk->luns[bit].bppa; /* set ch and lun */
941 		ppa.a.blk = line->id;
942 
943 		atomic_dec(&line->left_eblks);
944 		WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
945 		spin_unlock(&line->lock);
946 
947 		ret = pblk_blk_erase_sync(pblk, ppa);
948 		if (ret) {
949 			pblk_err(pblk, "failed to erase line %d\n", line->id);
950 			return ret;
951 		}
952 	} while (1);
953 
954 	return 0;
955 }
956 
957 static void pblk_line_setup_metadata(struct pblk_line *line,
958 				     struct pblk_line_mgmt *l_mg,
959 				     struct pblk_line_meta *lm)
960 {
961 	int meta_line;
962 
963 	lockdep_assert_held(&l_mg->free_lock);
964 
965 retry_meta:
966 	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
967 	if (meta_line == PBLK_DATA_LINES) {
968 		spin_unlock(&l_mg->free_lock);
969 		io_schedule();
970 		spin_lock(&l_mg->free_lock);
971 		goto retry_meta;
972 	}
973 
974 	set_bit(meta_line, &l_mg->meta_bitmap);
975 	line->meta_line = meta_line;
976 
977 	line->smeta = l_mg->sline_meta[meta_line];
978 	line->emeta = l_mg->eline_meta[meta_line];
979 
980 	memset(line->smeta, 0, lm->smeta_len);
981 	memset(line->emeta->buf, 0, lm->emeta_len[0]);
982 
983 	line->emeta->mem = 0;
984 	atomic_set(&line->emeta->sync, 0);
985 }
986 
987 /* For now lines are always assumed full lines. Thus, smeta former and current
988  * lun bitmaps are omitted.
989  */
990 static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
991 				  struct pblk_line *cur)
992 {
993 	struct nvm_tgt_dev *dev = pblk->dev;
994 	struct nvm_geo *geo = &dev->geo;
995 	struct pblk_line_meta *lm = &pblk->lm;
996 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
997 	struct pblk_emeta *emeta = line->emeta;
998 	struct line_emeta *emeta_buf = emeta->buf;
999 	struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
1000 	int nr_blk_line;
1001 
1002 	/* After erasing the line, new bad blocks might appear and we risk
1003 	 * having an invalid line
1004 	 */
1005 	nr_blk_line = lm->blk_per_line -
1006 			bitmap_weight(line->blk_bitmap, lm->blk_per_line);
1007 	if (nr_blk_line < lm->min_blk_line) {
1008 		spin_lock(&l_mg->free_lock);
1009 		spin_lock(&line->lock);
1010 		line->state = PBLK_LINESTATE_BAD;
1011 		spin_unlock(&line->lock);
1012 
1013 		list_add_tail(&line->list, &l_mg->bad_list);
1014 		spin_unlock(&l_mg->free_lock);
1015 
1016 		pblk_debug(pblk, "line %d is bad\n", line->id);
1017 
1018 		return 0;
1019 	}
1020 
1021 	/* Run-time metadata */
1022 	line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);
1023 
1024 	/* Mark LUNs allocated in this line (all for now) */
1025 	bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);
1026 
1027 	smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
1028 	memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
1029 	smeta_buf->header.id = cpu_to_le32(line->id);
1030 	smeta_buf->header.type = cpu_to_le16(line->type);
1031 	smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
1032 	smeta_buf->header.version_minor = SMETA_VERSION_MINOR;
1033 
1034 	/* Start metadata */
1035 	smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
1036 	smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);
1037 
1038 	/* Fill metadata among lines */
1039 	if (cur) {
1040 		memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
1041 		smeta_buf->prev_id = cpu_to_le32(cur->id);
1042 		cur->emeta->buf->next_id = cpu_to_le32(line->id);
1043 	} else {
1044 		smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
1045 	}
1046 
1047 	/* All smeta must be set at this point */
1048 	smeta_buf->header.crc = cpu_to_le32(
1049 			pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
1050 	smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));
1051 
1052 	/* End metadata */
1053 	memcpy(&emeta_buf->header, &smeta_buf->header,
1054 						sizeof(struct line_header));
1055 
1056 	emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
1057 	emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
1058 	emeta_buf->header.crc = cpu_to_le32(
1059 			pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
1060 
1061 	emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
1062 	emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
1063 	emeta_buf->nr_valid_lbas = cpu_to_le64(0);
1064 	emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
1065 	emeta_buf->crc = cpu_to_le32(0);
1066 	emeta_buf->prev_id = smeta_buf->prev_id;
1067 
1068 	return 1;
1069 }
1070 
1071 static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
1072 {
1073 	struct pblk_line_meta *lm = &pblk->lm;
1074 
1075 	line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
1076 	if (!line->map_bitmap)
1077 		return -ENOMEM;
1078 
1079 	/* will be initialized using bb info from map_bitmap */
1080 	line->invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
1081 	if (!line->invalid_bitmap) {
1082 		kfree(line->map_bitmap);
1083 		line->map_bitmap = NULL;
1084 		return -ENOMEM;
1085 	}
1086 
1087 	return 0;
1088 }
1089 
1090 /* For now lines are always assumed full lines. Thus, smeta former and current
1091  * lun bitmaps are omitted.
1092  */
1093 static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
1094 			     int init)
1095 {
1096 	struct nvm_tgt_dev *dev = pblk->dev;
1097 	struct nvm_geo *geo = &dev->geo;
1098 	struct pblk_line_meta *lm = &pblk->lm;
1099 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1100 	u64 off;
1101 	int bit = -1;
1102 	int emeta_secs;
1103 
1104 	line->sec_in_line = lm->sec_per_line;
1105 
1106 	/* Capture bad block information on line mapping bitmaps */
1107 	while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
1108 					bit + 1)) < lm->blk_per_line) {
1109 		off = bit * geo->ws_opt;
1110 		bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
1111 							lm->sec_per_line);
1112 		bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
1113 							lm->sec_per_line);
1114 		line->sec_in_line -= geo->clba;
1115 	}
1116 
1117 	/* Mark smeta metadata sectors as bad sectors */
1118 	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1119 	off = bit * geo->ws_opt;
1120 	bitmap_set(line->map_bitmap, off, lm->smeta_sec);
1121 	line->sec_in_line -= lm->smeta_sec;
1122 	line->smeta_ssec = off;
1123 	line->cur_sec = off + lm->smeta_sec;
1124 
1125 	if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
1126 		pblk_debug(pblk, "line smeta I/O failed. Retry\n");
1127 		return 0;
1128 	}
1129 
1130 	bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);
1131 
1132 	/* Mark emeta metadata sectors as bad sectors. We need to consider bad
1133 	 * blocks to make sure that there are enough sectors to store emeta
1134 	 */
1135 	emeta_secs = lm->emeta_sec[0];
1136 	off = lm->sec_per_line;
1137 	while (emeta_secs) {
1138 		off -= geo->ws_opt;
1139 		if (!test_bit(off, line->invalid_bitmap)) {
1140 			bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
1141 			emeta_secs -= geo->ws_opt;
1142 		}
1143 	}
1144 
1145 	line->emeta_ssec = off;
1146 	line->sec_in_line -= lm->emeta_sec[0];
1147 	line->nr_valid_lbas = 0;
1148 	line->left_msecs = line->sec_in_line;
1149 	*line->vsc = cpu_to_le32(line->sec_in_line);
1150 
1151 	if (lm->sec_per_line - line->sec_in_line !=
1152 		bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
1153 		spin_lock(&line->lock);
1154 		line->state = PBLK_LINESTATE_BAD;
1155 		spin_unlock(&line->lock);
1156 
1157 		list_add_tail(&line->list, &l_mg->bad_list);
1158 		pblk_err(pblk, "unexpected line %d is bad\n", line->id);
1159 
1160 		return 0;
1161 	}
1162 
1163 	return 1;
1164 }
1165 
1166 static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
1167 {
1168 	struct pblk_line_meta *lm = &pblk->lm;
1169 	struct nvm_tgt_dev *dev = pblk->dev;
1170 	struct nvm_geo *geo = &dev->geo;
1171 	int blk_to_erase = atomic_read(&line->blk_in_line);
1172 	int i;
1173 
1174 	for (i = 0; i < lm->blk_per_line; i++) {
1175 		struct pblk_lun *rlun = &pblk->luns[i];
1176 		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
1177 		int state = line->chks[pos].state;
1178 
1179 		/* Free chunks should not be erased */
1180 		if (state & NVM_CHK_ST_FREE) {
1181 			set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
1182 							line->erase_bitmap);
1183 			blk_to_erase--;
1184 		}
1185 	}
1186 
1187 	return blk_to_erase;
1188 }
1189 
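/* Transition a line from FREE (or NEW) to OPEN: work out how many blocks must
 * be erased and initialize the erase bookkeeping and line reference count.
 * Returns -EAGAIN if the line has too few good blocks and -EINTR if it is in
 * an unexpected state.
 */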
1190 static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
1191 {
1192 	struct pblk_line_meta *lm = &pblk->lm;
1193 	int blk_in_line = atomic_read(&line->blk_in_line);
1194 	int blk_to_erase;
1195 
1196 	/* Bad blocks do not need to be erased */
1197 	bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
1198 
1199 	spin_lock(&line->lock);
1200 
1201 	/* If we have not written to this line, we need to mark up free chunks
1202 	 * as already erased
1203 	 */
1204 	if (line->state == PBLK_LINESTATE_NEW) {
1205 		blk_to_erase = pblk_prepare_new_line(pblk, line);
1206 		line->state = PBLK_LINESTATE_FREE;
1207 	} else {
1208 		blk_to_erase = blk_in_line;
1209 	}
1210 
1211 	if (blk_in_line < lm->min_blk_line) {
1212 		spin_unlock(&line->lock);
1213 		return -EAGAIN;
1214 	}
1215 
1216 	if (line->state != PBLK_LINESTATE_FREE) {
1217 		WARN(1, "pblk: corrupted line %d, state %d\n",
1218 							line->id, line->state);
1219 		spin_unlock(&line->lock);
1220 		return -EINTR;
1221 	}
1222 
1223 	line->state = PBLK_LINESTATE_OPEN;
1224 
1225 	atomic_set(&line->left_eblks, blk_to_erase);
1226 	atomic_set(&line->left_seblks, blk_to_erase);
1227 
1228 	line->meta_distance = lm->meta_distance;
1229 	spin_unlock(&line->lock);
1230 
1231 	kref_init(&line->ref);
1232 
1233 	return 0;
1234 }
1235 
1236 int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
1237 {
1238 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1239 	int ret;
1240 
1241 	spin_lock(&l_mg->free_lock);
1242 	l_mg->data_line = line;
1243 	list_del(&line->list);
1244 
1245 	ret = pblk_line_prepare(pblk, line);
1246 	if (ret) {
1247 		list_add(&line->list, &l_mg->free_list);
1248 		spin_unlock(&l_mg->free_lock);
1249 		return ret;
1250 	}
1251 	spin_unlock(&l_mg->free_lock);
1252 
1253 	ret = pblk_line_alloc_bitmaps(pblk, line);
1254 	if (ret)
1255 		return ret;
1256 
1257 	if (!pblk_line_init_bb(pblk, line, 0)) {
1258 		list_add(&line->list, &l_mg->free_list);
1259 		return -EINTR;
1260 	}
1261 
1262 	pblk_rl_free_lines_dec(&pblk->rl, line, true);
1263 	return 0;
1264 }
1265 
1266 void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
1267 {
1268 	kfree(line->map_bitmap);
1269 	line->map_bitmap = NULL;
1270 	line->smeta = NULL;
1271 	line->emeta = NULL;
1272 }
1273 
1274 static void pblk_line_reinit(struct pblk_line *line)
1275 {
1276 	*line->vsc = cpu_to_le32(EMPTY_ENTRY);
1277 
1278 	line->map_bitmap = NULL;
1279 	line->invalid_bitmap = NULL;
1280 	line->smeta = NULL;
1281 	line->emeta = NULL;
1282 }
1283 
1284 void pblk_line_free(struct pblk_line *line)
1285 {
1286 	kfree(line->map_bitmap);
1287 	kfree(line->invalid_bitmap);
1288 
1289 	pblk_line_reinit(line);
1290 }
1291 
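/* Take the next line off the free list and prepare it. Called with
 * l_mg->free_lock held. Lines that turn out to be bad or corrupt are moved to
 * the corresponding lists and the next free line is tried.
 */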
1292 struct pblk_line *pblk_line_get(struct pblk *pblk)
1293 {
1294 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1295 	struct pblk_line_meta *lm = &pblk->lm;
1296 	struct pblk_line *line;
1297 	int ret, bit;
1298 
1299 	lockdep_assert_held(&l_mg->free_lock);
1300 
1301 retry:
1302 	if (list_empty(&l_mg->free_list)) {
1303 		pblk_err(pblk, "no free lines\n");
1304 		return NULL;
1305 	}
1306 
1307 	line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
1308 	list_del(&line->list);
1309 	l_mg->nr_free_lines--;
1310 
1311 	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1312 	if (unlikely(bit >= lm->blk_per_line)) {
1313 		spin_lock(&line->lock);
1314 		line->state = PBLK_LINESTATE_BAD;
1315 		spin_unlock(&line->lock);
1316 
1317 		list_add_tail(&line->list, &l_mg->bad_list);
1318 
1319 		pblk_debug(pblk, "line %d is bad\n", line->id);
1320 		goto retry;
1321 	}
1322 
1323 	ret = pblk_line_prepare(pblk, line);
1324 	if (ret) {
1325 		switch (ret) {
1326 		case -EAGAIN:
1327 			list_add(&line->list, &l_mg->bad_list);
1328 			goto retry;
1329 		case -EINTR:
1330 			list_add(&line->list, &l_mg->corrupt_list);
1331 			goto retry;
1332 		default:
1333 			pblk_err(pblk, "failed to prepare line %d\n", line->id);
1334 			list_add(&line->list, &l_mg->free_list);
1335 			l_mg->nr_free_lines++;
1336 			return NULL;
1337 		}
1338 	}
1339 
1340 	return line;
1341 }
1342 
1343 static struct pblk_line *pblk_line_retry(struct pblk *pblk,
1344 					 struct pblk_line *line)
1345 {
1346 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1347 	struct pblk_line *retry_line;
1348 
1349 retry:
1350 	spin_lock(&l_mg->free_lock);
1351 	retry_line = pblk_line_get(pblk);
1352 	if (!retry_line) {
1353 		l_mg->data_line = NULL;
1354 		spin_unlock(&l_mg->free_lock);
1355 		return NULL;
1356 	}
1357 
1358 	retry_line->map_bitmap = line->map_bitmap;
1359 	retry_line->invalid_bitmap = line->invalid_bitmap;
1360 	retry_line->smeta = line->smeta;
1361 	retry_line->emeta = line->emeta;
1362 	retry_line->meta_line = line->meta_line;
1363 
1364 	pblk_line_reinit(line);
1365 
1366 	l_mg->data_line = retry_line;
1367 	spin_unlock(&l_mg->free_lock);
1368 
1369 	pblk_rl_free_lines_dec(&pblk->rl, line, false);
1370 
1371 	if (pblk_line_erase(pblk, retry_line))
1372 		goto retry;
1373 
1374 	return retry_line;
1375 }
1376 
1377 static void pblk_set_space_limit(struct pblk *pblk)
1378 {
1379 	struct pblk_rl *rl = &pblk->rl;
1380 
1381 	atomic_set(&rl->rb_space, 0);
1382 }
1383 
1384 struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
1385 {
1386 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1387 	struct pblk_line *line;
1388 
1389 	spin_lock(&l_mg->free_lock);
1390 	line = pblk_line_get(pblk);
1391 	if (!line) {
1392 		spin_unlock(&l_mg->free_lock);
1393 		return NULL;
1394 	}
1395 
1396 	line->seq_nr = l_mg->d_seq_nr++;
1397 	line->type = PBLK_LINETYPE_DATA;
1398 	l_mg->data_line = line;
1399 
1400 	pblk_line_setup_metadata(line, l_mg, &pblk->lm);
1401 
1402 	/* Allocate next line for preparation */
1403 	l_mg->data_next = pblk_line_get(pblk);
1404 	if (!l_mg->data_next) {
1405 		/* If we cannot get a new line, we need to stop the pipeline.
1406 		 * Only allow as many writes in as we can store safely and then
1407 		 * fail gracefully
1408 		 */
1409 		pblk_set_space_limit(pblk);
1410 
1411 		l_mg->data_next = NULL;
1412 	} else {
1413 		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1414 		l_mg->data_next->type = PBLK_LINETYPE_DATA;
1415 	}
1416 	spin_unlock(&l_mg->free_lock);
1417 
1418 	if (pblk_line_alloc_bitmaps(pblk, line))
1419 		return NULL;
1420 
1421 	if (pblk_line_erase(pblk, line)) {
1422 		line = pblk_line_retry(pblk, line);
1423 		if (!line)
1424 			return NULL;
1425 	}
1426 
1427 retry_setup:
1428 	if (!pblk_line_init_metadata(pblk, line, NULL)) {
1429 		line = pblk_line_retry(pblk, line);
1430 		if (!line)
1431 			return NULL;
1432 
1433 		goto retry_setup;
1434 	}
1435 
1436 	if (!pblk_line_init_bb(pblk, line, 1)) {
1437 		line = pblk_line_retry(pblk, line);
1438 		if (!line)
1439 			return NULL;
1440 
1441 		goto retry_setup;
1442 	}
1443 
1444 	pblk_rl_free_lines_dec(&pblk->rl, line, true);
1445 
1446 	return line;
1447 }
1448 
1449 static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
1450 {
1451 	lockdep_assert_held(&pblk->l_mg.free_lock);
1452 
1453 	pblk_set_space_limit(pblk);
1454 	pblk->state = PBLK_STATE_STOPPING;
1455 }
1456 
1457 static void pblk_line_close_meta_sync(struct pblk *pblk)
1458 {
1459 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1460 	struct pblk_line_meta *lm = &pblk->lm;
1461 	struct pblk_line *line, *tline;
1462 	LIST_HEAD(list);
1463 
1464 	spin_lock(&l_mg->close_lock);
1465 	if (list_empty(&l_mg->emeta_list)) {
1466 		spin_unlock(&l_mg->close_lock);
1467 		return;
1468 	}
1469 
1470 	list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
1471 	spin_unlock(&l_mg->close_lock);
1472 
1473 	list_for_each_entry_safe(line, tline, &list, list) {
1474 		struct pblk_emeta *emeta = line->emeta;
1475 
1476 		while (emeta->mem < lm->emeta_len[0]) {
1477 			int ret;
1478 
1479 			ret = pblk_submit_meta_io(pblk, line);
1480 			if (ret) {
1481 				pblk_err(pblk, "sync meta line %d failed (%d)\n",
1482 							line->id, ret);
1483 				return;
1484 			}
1485 		}
1486 	}
1487 
1488 	pblk_wait_for_meta(pblk);
1489 	flush_workqueue(pblk->close_wq);
1490 }
1491 
1492 void __pblk_pipeline_flush(struct pblk *pblk)
1493 {
1494 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1495 	int ret;
1496 
1497 	spin_lock(&l_mg->free_lock);
1498 	if (pblk->state == PBLK_STATE_RECOVERING ||
1499 					pblk->state == PBLK_STATE_STOPPED) {
1500 		spin_unlock(&l_mg->free_lock);
1501 		return;
1502 	}
1503 	pblk->state = PBLK_STATE_RECOVERING;
1504 	spin_unlock(&l_mg->free_lock);
1505 
1506 	pblk_flush_writer(pblk);
1507 	pblk_wait_for_meta(pblk);
1508 
1509 	ret = pblk_recov_pad(pblk);
1510 	if (ret) {
1511 		pblk_err(pblk, "could not close data on teardown(%d)\n", ret);
1512 		return;
1513 	}
1514 
1515 	flush_workqueue(pblk->bb_wq);
1516 	pblk_line_close_meta_sync(pblk);
1517 }
1518 
1519 void __pblk_pipeline_stop(struct pblk *pblk)
1520 {
1521 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1522 
1523 	spin_lock(&l_mg->free_lock);
1524 	pblk->state = PBLK_STATE_STOPPED;
1525 	l_mg->data_line = NULL;
1526 	l_mg->data_next = NULL;
1527 	spin_unlock(&l_mg->free_lock);
1528 }
1529 
1530 void pblk_pipeline_stop(struct pblk *pblk)
1531 {
1532 	__pblk_pipeline_flush(pblk);
1533 	__pblk_pipeline_stop(pblk);
1534 }
1535 
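/* Switch writing to the line prepared in data_next: wait for its erases to
 * complete, set up its metadata and bad block bitmaps, and prepare a new
 * data_next. If no free line is available, stop the pipeline gracefully.
 */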
1536 struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
1537 {
1538 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1539 	struct pblk_line *cur, *new = NULL;
1540 	unsigned int left_seblks;
1541 
1542 	cur = l_mg->data_line;
1543 	new = l_mg->data_next;
1544 	if (!new)
1545 		goto out;
1546 	l_mg->data_line = new;
1547 
1548 	spin_lock(&l_mg->free_lock);
1549 	pblk_line_setup_metadata(new, l_mg, &pblk->lm);
1550 	spin_unlock(&l_mg->free_lock);
1551 
1552 retry_erase:
1553 	left_seblks = atomic_read(&new->left_seblks);
1554 	if (left_seblks) {
1555 		/* If line is not fully erased, erase it */
1556 		if (atomic_read(&new->left_eblks)) {
1557 			if (pblk_line_erase(pblk, new))
1558 				goto out;
1559 		} else {
1560 			io_schedule();
1561 		}
1562 		goto retry_erase;
1563 	}
1564 
1565 	if (pblk_line_alloc_bitmaps(pblk, new))
1566 		return NULL;
1567 
1568 retry_setup:
1569 	if (!pblk_line_init_metadata(pblk, new, cur)) {
1570 		new = pblk_line_retry(pblk, new);
1571 		if (!new)
1572 			goto out;
1573 
1574 		goto retry_setup;
1575 	}
1576 
1577 	if (!pblk_line_init_bb(pblk, new, 1)) {
1578 		new = pblk_line_retry(pblk, new);
1579 		if (!new)
1580 			goto out;
1581 
1582 		goto retry_setup;
1583 	}
1584 
1585 	pblk_rl_free_lines_dec(&pblk->rl, new, true);
1586 
1587 	/* Allocate next line for preparation */
1588 	spin_lock(&l_mg->free_lock);
1589 	l_mg->data_next = pblk_line_get(pblk);
1590 	if (!l_mg->data_next) {
1591 		/* If we cannot get a new line, we need to stop the pipeline.
1592 		 * Only allow as many writes in as we can store safely and then
1593 		 * fail gracefully
1594 		 */
1595 		pblk_stop_writes(pblk, new);
1596 		l_mg->data_next = NULL;
1597 	} else {
1598 		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1599 		l_mg->data_next->type = PBLK_LINETYPE_DATA;
1600 	}
1601 	spin_unlock(&l_mg->free_lock);
1602 
1603 out:
1604 	return new;
1605 }
1606 
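/* Return a fully garbage-collected line to the free list and update the
 * rate-limiter accounting.
 */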
1607 static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
1608 {
1609 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1610 	struct pblk_gc *gc = &pblk->gc;
1611 
1612 	spin_lock(&line->lock);
1613 	WARN_ON(line->state != PBLK_LINESTATE_GC);
1614 	line->state = PBLK_LINESTATE_FREE;
1615 	line->gc_group = PBLK_LINEGC_NONE;
1616 	pblk_line_free(line);
1617 
1618 	if (line->w_err_gc->has_write_err) {
1619 		pblk_rl_werr_line_out(&pblk->rl);
1620 		line->w_err_gc->has_write_err = 0;
1621 	}
1622 
1623 	spin_unlock(&line->lock);
1624 	atomic_dec(&gc->pipeline_gc);
1625 
1626 	spin_lock(&l_mg->free_lock);
1627 	list_add_tail(&line->list, &l_mg->free_list);
1628 	l_mg->nr_free_lines++;
1629 	spin_unlock(&l_mg->free_lock);
1630 
1631 	pblk_rl_free_lines_inc(&pblk->rl, line);
1632 }
1633 
1634 static void pblk_line_put_ws(struct work_struct *work)
1635 {
1636 	struct pblk_line_ws *line_put_ws = container_of(work,
1637 						struct pblk_line_ws, ws);
1638 	struct pblk *pblk = line_put_ws->pblk;
1639 	struct pblk_line *line = line_put_ws->line;
1640 
1641 	__pblk_line_put(pblk, line);
1642 	mempool_free(line_put_ws, &pblk->gen_ws_pool);
1643 }
1644 
1645 void pblk_line_put(struct kref *ref)
1646 {
1647 	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1648 	struct pblk *pblk = line->pblk;
1649 
1650 	__pblk_line_put(pblk, line);
1651 }
1652 
1653 void pblk_line_put_wq(struct kref *ref)
1654 {
1655 	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1656 	struct pblk *pblk = line->pblk;
1657 	struct pblk_line_ws *line_put_ws;
1658 
1659 	line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
1660 	if (!line_put_ws)
1661 		return;
1662 
1663 	line_put_ws->pblk = pblk;
1664 	line_put_ws->line = line;
1665 	line_put_ws->priv = NULL;
1666 
1667 	INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
1668 	queue_work(pblk->r_end_wq, &line_put_ws->ws);
1669 }
1670 
1671 int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
1672 {
1673 	struct nvm_rq *rqd;
1674 	int err;
1675 
1676 	rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
1677 
1678 	pblk_setup_e_rq(pblk, rqd, ppa);
1679 
1680 	rqd->end_io = pblk_end_io_erase;
1681 	rqd->private = pblk;
1682 
1683 	/* The write thread schedules erases so that it minimizes disturbances
1684 	 * with writes. Thus, there is no need to take the LUN semaphore.
1685 	 */
1686 	err = pblk_submit_io(pblk, rqd);
1687 	if (err) {
1688 		struct nvm_tgt_dev *dev = pblk->dev;
1689 		struct nvm_geo *geo = &dev->geo;
1690 
1691 		pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
1692 					pblk_ppa_to_line(ppa),
1693 					pblk_ppa_to_pos(geo, ppa));
1694 	}
1695 
1696 	return err;
1697 }
1698 
1699 struct pblk_line *pblk_line_get_data(struct pblk *pblk)
1700 {
1701 	return pblk->l_mg.data_line;
1702 }
1703 
1704 /* For now, always erase next line */
1705 struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
1706 {
1707 	return pblk->l_mg.data_next;
1708 }
1709 
1710 int pblk_line_is_full(struct pblk_line *line)
1711 {
1712 	return (line->left_msecs == 0);
1713 }
1714 
1715 static void pblk_line_should_sync_meta(struct pblk *pblk)
1716 {
1717 	if (pblk_rl_is_limit(&pblk->rl))
1718 		pblk_line_close_meta_sync(pblk);
1719 }
1720 
1721 void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
1722 {
1723 	struct nvm_tgt_dev *dev = pblk->dev;
1724 	struct nvm_geo *geo = &dev->geo;
1725 	struct pblk_line_meta *lm = &pblk->lm;
1726 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1727 	struct list_head *move_list;
1728 	int i;
1729 
1730 #ifdef CONFIG_NVM_PBLK_DEBUG
1731 	WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
1732 				"pblk: corrupt closed line %d\n", line->id);
1733 #endif
1734 
1735 	spin_lock(&l_mg->free_lock);
1736 	WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
1737 	spin_unlock(&l_mg->free_lock);
1738 
1739 	spin_lock(&l_mg->gc_lock);
1740 	spin_lock(&line->lock);
1741 	WARN_ON(line->state != PBLK_LINESTATE_OPEN);
1742 	line->state = PBLK_LINESTATE_CLOSED;
1743 	move_list = pblk_line_gc_list(pblk, line);
1744 
1745 	list_add_tail(&line->list, move_list);
1746 
1747 	kfree(line->map_bitmap);
1748 	line->map_bitmap = NULL;
1749 	line->smeta = NULL;
1750 	line->emeta = NULL;
1751 
1752 	for (i = 0; i < lm->blk_per_line; i++) {
1753 		struct pblk_lun *rlun = &pblk->luns[i];
1754 		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
1755 		int state = line->chks[pos].state;
1756 
1757 		if (!(state & NVM_CHK_ST_OFFLINE))
1758 			state = NVM_CHK_ST_CLOSED;
1759 	}
1760 
1761 	spin_unlock(&line->lock);
1762 	spin_unlock(&l_mg->gc_lock);
1763 }
1764 
1765 void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
1766 {
1767 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1768 	struct pblk_line_meta *lm = &pblk->lm;
1769 	struct pblk_emeta *emeta = line->emeta;
1770 	struct line_emeta *emeta_buf = emeta->buf;
1771 	struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);
1772 
1773 	/* No need for an exact vsc value; avoid a big line lock and take an approximation. */
1774 	memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
1775 	memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);
1776 
1777 	wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
1778 	wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
1779 	wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));
1780 
1781 	emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
1782 	emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
1783 
1784 	spin_lock(&l_mg->close_lock);
1785 	spin_lock(&line->lock);
1786 
1787 	/* Update the in-memory start address for emeta, in case it has
1788 	 * shifted due to write errors
1789 	 */
1790 	if (line->emeta_ssec != line->cur_sec)
1791 		line->emeta_ssec = line->cur_sec;
1792 
1793 	list_add_tail(&line->list, &l_mg->emeta_list);
1794 	spin_unlock(&line->lock);
1795 	spin_unlock(&l_mg->close_lock);
1796 
1797 	pblk_line_should_sync_meta(pblk);
1798 
1799 
1800 }
1801 
1802 static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
1803 {
1804 	struct pblk_line_meta *lm = &pblk->lm;
1805 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1806 	unsigned int lba_list_size = lm->emeta_len[2];
1807 	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1808 	struct pblk_emeta *emeta = line->emeta;
1809 
1810 	w_err_gc->lba_list = pblk_malloc(lba_list_size,
1811 					 l_mg->emeta_alloc_type, GFP_KERNEL);
1812 	memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
1813 				lba_list_size);
1814 }
1815 
1816 void pblk_line_close_ws(struct work_struct *work)
1817 {
1818 	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1819 									ws);
1820 	struct pblk *pblk = line_ws->pblk;
1821 	struct pblk_line *line = line_ws->line;
1822 	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1823 
1824 	/* Write errors make the emeta start address stored in smeta invalid,
1825 	 * so keep a copy of the lba list until we've gc'd the line
1826 	 */
1827 	if (w_err_gc->has_write_err)
1828 		pblk_save_lba_list(pblk, line);
1829 
1830 	pblk_line_close(pblk, line);
1831 	mempool_free(line_ws, &pblk->gen_ws_pool);
1832 }
1833 
1834 void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
1835 		      void (*work)(struct work_struct *), gfp_t gfp_mask,
1836 		      struct workqueue_struct *wq)
1837 {
1838 	struct pblk_line_ws *line_ws;
1839 
1840 	line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);
1841 
1842 	line_ws->pblk = pblk;
1843 	line_ws->line = line;
1844 	line_ws->priv = priv;
1845 
1846 	INIT_WORK(&line_ws->ws, work);
1847 	queue_work(wq, &line_ws->ws);
1848 }
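
/*
 * Usage sketch, based on how the write-completion path is assumed to defer
 * a line close to a workqueue: the caller only supplies the work function,
 * optional private data and the destination workqueue; allocation of the
 * pblk_line_ws context and queueing are handled here.
 *
 *	pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
 *						GFP_ATOMIC, pblk->close_wq);
 */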
1849 
1850 static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
1851 			     int nr_ppas, int pos)
1852 {
1853 	struct pblk_lun *rlun = &pblk->luns[pos];
1854 	int ret;
1855 
1856 	/*
1857 	 * Only send one inflight I/O per LUN. Since we map at a page
1858 	 * granularity, all ppas in the I/O will map to the same LUN
1859 	 */
1860 #ifdef CONFIG_NVM_PBLK_DEBUG
1861 	int i;
1862 
1863 	for (i = 1; i < nr_ppas; i++)
1864 		WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
1865 				ppa_list[0].a.ch != ppa_list[i].a.ch);
1866 #endif
1867 
1868 	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
1869 	if (ret == -ETIME || ret == -EINTR)
1870 		pblk_err(pblk, "taking lun semaphore timed out: err %d\n",
1871 				-ret);
1872 }
1873 
1874 void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
1875 {
1876 	struct nvm_tgt_dev *dev = pblk->dev;
1877 	struct nvm_geo *geo = &dev->geo;
1878 	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1879 
1880 	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
1881 }
1882 
1883 void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1884 		  unsigned long *lun_bitmap)
1885 {
1886 	struct nvm_tgt_dev *dev = pblk->dev;
1887 	struct nvm_geo *geo = &dev->geo;
1888 	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1889 
1890 	/* If the LUN has been locked for this same request, do not attempt to
1891 	 * lock it again
1892 	 */
1893 	if (test_and_set_bit(pos, lun_bitmap))
1894 		return;
1895 
1896 	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
1897 }
1898 
1899 void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
1900 {
1901 	struct nvm_tgt_dev *dev = pblk->dev;
1902 	struct nvm_geo *geo = &dev->geo;
1903 	struct pblk_lun *rlun;
1904 	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1905 
1906 #ifdef CONFIG_NVM_PBLK_DEBUG
1907 	int i;
1908 
1909 	for (i = 1; i < nr_ppas; i++)
1910 		WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
1911 				ppa_list[0].a.ch != ppa_list[i].a.ch);
1912 #endif
1913 
1914 	rlun = &pblk->luns[pos];
1915 	up(&rlun->wr_sem);
1916 }
1917 
1918 void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1919 		unsigned long *lun_bitmap)
1920 {
1921 	struct nvm_tgt_dev *dev = pblk->dev;
1922 	struct nvm_geo *geo = &dev->geo;
1923 	struct pblk_lun *rlun;
1924 	int num_lun = geo->all_luns;
1925 	int bit = -1;
1926 
1927 	while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
1928 		rlun = &pblk->luns[bit];
1929 		up(&rlun->wr_sem);
1930 	}
1931 }
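
/*
 * Pairing sketch (illustrative, not a verbatim call site): a write request
 * records every LUN it locks in lun_bitmap so that each semaphore is taken
 * at most once per request, and releases all recorded LUNs on completion.
 *
 *	pblk_down_rq(pblk, ppa_list, nr_ppas, lun_bitmap);
 *	... submit the request and wait for completion ...
 *	pblk_up_rq(pblk, ppa_list, nr_ppas, lun_bitmap);
 */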
1932 
1933 void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1934 {
1935 	struct ppa_addr ppa_l2p;
1936 
1937 	/* logic error: lba out-of-bounds. Ignore update */
1938 	if (!(lba < pblk->rl.nr_secs)) {
1939 		WARN(1, "pblk: corrupted L2P map request\n");
1940 		return;
1941 	}
1942 
1943 	spin_lock(&pblk->trans_lock);
1944 	ppa_l2p = pblk_trans_map_get(pblk, lba);
1945 
1946 	if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
1947 		pblk_map_invalidate(pblk, ppa_l2p);
1948 
1949 	pblk_trans_map_set(pblk, lba, ppa);
1950 	spin_unlock(&pblk->trans_lock);
1951 }
1952 
1953 void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1954 {
1955 
1956 #ifdef CONFIG_NVM_PBLK_DEBUG
1957 	/* Callers must ensure that the ppa points to a cache address */
1958 	BUG_ON(!pblk_addr_in_cache(ppa));
1959 	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
1960 #endif
1961 
1962 	pblk_update_map(pblk, lba, ppa);
1963 }
1964 
1965 int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
1966 		       struct pblk_line *gc_line, u64 paddr_gc)
1967 {
1968 	struct ppa_addr ppa_l2p, ppa_gc;
1969 	int ret = 1;
1970 
1971 #ifdef CONFIG_NVM_PBLK_DEBUG
1972 	/* Callers must ensure that the ppa points to a cache address */
1973 	BUG_ON(!pblk_addr_in_cache(ppa_new));
1974 	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
1975 #endif
1976 
1977 	/* logic error: lba out-of-bounds. Ignore update */
1978 	if (!(lba < pblk->rl.nr_secs)) {
1979 		WARN(1, "pblk: corrupted L2P map request\n");
1980 		return 0;
1981 	}
1982 
1983 	spin_lock(&pblk->trans_lock);
1984 	ppa_l2p = pblk_trans_map_get(pblk, lba);
1985 	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);
1986 
1987 	if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
1988 		spin_lock(&gc_line->lock);
1989 		WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
1990 						"pblk: corrupted GC update");
1991 		spin_unlock(&gc_line->lock);
1992 
1993 		ret = 0;
1994 		goto out;
1995 	}
1996 
1997 	pblk_trans_map_set(pblk, lba, ppa_new);
1998 out:
1999 	spin_unlock(&pblk->trans_lock);
2000 	return ret;
2001 }
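
/*
 * Expected caller behaviour (hypothetical snippet): the GC write path keeps
 * its copy of the sector only while the L2P still points at the physical
 * address it was read from. A return value of 0 means user I/O updated the
 * sector in the meantime, so the GC entry is dropped and treated as padding.
 *
 *	if (!pblk_update_map_gc(pblk, lba, cacheline, gc_line, paddr))
 *		w_ctx->lba = ADDR_EMPTY;
 */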
2002 
2003 void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
2004 			 struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
2005 {
2006 	struct ppa_addr ppa_l2p;
2007 
2008 #ifdef CONFIG_NVM_PBLK_DEBUG
2009 	/* Callers must ensure that the ppa points to a device address */
2010 	BUG_ON(pblk_addr_in_cache(ppa_mapped));
2011 #endif
2012 	/* Invalidate and discard padded entries */
2013 	if (lba == ADDR_EMPTY) {
2014 		atomic64_inc(&pblk->pad_wa);
2015 #ifdef CONFIG_NVM_PBLK_DEBUG
2016 		atomic_long_inc(&pblk->padded_wb);
2017 #endif
2018 		if (!pblk_ppa_empty(ppa_mapped))
2019 			pblk_map_invalidate(pblk, ppa_mapped);
2020 		return;
2021 	}
2022 
2023 	/* logic error: lba out-of-bounds. Ignore update */
2024 	if (!(lba < pblk->rl.nr_secs)) {
2025 		WARN(1, "pblk: corrupted L2P map request\n");
2026 		return;
2027 	}
2028 
2029 	spin_lock(&pblk->trans_lock);
2030 	ppa_l2p = pblk_trans_map_get(pblk, lba);
2031 
2032 	/* Do not update L2P if the cacheline has been updated. In this case,
2033 	 * the mapped ppa must be invalidated
2034 	 */
2035 	if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
2036 		if (!pblk_ppa_empty(ppa_mapped))
2037 			pblk_map_invalidate(pblk, ppa_mapped);
2038 		goto out;
2039 	}
2040 
2041 #ifdef CONFIG_NVM_PBLK_DEBUG
2042 	WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
2043 #endif
2044 
2045 	pblk_trans_map_set(pblk, lba, ppa_mapped);
2046 out:
2047 	spin_unlock(&pblk->trans_lock);
2048 }
2049 
2050 void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
2051 			 sector_t blba, int nr_secs)
2052 {
2053 	int i;
2054 
2055 	spin_lock(&pblk->trans_lock);
2056 	for (i = 0; i < nr_secs; i++) {
2057 		struct ppa_addr ppa;
2058 
2059 		ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);
2060 
2061 		/* If the L2P entry maps to a line, the reference is valid */
2062 		if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
2063 			int line_id = pblk_ppa_to_line(ppa);
2064 			struct pblk_line *line = &pblk->lines[line_id];
2065 
2066 			kref_get(&line->ref);
2067 		}
2068 	}
2069 	spin_unlock(&pblk->trans_lock);
2070 }
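
/*
 * Balancing sketch (assumption about the read path): each line reference
 * taken above pins the line against being freed while the read is in
 * flight, and must be dropped once the I/O to that ppa completes, e.g.:
 *
 *	struct pblk_line *line = &pblk->lines[pblk_ppa_to_line(ppa)];
 *
 *	kref_put(&line->ref, pblk_line_put_wq);
 */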
2071 
2072 void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
2073 			  u64 *lba_list, int nr_secs)
2074 {
2075 	u64 lba;
2076 	int i;
2077 
2078 	spin_lock(&pblk->trans_lock);
2079 	for (i = 0; i < nr_secs; i++) {
2080 		lba = lba_list[i];
2081 		if (lba != ADDR_EMPTY) {
2082 			/* logic error: lba out-of-bounds. Ignore update */
2083 			if (!(lba < pblk->rl.nr_secs)) {
2084 				WARN(1, "pblk: corrupted L2P map request\n");
2085 				continue;
2086 			}
2087 			ppas[i] = pblk_trans_map_get(pblk, lba);
2088 		}
2089 	}
2090 	spin_unlock(&pblk->trans_lock);
2091 }
2092