1 /*
2  * Copyright (C) 2016 CNEX Labs
3  * Initial release: Javier Gonzalez <javier@cnexlabs.com>
4  *                  Matias Bjorling <matias@cnexlabs.com>
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License version
8  * 2 as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License for more details.
14  *
15  * pblk-core.c - pblk's core functionality
16  *
17  */
18 
19 #include "pblk.h"
20 
21 static void pblk_line_mark_bb(struct work_struct *work)
22 {
23 	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
24 									ws);
25 	struct pblk *pblk = line_ws->pblk;
26 	struct nvm_tgt_dev *dev = pblk->dev;
27 	struct ppa_addr *ppa = line_ws->priv;
28 	int ret;
29 
30 	ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
31 	if (ret) {
32 		struct pblk_line *line;
33 		int pos;
34 
35 		line = &pblk->lines[pblk_ppa_to_line(*ppa)];
36 		pos = pblk_ppa_to_pos(&dev->geo, *ppa);
37 
38 		pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
39 				line->id, pos);
40 	}
41 
42 	kfree(ppa);
43 	mempool_free(line_ws, &pblk->gen_ws_pool);
44 }
45 
46 static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
47 			 struct ppa_addr ppa_addr)
48 {
49 	struct nvm_tgt_dev *dev = pblk->dev;
50 	struct nvm_geo *geo = &dev->geo;
51 	struct ppa_addr *ppa;
52 	int pos = pblk_ppa_to_pos(geo, ppa_addr);
53 
54 	pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
55 	atomic_long_inc(&pblk->erase_failed);
56 
57 	atomic_dec(&line->blk_in_line);
58 	if (test_and_set_bit(pos, line->blk_bitmap))
59 		pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
60 							line->id, pos);
61 
62 	/* Not necessary to mark bad blocks on 2.0 spec. */
63 	if (geo->version == NVM_OCSSD_SPEC_20)
64 		return;
65 
66 	ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
67 	if (!ppa)
68 		return;
69 
70 	*ppa = ppa_addr;
71 	pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
72 						GFP_ATOMIC, pblk->bb_wq);
73 }
74 
75 static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
76 {
77 	struct nvm_tgt_dev *dev = pblk->dev;
78 	struct nvm_geo *geo = &dev->geo;
79 	struct nvm_chk_meta *chunk;
80 	struct pblk_line *line;
81 	int pos;
82 
83 	line = &pblk->lines[pblk_ppa_to_line(rqd->ppa_addr)];
84 	pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
85 	chunk = &line->chks[pos];
86 
87 	atomic_dec(&line->left_seblks);
88 
89 	if (rqd->error) {
90 		chunk->state = NVM_CHK_ST_OFFLINE;
91 		pblk_mark_bb(pblk, line, rqd->ppa_addr);
92 	} else {
93 		chunk->state = NVM_CHK_ST_FREE;
94 	}
95 
96 	atomic_dec(&pblk->inflight_io);
97 }
98 
99 /* Erase completion assumes that only one block is erased at a time */
100 static void pblk_end_io_erase(struct nvm_rq *rqd)
101 {
102 	struct pblk *pblk = rqd->private;
103 
104 	__pblk_end_io_erase(pblk, rqd);
105 	mempool_free(rqd, &pblk->e_rq_pool);
106 }
107 
108 /*
109  * Get information for all chunks from the device.
110  *
111  * The caller is responsible for freeing the returned structure
112  */
113 struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk)
114 {
115 	struct nvm_tgt_dev *dev = pblk->dev;
116 	struct nvm_geo *geo = &dev->geo;
117 	struct nvm_chk_meta *meta;
118 	struct ppa_addr ppa;
119 	unsigned long len;
120 	int ret;
121 
122 	ppa.ppa = 0;
123 
124 	len = geo->all_chunks * sizeof(*meta);
125 	meta = kzalloc(len, GFP_KERNEL);
126 	if (!meta)
127 		return ERR_PTR(-ENOMEM);
128 
129 	ret = nvm_get_chunk_meta(dev, meta, ppa, geo->all_chunks);
130 	if (ret) {
131 		kfree(meta);
132 		return ERR_PTR(-EIO);
133 	}
134 
135 	return meta;
136 }
137 
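/*
 * Return a pointer into the flat chunk metadata array (as returned by
 * pblk_chunk_get_info()) for the chunk addressed by ppa. Entries are laid
 * out per channel group, then per parallel unit (LUN), then per chunk.
 */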
138 struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
139 					      struct nvm_chk_meta *meta,
140 					      struct ppa_addr ppa)
141 {
142 	struct nvm_tgt_dev *dev = pblk->dev;
143 	struct nvm_geo *geo = &dev->geo;
144 	int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
145 	int lun_off = ppa.m.pu * geo->num_chk;
146 	int chk_off = ppa.m.chk;
147 
148 	return meta + ch_off + lun_off + chk_off;
149 }
150 
151 void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
152 			   u64 paddr)
153 {
154 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
155 	struct list_head *move_list = NULL;
156 
157 	/* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
158 	 * table is modified with reclaimed sectors, a check is done to ensure
159 	 * that newer updates are not overwritten.
160 	 */
161 	spin_lock(&line->lock);
162 	WARN_ON(line->state == PBLK_LINESTATE_FREE);
163 
164 	if (test_and_set_bit(paddr, line->invalid_bitmap)) {
165 		WARN_ONCE(1, "pblk: double invalidate\n");
166 		spin_unlock(&line->lock);
167 		return;
168 	}
169 	le32_add_cpu(line->vsc, -1);
170 
171 	if (line->state == PBLK_LINESTATE_CLOSED)
172 		move_list = pblk_line_gc_list(pblk, line);
173 	spin_unlock(&line->lock);
174 
175 	if (move_list) {
176 		spin_lock(&l_mg->gc_lock);
177 		spin_lock(&line->lock);
178 		/* Prevent moving a line that has just been chosen for GC */
179 		if (line->state == PBLK_LINESTATE_GC) {
180 			spin_unlock(&line->lock);
181 			spin_unlock(&l_mg->gc_lock);
182 			return;
183 		}
184 		spin_unlock(&line->lock);
185 
186 		list_move_tail(&line->list, move_list);
187 		spin_unlock(&l_mg->gc_lock);
188 	}
189 }
190 
191 void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
192 {
193 	struct pblk_line *line;
194 	u64 paddr;
195 	int line_id;
196 
197 #ifdef CONFIG_NVM_PBLK_DEBUG
198 	/* Callers must ensure that the ppa points to a device address */
199 	BUG_ON(pblk_addr_in_cache(ppa));
200 	BUG_ON(pblk_ppa_empty(ppa));
201 #endif
202 
203 	line_id = pblk_ppa_to_line(ppa);
204 	line = &pblk->lines[line_id];
205 	paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
206 
207 	__pblk_map_invalidate(pblk, line, paddr);
208 }
209 
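/* Invalidate any device-mapped sectors in the LBA range and clear their
 * L2P entries; cached entries are simply reset to empty.
 */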
210 static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
211 				  unsigned int nr_secs)
212 {
213 	sector_t lba;
214 
215 	spin_lock(&pblk->trans_lock);
216 	for (lba = slba; lba < slba + nr_secs; lba++) {
217 		struct ppa_addr ppa;
218 
219 		ppa = pblk_trans_map_get(pblk, lba);
220 
221 		if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
222 			pblk_map_invalidate(pblk, ppa);
223 
224 		pblk_ppa_set_empty(&ppa);
225 		pblk_trans_map_set(pblk, lba, ppa);
226 	}
227 	spin_unlock(&pblk->trans_lock);
228 }
229 
230 /* Caller must guarantee that the request is a valid type */
231 struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
232 {
233 	mempool_t *pool;
234 	struct nvm_rq *rqd;
235 	int rq_size;
236 
237 	switch (type) {
238 	case PBLK_WRITE:
239 	case PBLK_WRITE_INT:
240 		pool = &pblk->w_rq_pool;
241 		rq_size = pblk_w_rq_size;
242 		break;
243 	case PBLK_READ:
244 		pool = &pblk->r_rq_pool;
245 		rq_size = pblk_g_rq_size;
246 		break;
247 	default:
248 		pool = &pblk->e_rq_pool;
249 		rq_size = pblk_g_rq_size;
250 	}
251 
252 	rqd = mempool_alloc(pool, GFP_KERNEL);
253 	memset(rqd, 0, rq_size);
254 
255 	return rqd;
256 }
257 
258 /* Typically used on completion path. Cannot guarantee request consistency */
259 void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
260 {
261 	struct nvm_tgt_dev *dev = pblk->dev;
262 	mempool_t *pool;
263 
264 	switch (type) {
265 	case PBLK_WRITE:
266 		kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
267 		/* fall through */
268 	case PBLK_WRITE_INT:
269 		pool = &pblk->w_rq_pool;
270 		break;
271 	case PBLK_READ:
272 		pool = &pblk->r_rq_pool;
273 		break;
274 	case PBLK_ERASE:
275 		pool = &pblk->e_rq_pool;
276 		break;
277 	default:
278 		pblk_err(pblk, "trying to free unknown rqd type\n");
279 		return;
280 	}
281 
282 	if (rqd->meta_list)
283 		nvm_dev_dma_free(dev->parent, rqd->meta_list,
284 				rqd->dma_meta_list);
285 	mempool_free(rqd, pool);
286 }
287 
288 void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
289 			 int nr_pages)
290 {
291 	struct bio_vec *bv;
292 	struct page *page;
293 	int i, e, nbv = 0;
294 
295 	for (i = 0; i < bio->bi_vcnt; i++) {
296 		bv = &bio->bi_io_vec[i];
297 		page = bv->bv_page;
298 		for (e = 0; e < bv->bv_len; e += PBLK_EXPOSED_PAGE_SIZE, nbv++)
299 			if (nbv >= off)
300 				mempool_free(page++, &pblk->page_bio_pool);
301 	}
302 }
303 
304 int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
305 		       int nr_pages)
306 {
307 	struct request_queue *q = pblk->dev->q;
308 	struct page *page;
309 	int i, ret;
310 
311 	for (i = 0; i < nr_pages; i++) {
312 		page = mempool_alloc(&pblk->page_bio_pool, flags);
313 
314 		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
315 		if (ret != PBLK_EXPOSED_PAGE_SIZE) {
316 			pblk_err(pblk, "could not add page to bio\n");
317 			mempool_free(page, &pblk->page_bio_pool);
318 			goto err;
319 		}
320 	}
321 
322 	return 0;
323 err:
324 	pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
325 	return -1;
326 }
327 
328 void pblk_write_kick(struct pblk *pblk)
329 {
330 	wake_up_process(pblk->writer_ts);
331 	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
332 }
333 
334 void pblk_write_timer_fn(struct timer_list *t)
335 {
336 	struct pblk *pblk = from_timer(pblk, t, wtimer);
337 
338 	/* kick the write thread every tick to flush outstanding data */
339 	pblk_write_kick(pblk);
340 }
341 
342 void pblk_write_should_kick(struct pblk *pblk)
343 {
344 	unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);
345 
346 	if (secs_avail >= pblk->min_write_pgs)
347 		pblk_write_kick(pblk);
348 }
349 
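/* Yield the CPU until the in-flight I/O counter drops to zero */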
350 static void pblk_wait_for_meta(struct pblk *pblk)
351 {
352 	do {
353 		if (!atomic_read(&pblk->inflight_io))
354 			break;
355 
356 		schedule();
357 	} while (1);
358 }
359 
360 static void pblk_flush_writer(struct pblk *pblk)
361 {
362 	pblk_rb_flush(&pblk->rwb);
363 	do {
364 		if (!pblk_rb_sync_count(&pblk->rwb))
365 			break;
366 
367 		pblk_write_kick(pblk);
368 		schedule();
369 	} while (1);
370 }
371 
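/*
 * Pick the GC list a line belongs on, based on its valid sector count (vsc)
 * and write-error state, and update its gc_group accordingly. Returns the
 * list to move the line to, or NULL if it is already on the right one.
 * Caller must hold line->lock.
 */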
372 struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
373 {
374 	struct pblk_line_meta *lm = &pblk->lm;
375 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
376 	struct list_head *move_list = NULL;
377 	int vsc = le32_to_cpu(*line->vsc);
378 
379 	lockdep_assert_held(&line->lock);
380 
381 	if (line->w_err_gc->has_write_err) {
382 		if (line->gc_group != PBLK_LINEGC_WERR) {
383 			line->gc_group = PBLK_LINEGC_WERR;
384 			move_list = &l_mg->gc_werr_list;
385 			pblk_rl_werr_line_in(&pblk->rl);
386 		}
387 	} else if (!vsc) {
388 		if (line->gc_group != PBLK_LINEGC_FULL) {
389 			line->gc_group = PBLK_LINEGC_FULL;
390 			move_list = &l_mg->gc_full_list;
391 		}
392 	} else if (vsc < lm->high_thrs) {
393 		if (line->gc_group != PBLK_LINEGC_HIGH) {
394 			line->gc_group = PBLK_LINEGC_HIGH;
395 			move_list = &l_mg->gc_high_list;
396 		}
397 	} else if (vsc < lm->mid_thrs) {
398 		if (line->gc_group != PBLK_LINEGC_MID) {
399 			line->gc_group = PBLK_LINEGC_MID;
400 			move_list = &l_mg->gc_mid_list;
401 		}
402 	} else if (vsc < line->sec_in_line) {
403 		if (line->gc_group != PBLK_LINEGC_LOW) {
404 			line->gc_group = PBLK_LINEGC_LOW;
405 			move_list = &l_mg->gc_low_list;
406 		}
407 	} else if (vsc == line->sec_in_line) {
408 		if (line->gc_group != PBLK_LINEGC_EMPTY) {
409 			line->gc_group = PBLK_LINEGC_EMPTY;
410 			move_list = &l_mg->gc_empty_list;
411 		}
412 	} else {
413 		line->state = PBLK_LINESTATE_CORRUPT;
414 		line->gc_group = PBLK_LINEGC_NONE;
415 		move_list =  &l_mg->corrupt_list;
416 		pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
417 						line->id, vsc,
418 						line->sec_in_line,
419 						lm->high_thrs, lm->mid_thrs);
420 	}
421 
422 	return move_list;
423 }
424 
425 void pblk_discard(struct pblk *pblk, struct bio *bio)
426 {
427 	sector_t slba = pblk_get_lba(bio);
428 	sector_t nr_secs = pblk_get_secs(bio);
429 
430 	pblk_invalidate_range(pblk, slba, nr_secs);
431 }
432 
433 void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
434 {
435 	atomic_long_inc(&pblk->write_failed);
436 #ifdef CONFIG_NVM_PBLK_DEBUG
437 	pblk_print_failed_rqd(pblk, rqd, rqd->error);
438 #endif
439 }
440 
441 void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
442 {
443 	/* Empty page read is not necessarily an error (e.g., L2P recovery) */
444 	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
445 		atomic_long_inc(&pblk->read_empty);
446 		return;
447 	}
448 
449 	switch (rqd->error) {
450 	case NVM_RSP_WARN_HIGHECC:
451 		atomic_long_inc(&pblk->read_high_ecc);
452 		break;
453 	case NVM_RSP_ERR_FAILECC:
454 	case NVM_RSP_ERR_FAILCRC:
455 		atomic_long_inc(&pblk->read_failed);
456 		break;
457 	default:
458 		pblk_err(pblk, "unknown read error:%d\n", rqd->error);
459 	}
460 #ifdef CONFIG_NVM_PBLK_DEBUG
461 	pblk_print_failed_rqd(pblk, rqd, rqd->error);
462 #endif
463 }
464 
465 void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
466 {
467 	pblk->sec_per_write = sec_per_write;
468 }
469 
470 int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
471 {
472 	struct nvm_tgt_dev *dev = pblk->dev;
473 
474 	atomic_inc(&pblk->inflight_io);
475 
476 #ifdef CONFIG_NVM_PBLK_DEBUG
477 	if (pblk_check_io(pblk, rqd))
478 		return NVM_IO_ERR;
479 #endif
480 
481 	return nvm_submit_io(dev, rqd);
482 }
483 
484 int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
485 {
486 	struct nvm_tgt_dev *dev = pblk->dev;
487 
488 	atomic_inc(&pblk->inflight_io);
489 
490 #ifdef CONFIG_NVM_PBLK_DEBUG
491 	if (pblk_check_io(pblk, rqd))
492 		return NVM_IO_ERR;
493 #endif
494 
495 	return nvm_submit_io_sync(dev, rqd);
496 }
497 
498 static void pblk_bio_map_addr_endio(struct bio *bio)
499 {
500 	bio_put(bio);
501 }
502 
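/*
 * Build a bio on top of a metadata buffer. kmalloc'ed buffers can be
 * mapped directly with bio_map_kern(); vmalloc'ed buffers must be added
 * page by page through their backing pages.
 */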
503 struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
504 			      unsigned int nr_secs, unsigned int len,
505 			      int alloc_type, gfp_t gfp_mask)
506 {
507 	struct nvm_tgt_dev *dev = pblk->dev;
508 	void *kaddr = data;
509 	struct page *page;
510 	struct bio *bio;
511 	int i, ret;
512 
513 	if (alloc_type == PBLK_KMALLOC_META)
514 		return bio_map_kern(dev->q, kaddr, len, gfp_mask);
515 
516 	bio = bio_kmalloc(gfp_mask, nr_secs);
517 	if (!bio)
518 		return ERR_PTR(-ENOMEM);
519 
520 	for (i = 0; i < nr_secs; i++) {
521 		page = vmalloc_to_page(kaddr);
522 		if (!page) {
523 			pblk_err(pblk, "could not map vmalloc bio\n");
524 			bio_put(bio);
525 			bio = ERR_PTR(-ENOMEM);
526 			goto out;
527 		}
528 
529 		ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
530 		if (ret != PAGE_SIZE) {
531 			pblk_err(pblk, "could not add page to bio\n");
532 			bio_put(bio);
533 			bio = ERR_PTR(-ENOMEM);
534 			goto out;
535 		}
536 
537 		kaddr += PAGE_SIZE;
538 	}
539 
540 	bio->bi_end_io = pblk_bio_map_addr_endio;
541 out:
542 	return bio;
543 }
544 
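/*
 * Decide how many sectors to sync in one request: the configured write
 * size if enough data is available, otherwise the largest multiple of the
 * minimum write size that fits, or the minimum when a flush is pending.
 */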
545 int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
546 		   unsigned long secs_to_flush)
547 {
548 	int max = pblk->sec_per_write;
549 	int min = pblk->min_write_pgs;
550 	int secs_to_sync = 0;
551 
552 	if (secs_avail >= max)
553 		secs_to_sync = max;
554 	else if (secs_avail >= min)
555 		secs_to_sync = min * (secs_avail / min);
556 	else if (secs_to_flush)
557 		secs_to_sync = min;
558 
559 	return secs_to_sync;
560 }
561 
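/* Roll back the last nr_secs sector allocations on the line, rewinding
 * cur_sec and clearing the corresponding bits in the map bitmap.
 */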
562 void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
563 {
564 	u64 addr;
565 	int i;
566 
567 	spin_lock(&line->lock);
568 	addr = find_next_zero_bit(line->map_bitmap,
569 					pblk->lm.sec_per_line, line->cur_sec);
570 	line->cur_sec = addr - nr_secs;
571 
572 	for (i = 0; i < nr_secs; i++, line->cur_sec--)
573 		WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
574 	spin_unlock(&line->lock);
575 }
576 
577 u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
578 {
579 	u64 addr;
580 	int i;
581 
582 	lockdep_assert_held(&line->lock);
583 
584 	/* logic error: ppa out-of-bounds. Prevent generating bad address */
585 	if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
586 		WARN(1, "pblk: page allocation out of bounds\n");
587 		nr_secs = pblk->lm.sec_per_line - line->cur_sec;
588 	}
589 
590 	line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
591 					pblk->lm.sec_per_line, line->cur_sec);
592 	for (i = 0; i < nr_secs; i++, line->cur_sec++)
593 		WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));
594 
595 	return addr;
596 }
597 
598 u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
599 {
600 	u64 addr;
601 
602 	/* Lock needed in case a write fails and a recovery needs to remap
603 	 * failed write buffer entries
604 	 */
605 	spin_lock(&line->lock);
606 	addr = __pblk_alloc_page(pblk, line, nr_secs);
607 	line->left_msecs -= nr_secs;
608 	WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
609 	spin_unlock(&line->lock);
610 
611 	return addr;
612 }
613 
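/* Return the next free sector (paddr) on the line without allocating it */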
614 u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
615 {
616 	u64 paddr;
617 
618 	spin_lock(&line->lock);
619 	paddr = find_next_zero_bit(line->map_bitmap,
620 					pblk->lm.sec_per_line, line->cur_sec);
621 	spin_unlock(&line->lock);
622 
623 	return paddr;
624 }
625 
626 /*
627  * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
628  * taking the per-LUN semaphore.
629  */
630 static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
631 				     void *emeta_buf, u64 paddr, int dir)
632 {
633 	struct nvm_tgt_dev *dev = pblk->dev;
634 	struct nvm_geo *geo = &dev->geo;
635 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
636 	struct pblk_line_meta *lm = &pblk->lm;
637 	void *ppa_list, *meta_list;
638 	struct bio *bio;
639 	struct nvm_rq rqd;
640 	dma_addr_t dma_ppa_list, dma_meta_list;
641 	int min = pblk->min_write_pgs;
642 	int left_ppas = lm->emeta_sec[0];
643 	int id = line->id;
644 	int rq_ppas, rq_len;
645 	int cmd_op, bio_op;
646 	int i, j;
647 	int ret;
648 
649 	if (dir == PBLK_WRITE) {
650 		bio_op = REQ_OP_WRITE;
651 		cmd_op = NVM_OP_PWRITE;
652 	} else if (dir == PBLK_READ) {
653 		bio_op = REQ_OP_READ;
654 		cmd_op = NVM_OP_PREAD;
655 	} else
656 		return -EINVAL;
657 
658 	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
659 							&dma_meta_list);
660 	if (!meta_list)
661 		return -ENOMEM;
662 
663 	ppa_list = meta_list + pblk_dma_meta_size;
664 	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
665 
666 next_rq:
667 	memset(&rqd, 0, sizeof(struct nvm_rq));
668 
669 	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
670 	rq_len = rq_ppas * geo->csecs;
671 
672 	bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
673 					l_mg->emeta_alloc_type, GFP_KERNEL);
674 	if (IS_ERR(bio)) {
675 		ret = PTR_ERR(bio);
676 		goto free_rqd_dma;
677 	}
678 
679 	bio->bi_iter.bi_sector = 0; /* internal bio */
680 	bio_set_op_attrs(bio, bio_op, 0);
681 
682 	rqd.bio = bio;
683 	rqd.meta_list = meta_list;
684 	rqd.ppa_list = ppa_list;
685 	rqd.dma_meta_list = dma_meta_list;
686 	rqd.dma_ppa_list = dma_ppa_list;
687 	rqd.opcode = cmd_op;
688 	rqd.nr_ppas = rq_ppas;
689 
690 	if (dir == PBLK_WRITE) {
691 		struct pblk_sec_meta *meta_list = rqd.meta_list;
692 
693 		rqd.flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
694 		for (i = 0; i < rqd.nr_ppas; ) {
695 			spin_lock(&line->lock);
696 			paddr = __pblk_alloc_page(pblk, line, min);
697 			spin_unlock(&line->lock);
698 			for (j = 0; j < min; j++, i++, paddr++) {
699 				meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
700 				rqd.ppa_list[i] =
701 					addr_to_gen_ppa(pblk, paddr, id);
702 			}
703 		}
704 	} else {
705 		for (i = 0; i < rqd.nr_ppas; ) {
706 			struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
707 			int pos = pblk_ppa_to_pos(geo, ppa);
708 			int read_type = PBLK_READ_RANDOM;
709 
710 			if (pblk_io_aligned(pblk, rq_ppas))
711 				read_type = PBLK_READ_SEQUENTIAL;
712 			rqd.flags = pblk_set_read_mode(pblk, read_type);
713 
714 			while (test_bit(pos, line->blk_bitmap)) {
715 				paddr += min;
716 				if (pblk_boundary_paddr_checks(pblk, paddr)) {
717 					pblk_err(pblk, "corrupt emeta line:%d\n",
718 								line->id);
719 					bio_put(bio);
720 					ret = -EINTR;
721 					goto free_rqd_dma;
722 				}
723 
724 				ppa = addr_to_gen_ppa(pblk, paddr, id);
725 				pos = pblk_ppa_to_pos(geo, ppa);
726 			}
727 
728 			if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
729 				pblk_err(pblk, "corrupt emeta line:%d\n",
730 								line->id);
731 				bio_put(bio);
732 				ret = -EINTR;
733 				goto free_rqd_dma;
734 			}
735 
736 			for (j = 0; j < min; j++, i++, paddr++)
737 				rqd.ppa_list[i] =
738 					addr_to_gen_ppa(pblk, paddr, line->id);
739 		}
740 	}
741 
742 	ret = pblk_submit_io_sync(pblk, &rqd);
743 	if (ret) {
744 		pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
745 		bio_put(bio);
746 		goto free_rqd_dma;
747 	}
748 
749 	atomic_dec(&pblk->inflight_io);
750 
751 	if (rqd.error) {
752 		if (dir == PBLK_WRITE)
753 			pblk_log_write_err(pblk, &rqd);
754 		else
755 			pblk_log_read_err(pblk, &rqd);
756 	}
757 
758 	emeta_buf += rq_len;
759 	left_ppas -= rq_ppas;
760 	if (left_ppas)
761 		goto next_rq;
762 free_rqd_dma:
763 	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
764 	return ret;
765 }
766 
767 u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
768 {
769 	struct nvm_tgt_dev *dev = pblk->dev;
770 	struct nvm_geo *geo = &dev->geo;
771 	struct pblk_line_meta *lm = &pblk->lm;
772 	int bit;
773 
774 	/* This usually only happens on bad lines */
775 	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
776 	if (bit >= lm->blk_per_line)
777 		return -1;
778 
779 	return bit * geo->ws_opt;
780 }
781 
782 static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
783 				     u64 paddr, int dir)
784 {
785 	struct nvm_tgt_dev *dev = pblk->dev;
786 	struct pblk_line_meta *lm = &pblk->lm;
787 	struct bio *bio;
788 	struct nvm_rq rqd;
789 	__le64 *lba_list = NULL;
790 	int i, ret;
791 	int cmd_op, bio_op;
792 	int flags;
793 
794 	if (dir == PBLK_WRITE) {
795 		bio_op = REQ_OP_WRITE;
796 		cmd_op = NVM_OP_PWRITE;
797 		flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
798 		lba_list = emeta_to_lbas(pblk, line->emeta->buf);
799 	} else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
800 		bio_op = REQ_OP_READ;
801 		cmd_op = NVM_OP_PREAD;
802 		flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
803 	} else
804 		return -EINVAL;
805 
806 	memset(&rqd, 0, sizeof(struct nvm_rq));
807 
808 	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
809 							&rqd.dma_meta_list);
810 	if (!rqd.meta_list)
811 		return -ENOMEM;
812 
813 	rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
814 	rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
815 
816 	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
817 	if (IS_ERR(bio)) {
818 		ret = PTR_ERR(bio);
819 		goto free_ppa_list;
820 	}
821 
822 	bio->bi_iter.bi_sector = 0; /* internal bio */
823 	bio_set_op_attrs(bio, bio_op, 0);
824 
825 	rqd.bio = bio;
826 	rqd.opcode = cmd_op;
827 	rqd.flags = flags;
828 	rqd.nr_ppas = lm->smeta_sec;
829 
830 	for (i = 0; i < lm->smeta_sec; i++, paddr++) {
831 		struct pblk_sec_meta *meta_list = rqd.meta_list;
832 
833 		rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
834 
835 		if (dir == PBLK_WRITE) {
836 			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
837 
838 			meta_list[i].lba = lba_list[paddr] = addr_empty;
839 		}
840 	}
841 
842 	/*
843 	 * This I/O is sent by the write thread when a line is replaced. Since
844 	 * the write thread is the only one sending write and erase commands,
845 	 * there is no need to take the LUN semaphore.
846 	 */
847 	ret = pblk_submit_io_sync(pblk, &rqd);
848 	if (ret) {
849 		pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
850 		bio_put(bio);
851 		goto free_ppa_list;
852 	}
853 
854 	atomic_dec(&pblk->inflight_io);
855 
856 	if (rqd.error) {
857 		if (dir == PBLK_WRITE) {
858 			pblk_log_write_err(pblk, &rqd);
859 			ret = 1;
860 		} else if (dir == PBLK_READ)
861 			pblk_log_read_err(pblk, &rqd);
862 	}
863 
864 free_ppa_list:
865 	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
866 
867 	return ret;
868 }
869 
870 int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
871 {
872 	u64 bpaddr = pblk_line_smeta_start(pblk, line);
873 
874 	return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ_RECOV);
875 }
876 
877 int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
878 			 void *emeta_buf)
879 {
880 	return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
881 						line->emeta_ssec, PBLK_READ);
882 }
883 
884 static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
885 			    struct ppa_addr ppa)
886 {
887 	rqd->opcode = NVM_OP_ERASE;
888 	rqd->ppa_addr = ppa;
889 	rqd->nr_ppas = 1;
890 	rqd->flags = pblk_set_progr_mode(pblk, PBLK_ERASE);
891 	rqd->bio = NULL;
892 }
893 
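/* Erase a single chunk synchronously and run the completion path in place */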
894 static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
895 {
896 	struct nvm_rq rqd = {NULL};
897 	int ret;
898 
899 	pblk_setup_e_rq(pblk, &rqd, ppa);
900 
901 	/* The write thread schedules erases so that it minimizes disturbances
902 	 * with writes. Thus, there is no need to take the LUN semaphore.
903 	 */
904 	ret = pblk_submit_io_sync(pblk, &rqd);
905 	rqd.private = pblk;
906 	__pblk_end_io_erase(pblk, &rqd);
907 
908 	return ret;
909 }
910 
911 int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
912 {
913 	struct pblk_line_meta *lm = &pblk->lm;
914 	struct ppa_addr ppa;
915 	int ret, bit = -1;
916 
917 	/* Erase only good blocks, one at a time */
918 	do {
919 		spin_lock(&line->lock);
920 		bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
921 								bit + 1);
922 		if (bit >= lm->blk_per_line) {
923 			spin_unlock(&line->lock);
924 			break;
925 		}
926 
927 		ppa = pblk->luns[bit].bppa; /* set ch and lun */
928 		ppa.a.blk = line->id;
929 
930 		atomic_dec(&line->left_eblks);
931 		WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
932 		spin_unlock(&line->lock);
933 
934 		ret = pblk_blk_erase_sync(pblk, ppa);
935 		if (ret) {
936 			pblk_err(pblk, "failed to erase line %d\n", line->id);
937 			return ret;
938 		}
939 	} while (1);
940 
941 	return 0;
942 }
943 
944 static void pblk_line_setup_metadata(struct pblk_line *line,
945 				     struct pblk_line_mgmt *l_mg,
946 				     struct pblk_line_meta *lm)
947 {
948 	int meta_line;
949 
950 	lockdep_assert_held(&l_mg->free_lock);
951 
952 retry_meta:
953 	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
954 	if (meta_line == PBLK_DATA_LINES) {
955 		spin_unlock(&l_mg->free_lock);
956 		io_schedule();
957 		spin_lock(&l_mg->free_lock);
958 		goto retry_meta;
959 	}
960 
961 	set_bit(meta_line, &l_mg->meta_bitmap);
962 	line->meta_line = meta_line;
963 
964 	line->smeta = l_mg->sline_meta[meta_line];
965 	line->emeta = l_mg->eline_meta[meta_line];
966 
967 	memset(line->smeta, 0, lm->smeta_len);
968 	memset(line->emeta->buf, 0, lm->emeta_len[0]);
969 
970 	line->emeta->mem = 0;
971 	atomic_set(&line->emeta->sync, 0);
972 }
973 
974 /* For now, lines are always assumed to be full lines. Thus, the smeta former
975  * and current LUN bitmaps are omitted.
976  */
977 static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
978 				  struct pblk_line *cur)
979 {
980 	struct nvm_tgt_dev *dev = pblk->dev;
981 	struct nvm_geo *geo = &dev->geo;
982 	struct pblk_line_meta *lm = &pblk->lm;
983 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
984 	struct pblk_emeta *emeta = line->emeta;
985 	struct line_emeta *emeta_buf = emeta->buf;
986 	struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
987 	int nr_blk_line;
988 
989 	/* After erasing the line, new bad blocks might appear and we risk
990 	 * having an invalid line
991 	 */
992 	nr_blk_line = lm->blk_per_line -
993 			bitmap_weight(line->blk_bitmap, lm->blk_per_line);
994 	if (nr_blk_line < lm->min_blk_line) {
995 		spin_lock(&l_mg->free_lock);
996 		spin_lock(&line->lock);
997 		line->state = PBLK_LINESTATE_BAD;
998 		spin_unlock(&line->lock);
999 
1000 		list_add_tail(&line->list, &l_mg->bad_list);
1001 		spin_unlock(&l_mg->free_lock);
1002 
1003 		pblk_debug(pblk, "line %d is bad\n", line->id);
1004 
1005 		return 0;
1006 	}
1007 
1008 	/* Run-time metadata */
1009 	line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);
1010 
1011 	/* Mark LUNs allocated in this line (all for now) */
1012 	bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);
1013 
1014 	smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
1015 	memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
1016 	smeta_buf->header.id = cpu_to_le32(line->id);
1017 	smeta_buf->header.type = cpu_to_le16(line->type);
1018 	smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
1019 	smeta_buf->header.version_minor = SMETA_VERSION_MINOR;
1020 
1021 	/* Start metadata */
1022 	smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
1023 	smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);
1024 
1025 	/* Fill metadata among lines */
1026 	if (cur) {
1027 		memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
1028 		smeta_buf->prev_id = cpu_to_le32(cur->id);
1029 		cur->emeta->buf->next_id = cpu_to_le32(line->id);
1030 	} else {
1031 		smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
1032 	}
1033 
1034 	/* All smeta must be set at this point */
1035 	smeta_buf->header.crc = cpu_to_le32(
1036 			pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
1037 	smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));
1038 
1039 	/* End metadata */
1040 	memcpy(&emeta_buf->header, &smeta_buf->header,
1041 						sizeof(struct line_header));
1042 
1043 	emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
1044 	emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
1045 	emeta_buf->header.crc = cpu_to_le32(
1046 			pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
1047 
1048 	emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
1049 	emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
1050 	emeta_buf->nr_valid_lbas = cpu_to_le64(0);
1051 	emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
1052 	emeta_buf->crc = cpu_to_le32(0);
1053 	emeta_buf->prev_id = smeta_buf->prev_id;
1054 
1055 	return 1;
1056 }
1057 
1058 static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
1059 {
1060 	struct pblk_line_meta *lm = &pblk->lm;
1061 
1062 	line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
1063 	if (!line->map_bitmap)
1064 		return -ENOMEM;
1065 
1066 	/* will be initialized using bb info from map_bitmap */
1067 	line->invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
1068 	if (!line->invalid_bitmap) {
1069 		kfree(line->map_bitmap);
1070 		line->map_bitmap = NULL;
1071 		return -ENOMEM;
1072 	}
1073 
1074 	return 0;
1075 }
1076 
1077 /* For now, lines are always assumed to be full lines. Thus, the smeta former
1078  * and current LUN bitmaps are omitted.
1079  */
1080 static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
1081 			     int init)
1082 {
1083 	struct nvm_tgt_dev *dev = pblk->dev;
1084 	struct nvm_geo *geo = &dev->geo;
1085 	struct pblk_line_meta *lm = &pblk->lm;
1086 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1087 	u64 off;
1088 	int bit = -1;
1089 	int emeta_secs;
1090 
1091 	line->sec_in_line = lm->sec_per_line;
1092 
1093 	/* Capture bad block information on line mapping bitmaps */
1094 	while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
1095 					bit + 1)) < lm->blk_per_line) {
1096 		off = bit * geo->ws_opt;
1097 		bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
1098 							lm->sec_per_line);
1099 		bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
1100 							lm->sec_per_line);
1101 		line->sec_in_line -= geo->clba;
1102 	}
1103 
1104 	/* Mark smeta metadata sectors as bad sectors */
1105 	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1106 	off = bit * geo->ws_opt;
1107 	bitmap_set(line->map_bitmap, off, lm->smeta_sec);
1108 	line->sec_in_line -= lm->smeta_sec;
1109 	line->smeta_ssec = off;
1110 	line->cur_sec = off + lm->smeta_sec;
1111 
1112 	if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
1113 		pblk_debug(pblk, "line smeta I/O failed. Retry\n");
1114 		return 0;
1115 	}
1116 
1117 	bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);
1118 
1119 	/* Mark emeta metadata sectors as bad sectors. We need to consider bad
1120 	 * blocks to make sure that there are enough sectors to store emeta
1121 	 */
1122 	emeta_secs = lm->emeta_sec[0];
1123 	off = lm->sec_per_line;
1124 	while (emeta_secs) {
1125 		off -= geo->ws_opt;
1126 		if (!test_bit(off, line->invalid_bitmap)) {
1127 			bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
1128 			emeta_secs -= geo->ws_opt;
1129 		}
1130 	}
1131 
1132 	line->emeta_ssec = off;
1133 	line->sec_in_line -= lm->emeta_sec[0];
1134 	line->nr_valid_lbas = 0;
1135 	line->left_msecs = line->sec_in_line;
1136 	*line->vsc = cpu_to_le32(line->sec_in_line);
1137 
1138 	if (lm->sec_per_line - line->sec_in_line !=
1139 		bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
1140 		spin_lock(&line->lock);
1141 		line->state = PBLK_LINESTATE_BAD;
1142 		spin_unlock(&line->lock);
1143 
1144 		list_add_tail(&line->list, &l_mg->bad_list);
1145 		pblk_err(pblk, "unexpected line %d is bad\n", line->id);
1146 
1147 		return 0;
1148 	}
1149 
1150 	return 1;
1151 }
1152 
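/* On a line that has never been written, chunks reported free by the device
 * are already erased; mark them in the erase bitmap and return how many
 * chunks still need an erase.
 */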
1153 static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
1154 {
1155 	struct pblk_line_meta *lm = &pblk->lm;
1156 	struct nvm_tgt_dev *dev = pblk->dev;
1157 	struct nvm_geo *geo = &dev->geo;
1158 	int blk_to_erase = atomic_read(&line->blk_in_line);
1159 	int i;
1160 
1161 	for (i = 0; i < lm->blk_per_line; i++) {
1162 		struct pblk_lun *rlun = &pblk->luns[i];
1163 		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
1164 		int state = line->chks[pos].state;
1165 
1166 		/* Free chunks should not be erased */
1167 		if (state & NVM_CHK_ST_FREE) {
1168 			set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
1169 							line->erase_bitmap);
1170 			blk_to_erase--;
1171 		}
1172 	}
1173 
1174 	return blk_to_erase;
1175 }
1176 
1177 static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
1178 {
1179 	struct pblk_line_meta *lm = &pblk->lm;
1180 	int blk_in_line = atomic_read(&line->blk_in_line);
1181 	int blk_to_erase;
1182 
1183 	/* Bad blocks do not need to be erased */
1184 	bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
1185 
1186 	spin_lock(&line->lock);
1187 
1188 	/* If we have not written to this line, we need to mark up free chunks
1189 	 * as already erased
1190 	 */
1191 	if (line->state == PBLK_LINESTATE_NEW) {
1192 		blk_to_erase = pblk_prepare_new_line(pblk, line);
1193 		line->state = PBLK_LINESTATE_FREE;
1194 	} else {
1195 		blk_to_erase = blk_in_line;
1196 	}
1197 
1198 	if (blk_in_line < lm->min_blk_line) {
1199 		spin_unlock(&line->lock);
1200 		return -EAGAIN;
1201 	}
1202 
1203 	if (line->state != PBLK_LINESTATE_FREE) {
1204 		WARN(1, "pblk: corrupted line %d, state %d\n",
1205 							line->id, line->state);
1206 		spin_unlock(&line->lock);
1207 		return -EINTR;
1208 	}
1209 
1210 	line->state = PBLK_LINESTATE_OPEN;
1211 
1212 	atomic_set(&line->left_eblks, blk_to_erase);
1213 	atomic_set(&line->left_seblks, blk_to_erase);
1214 
1215 	line->meta_distance = lm->meta_distance;
1216 	spin_unlock(&line->lock);
1217 
1218 	kref_init(&line->ref);
1219 
1220 	return 0;
1221 }
1222 
1223 int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
1224 {
1225 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1226 	int ret;
1227 
1228 	spin_lock(&l_mg->free_lock);
1229 	l_mg->data_line = line;
1230 	list_del(&line->list);
1231 
1232 	ret = pblk_line_prepare(pblk, line);
1233 	if (ret) {
1234 		list_add(&line->list, &l_mg->free_list);
1235 		spin_unlock(&l_mg->free_lock);
1236 		return ret;
1237 	}
1238 	spin_unlock(&l_mg->free_lock);
1239 
1240 	ret = pblk_line_alloc_bitmaps(pblk, line);
1241 	if (ret)
1242 		goto fail;
1243 
1244 	if (!pblk_line_init_bb(pblk, line, 0)) {
1245 		ret = -EINTR;
1246 		goto fail;
1247 	}
1248 
1249 	pblk_rl_free_lines_dec(&pblk->rl, line, true);
1250 	return 0;
1251 
1252 fail:
1253 	spin_lock(&l_mg->free_lock);
1254 	list_add(&line->list, &l_mg->free_list);
1255 	spin_unlock(&l_mg->free_lock);
1256 
1257 	return ret;
1258 }
1259 
1260 void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
1261 {
1262 	kfree(line->map_bitmap);
1263 	line->map_bitmap = NULL;
1264 	line->smeta = NULL;
1265 	line->emeta = NULL;
1266 }
1267 
1268 static void pblk_line_reinit(struct pblk_line *line)
1269 {
1270 	*line->vsc = cpu_to_le32(EMPTY_ENTRY);
1271 
1272 	line->map_bitmap = NULL;
1273 	line->invalid_bitmap = NULL;
1274 	line->smeta = NULL;
1275 	line->emeta = NULL;
1276 }
1277 
1278 void pblk_line_free(struct pblk_line *line)
1279 {
1280 	kfree(line->map_bitmap);
1281 	kfree(line->invalid_bitmap);
1282 
1283 	pblk_line_reinit(line);
1284 }
1285 
1286 struct pblk_line *pblk_line_get(struct pblk *pblk)
1287 {
1288 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1289 	struct pblk_line_meta *lm = &pblk->lm;
1290 	struct pblk_line *line;
1291 	int ret, bit;
1292 
1293 	lockdep_assert_held(&l_mg->free_lock);
1294 
1295 retry:
1296 	if (list_empty(&l_mg->free_list)) {
1297 		pblk_err(pblk, "no free lines\n");
1298 		return NULL;
1299 	}
1300 
1301 	line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
1302 	list_del(&line->list);
1303 	l_mg->nr_free_lines--;
1304 
1305 	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1306 	if (unlikely(bit >= lm->blk_per_line)) {
1307 		spin_lock(&line->lock);
1308 		line->state = PBLK_LINESTATE_BAD;
1309 		spin_unlock(&line->lock);
1310 
1311 		list_add_tail(&line->list, &l_mg->bad_list);
1312 
1313 		pblk_debug(pblk, "line %d is bad\n", line->id);
1314 		goto retry;
1315 	}
1316 
1317 	ret = pblk_line_prepare(pblk, line);
1318 	if (ret) {
1319 		switch (ret) {
1320 		case -EAGAIN:
1321 			list_add(&line->list, &l_mg->bad_list);
1322 			goto retry;
1323 		case -EINTR:
1324 			list_add(&line->list, &l_mg->corrupt_list);
1325 			goto retry;
1326 		default:
1327 			pblk_err(pblk, "failed to prepare line %d\n", line->id);
1328 			list_add(&line->list, &l_mg->free_list);
1329 			l_mg->nr_free_lines++;
1330 			return NULL;
1331 		}
1332 	}
1333 
1334 	return line;
1335 }
1336 
1337 static struct pblk_line *pblk_line_retry(struct pblk *pblk,
1338 					 struct pblk_line *line)
1339 {
1340 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1341 	struct pblk_line *retry_line;
1342 
1343 retry:
1344 	spin_lock(&l_mg->free_lock);
1345 	retry_line = pblk_line_get(pblk);
1346 	if (!retry_line) {
1347 		l_mg->data_line = NULL;
1348 		spin_unlock(&l_mg->free_lock);
1349 		return NULL;
1350 	}
1351 
1352 	retry_line->map_bitmap = line->map_bitmap;
1353 	retry_line->invalid_bitmap = line->invalid_bitmap;
1354 	retry_line->smeta = line->smeta;
1355 	retry_line->emeta = line->emeta;
1356 	retry_line->meta_line = line->meta_line;
1357 
1358 	pblk_line_reinit(line);
1359 
1360 	l_mg->data_line = retry_line;
1361 	spin_unlock(&l_mg->free_lock);
1362 
1363 	pblk_rl_free_lines_dec(&pblk->rl, line, false);
1364 
1365 	if (pblk_line_erase(pblk, retry_line))
1366 		goto retry;
1367 
1368 	return retry_line;
1369 }
1370 
1371 static void pblk_set_space_limit(struct pblk *pblk)
1372 {
1373 	struct pblk_rl *rl = &pblk->rl;
1374 
1375 	atomic_set(&rl->rb_space, 0);
1376 }
1377 
1378 struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
1379 {
1380 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1381 	struct pblk_line *line;
1382 
1383 	spin_lock(&l_mg->free_lock);
1384 	line = pblk_line_get(pblk);
1385 	if (!line) {
1386 		spin_unlock(&l_mg->free_lock);
1387 		return NULL;
1388 	}
1389 
1390 	line->seq_nr = l_mg->d_seq_nr++;
1391 	line->type = PBLK_LINETYPE_DATA;
1392 	l_mg->data_line = line;
1393 
1394 	pblk_line_setup_metadata(line, l_mg, &pblk->lm);
1395 
1396 	/* Allocate next line for preparation */
1397 	l_mg->data_next = pblk_line_get(pblk);
1398 	if (!l_mg->data_next) {
1399 		/* If we cannot get a new line, we need to stop the pipeline.
1400 		 * Only allow as many writes in as we can store safely and then
1401 		 * fail gracefully
1402 		 */
1403 		pblk_set_space_limit(pblk);
1404 
1405 		l_mg->data_next = NULL;
1406 	} else {
1407 		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1408 		l_mg->data_next->type = PBLK_LINETYPE_DATA;
1409 	}
1410 	spin_unlock(&l_mg->free_lock);
1411 
1412 	if (pblk_line_alloc_bitmaps(pblk, line))
1413 		return NULL;
1414 
1415 	if (pblk_line_erase(pblk, line)) {
1416 		line = pblk_line_retry(pblk, line);
1417 		if (!line)
1418 			return NULL;
1419 	}
1420 
1421 retry_setup:
1422 	if (!pblk_line_init_metadata(pblk, line, NULL)) {
1423 		line = pblk_line_retry(pblk, line);
1424 		if (!line)
1425 			return NULL;
1426 
1427 		goto retry_setup;
1428 	}
1429 
1430 	if (!pblk_line_init_bb(pblk, line, 1)) {
1431 		line = pblk_line_retry(pblk, line);
1432 		if (!line)
1433 			return NULL;
1434 
1435 		goto retry_setup;
1436 	}
1437 
1438 	pblk_rl_free_lines_dec(&pblk->rl, line, true);
1439 
1440 	return line;
1441 }
1442 
1443 static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
1444 {
1445 	lockdep_assert_held(&pblk->l_mg.free_lock);
1446 
1447 	pblk_set_space_limit(pblk);
1448 	pblk->state = PBLK_STATE_STOPPING;
1449 }
1450 
1451 static void pblk_line_close_meta_sync(struct pblk *pblk)
1452 {
1453 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1454 	struct pblk_line_meta *lm = &pblk->lm;
1455 	struct pblk_line *line, *tline;
1456 	LIST_HEAD(list);
1457 
1458 	spin_lock(&l_mg->close_lock);
1459 	if (list_empty(&l_mg->emeta_list)) {
1460 		spin_unlock(&l_mg->close_lock);
1461 		return;
1462 	}
1463 
1464 	list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
1465 	spin_unlock(&l_mg->close_lock);
1466 
1467 	list_for_each_entry_safe(line, tline, &list, list) {
1468 		struct pblk_emeta *emeta = line->emeta;
1469 
1470 		while (emeta->mem < lm->emeta_len[0]) {
1471 			int ret;
1472 
1473 			ret = pblk_submit_meta_io(pblk, line);
1474 			if (ret) {
1475 				pblk_err(pblk, "sync meta line %d failed (%d)\n",
1476 							line->id, ret);
1477 				return;
1478 			}
1479 		}
1480 	}
1481 
1482 	pblk_wait_for_meta(pblk);
1483 	flush_workqueue(pblk->close_wq);
1484 }
1485 
1486 void __pblk_pipeline_flush(struct pblk *pblk)
1487 {
1488 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1489 	int ret;
1490 
1491 	spin_lock(&l_mg->free_lock);
1492 	if (pblk->state == PBLK_STATE_RECOVERING ||
1493 					pblk->state == PBLK_STATE_STOPPED) {
1494 		spin_unlock(&l_mg->free_lock);
1495 		return;
1496 	}
1497 	pblk->state = PBLK_STATE_RECOVERING;
1498 	spin_unlock(&l_mg->free_lock);
1499 
1500 	pblk_flush_writer(pblk);
1501 	pblk_wait_for_meta(pblk);
1502 
1503 	ret = pblk_recov_pad(pblk);
1504 	if (ret) {
1505 		pblk_err(pblk, "could not close data on teardown(%d)\n", ret);
1506 		return;
1507 	}
1508 
1509 	flush_workqueue(pblk->bb_wq);
1510 	pblk_line_close_meta_sync(pblk);
1511 }
1512 
1513 void __pblk_pipeline_stop(struct pblk *pblk)
1514 {
1515 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1516 
1517 	spin_lock(&l_mg->free_lock);
1518 	pblk->state = PBLK_STATE_STOPPED;
1519 	l_mg->data_line = NULL;
1520 	l_mg->data_next = NULL;
1521 	spin_unlock(&l_mg->free_lock);
1522 }
1523 
1524 void pblk_pipeline_stop(struct pblk *pblk)
1525 {
1526 	__pblk_pipeline_flush(pblk);
1527 	__pblk_pipeline_stop(pblk);
1528 }
1529 
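/*
 * Promote the pre-allocated next line to be the active data line: wait for
 * any outstanding erases (issuing them if needed), set up its metadata and
 * bad-block bitmaps, and reserve a new next line for preparation.
 */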
1530 struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
1531 {
1532 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1533 	struct pblk_line *cur, *new = NULL;
1534 	unsigned int left_seblks;
1535 
1536 	new = l_mg->data_next;
1537 	if (!new)
1538 		goto out;
1539 
1540 	spin_lock(&l_mg->free_lock);
1541 	cur = l_mg->data_line;
1542 	l_mg->data_line = new;
1543 
1544 	pblk_line_setup_metadata(new, l_mg, &pblk->lm);
1545 	spin_unlock(&l_mg->free_lock);
1546 
1547 retry_erase:
1548 	left_seblks = atomic_read(&new->left_seblks);
1549 	if (left_seblks) {
1550 		/* If line is not fully erased, erase it */
1551 		if (atomic_read(&new->left_eblks)) {
1552 			if (pblk_line_erase(pblk, new))
1553 				goto out;
1554 		} else {
1555 			io_schedule();
1556 		}
1557 		goto retry_erase;
1558 	}
1559 
1560 	if (pblk_line_alloc_bitmaps(pblk, new))
1561 		return NULL;
1562 
1563 retry_setup:
1564 	if (!pblk_line_init_metadata(pblk, new, cur)) {
1565 		new = pblk_line_retry(pblk, new);
1566 		if (!new)
1567 			goto out;
1568 
1569 		goto retry_setup;
1570 	}
1571 
1572 	if (!pblk_line_init_bb(pblk, new, 1)) {
1573 		new = pblk_line_retry(pblk, new);
1574 		if (!new)
1575 			goto out;
1576 
1577 		goto retry_setup;
1578 	}
1579 
1580 	pblk_rl_free_lines_dec(&pblk->rl, new, true);
1581 
1582 	/* Allocate next line for preparation */
1583 	spin_lock(&l_mg->free_lock);
1584 	l_mg->data_next = pblk_line_get(pblk);
1585 	if (!l_mg->data_next) {
1586 		/* If we cannot get a new line, we need to stop the pipeline.
1587 		 * Only allow as many writes in as we can store safely and then
1588 		 * fail gracefully
1589 		 */
1590 		pblk_stop_writes(pblk, new);
1591 		l_mg->data_next = NULL;
1592 	} else {
1593 		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1594 		l_mg->data_next->type = PBLK_LINETYPE_DATA;
1595 	}
1596 	spin_unlock(&l_mg->free_lock);
1597 
1598 out:
1599 	return new;
1600 }
1601 
1602 static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
1603 {
1604 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1605 	struct pblk_gc *gc = &pblk->gc;
1606 
1607 	spin_lock(&line->lock);
1608 	WARN_ON(line->state != PBLK_LINESTATE_GC);
1609 	line->state = PBLK_LINESTATE_FREE;
1610 	line->gc_group = PBLK_LINEGC_NONE;
1611 	pblk_line_free(line);
1612 
1613 	if (line->w_err_gc->has_write_err) {
1614 		pblk_rl_werr_line_out(&pblk->rl);
1615 		line->w_err_gc->has_write_err = 0;
1616 	}
1617 
1618 	spin_unlock(&line->lock);
1619 	atomic_dec(&gc->pipeline_gc);
1620 
1621 	spin_lock(&l_mg->free_lock);
1622 	list_add_tail(&line->list, &l_mg->free_list);
1623 	l_mg->nr_free_lines++;
1624 	spin_unlock(&l_mg->free_lock);
1625 
1626 	pblk_rl_free_lines_inc(&pblk->rl, line);
1627 }
1628 
1629 static void pblk_line_put_ws(struct work_struct *work)
1630 {
1631 	struct pblk_line_ws *line_put_ws = container_of(work,
1632 						struct pblk_line_ws, ws);
1633 	struct pblk *pblk = line_put_ws->pblk;
1634 	struct pblk_line *line = line_put_ws->line;
1635 
1636 	__pblk_line_put(pblk, line);
1637 	mempool_free(line_put_ws, &pblk->gen_ws_pool);
1638 }
1639 
1640 void pblk_line_put(struct kref *ref)
1641 {
1642 	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1643 	struct pblk *pblk = line->pblk;
1644 
1645 	__pblk_line_put(pblk, line);
1646 }
1647 
1648 void pblk_line_put_wq(struct kref *ref)
1649 {
1650 	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1651 	struct pblk *pblk = line->pblk;
1652 	struct pblk_line_ws *line_put_ws;
1653 
1654 	line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
1655 	if (!line_put_ws)
1656 		return;
1657 
1658 	line_put_ws->pblk = pblk;
1659 	line_put_ws->line = line;
1660 	line_put_ws->priv = NULL;
1661 
1662 	INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
1663 	queue_work(pblk->r_end_wq, &line_put_ws->ws);
1664 }
1665 
1666 int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
1667 {
1668 	struct nvm_rq *rqd;
1669 	int err;
1670 
1671 	rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
1672 
1673 	pblk_setup_e_rq(pblk, rqd, ppa);
1674 
1675 	rqd->end_io = pblk_end_io_erase;
1676 	rqd->private = pblk;
1677 
1678 	/* The write thread schedules erases so that it minimizes disturbances
1679 	 * with writes. Thus, there is no need to take the LUN semaphore.
1680 	 */
1681 	err = pblk_submit_io(pblk, rqd);
1682 	if (err) {
1683 		struct nvm_tgt_dev *dev = pblk->dev;
1684 		struct nvm_geo *geo = &dev->geo;
1685 
1686 		pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
1687 					pblk_ppa_to_line(ppa),
1688 					pblk_ppa_to_pos(geo, ppa));
1689 	}
1690 
1691 	return err;
1692 }
1693 
1694 struct pblk_line *pblk_line_get_data(struct pblk *pblk)
1695 {
1696 	return pblk->l_mg.data_line;
1697 }
1698 
1699 /* For now, always erase next line */
1700 struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
1701 {
1702 	return pblk->l_mg.data_next;
1703 }
1704 
1705 int pblk_line_is_full(struct pblk_line *line)
1706 {
1707 	return (line->left_msecs == 0);
1708 }
1709 
1710 static void pblk_line_should_sync_meta(struct pblk *pblk)
1711 {
1712 	if (pblk_rl_is_limit(&pblk->rl))
1713 		pblk_line_close_meta_sync(pblk);
1714 }
1715 
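/* Close a fully-written line: move it to the matching GC list and release
 * its run-time metadata resources.
 */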
1716 void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
1717 {
1718 	struct nvm_tgt_dev *dev = pblk->dev;
1719 	struct nvm_geo *geo = &dev->geo;
1720 	struct pblk_line_meta *lm = &pblk->lm;
1721 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1722 	struct list_head *move_list;
1723 	int i;
1724 
1725 #ifdef CONFIG_NVM_PBLK_DEBUG
1726 	WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
1727 				"pblk: corrupt closed line %d\n", line->id);
1728 #endif
1729 
1730 	spin_lock(&l_mg->free_lock);
1731 	WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
1732 	spin_unlock(&l_mg->free_lock);
1733 
1734 	spin_lock(&l_mg->gc_lock);
1735 	spin_lock(&line->lock);
1736 	WARN_ON(line->state != PBLK_LINESTATE_OPEN);
1737 	line->state = PBLK_LINESTATE_CLOSED;
1738 	move_list = pblk_line_gc_list(pblk, line);
1739 
1740 	list_add_tail(&line->list, move_list);
1741 
1742 	kfree(line->map_bitmap);
1743 	line->map_bitmap = NULL;
1744 	line->smeta = NULL;
1745 	line->emeta = NULL;
1746 
1747 	for (i = 0; i < lm->blk_per_line; i++) {
1748 		struct pblk_lun *rlun = &pblk->luns[i];
1749 		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
1750 		int state = line->chks[pos].state;
1751 
1752 		if (!(state & NVM_CHK_ST_OFFLINE))
1753 			state = NVM_CHK_ST_CLOSED;
1754 	}
1755 
1756 	spin_unlock(&line->lock);
1757 	spin_unlock(&l_mg->gc_lock);
1758 }
1759 
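/* Fill in the line's end metadata (emeta) and queue the line so the emeta
 * gets written out.
 */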
1760 void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
1761 {
1762 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1763 	struct pblk_line_meta *lm = &pblk->lm;
1764 	struct pblk_emeta *emeta = line->emeta;
1765 	struct line_emeta *emeta_buf = emeta->buf;
1766 	struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);
1767 
1768 	/* No need for an exact vsc value; avoid a big line lock and take an approximation */
1769 	memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
1770 	memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);
1771 
1772 	wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
1773 	wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
1774 	wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));
1775 
1776 	if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
1777 		emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
1778 		memcpy(emeta_buf->header.uuid, pblk->instance_uuid, 16);
1779 		emeta_buf->header.id = cpu_to_le32(line->id);
1780 		emeta_buf->header.type = cpu_to_le16(line->type);
1781 		emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
1782 		emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
1783 		emeta_buf->header.crc = cpu_to_le32(
1784 			pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
1785 	}
1786 
1787 	emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
1788 	emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
1789 
1790 	spin_lock(&l_mg->close_lock);
1791 	spin_lock(&line->lock);
1792 
1793 	/* Update the in-memory start address for emeta, in case it has
1794 	 * shifted due to write errors
1795 	 */
1796 	if (line->emeta_ssec != line->cur_sec)
1797 		line->emeta_ssec = line->cur_sec;
1798 
1799 	list_add_tail(&line->list, &l_mg->emeta_list);
1800 	spin_unlock(&line->lock);
1801 	spin_unlock(&l_mg->close_lock);
1802 
1803 	pblk_line_should_sync_meta(pblk);
1804 }
1805 
1806 static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
1807 {
1808 	struct pblk_line_meta *lm = &pblk->lm;
1809 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1810 	unsigned int lba_list_size = lm->emeta_len[2];
1811 	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1812 	struct pblk_emeta *emeta = line->emeta;
1813 
1814 	w_err_gc->lba_list = pblk_malloc(lba_list_size,
1815 					 l_mg->emeta_alloc_type, GFP_KERNEL);
	if (!w_err_gc->lba_list)
		return;

1816 	memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
1817 				lba_list_size);
1818 }
1819 
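/* Workqueue context for closing a fully written line */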
1820 void pblk_line_close_ws(struct work_struct *work)
1821 {
1822 	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1823 									ws);
1824 	struct pblk *pblk = line_ws->pblk;
1825 	struct pblk_line *line = line_ws->line;
1826 	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1827 
1828 	/* Write errors make the emeta start address stored in smeta invalid,
1829 	 * so keep a copy of the lba list until we've GC'd the line
1830 	 */
1831 	if (w_err_gc->has_write_err)
1832 		pblk_save_lba_list(pblk, line);
1833 
1834 	pblk_line_close(pblk, line);
1835 	mempool_free(line_ws, &pblk->gen_ws_pool);
1836 }
1837 
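/*
 * Allocate a generic work-struct wrapper for @line/@priv and queue @work on
 * the given workqueue.
 */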
1838 void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
1839 		      void (*work)(struct work_struct *), gfp_t gfp_mask,
1840 		      struct workqueue_struct *wq)
1841 {
1842 	struct pblk_line_ws *line_ws;
1843 
1844 	line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);
1845 
1846 	line_ws->pblk = pblk;
1847 	line_ws->line = line;
1848 	line_ws->priv = priv;
1849 
1850 	INIT_WORK(&line_ws->ws, work);
1851 	queue_work(wq, &line_ws->ws);
1852 }
1853 
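/* Take the write semaphore of the LUN at position @pos */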
1854 static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
1855 			     int nr_ppas, int pos)
1856 {
1857 	struct pblk_lun *rlun = &pblk->luns[pos];
1858 	int ret;
1859 
1860 	/*
1861 	 * Only send one inflight I/O per LUN. Since we map at page
1862 	 * granularity, all ppas in the I/O will map to the same LUN
1863 	 */
1864 #ifdef CONFIG_NVM_PBLK_DEBUG
1865 	int i;
1866 
1867 	for (i = 1; i < nr_ppas; i++)
1868 		WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
1869 				ppa_list[0].a.ch != ppa_list[i].a.ch);
1870 #endif
1871 
1872 	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
1873 	if (ret == -ETIME || ret == -EINTR)
1874 		pblk_err(pblk, "taking lun semaphore timed out: err %d\n",
1875 				-ret);
1876 }
1877 
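/* Take the write semaphore of the LUN addressed by @ppa_list */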
1878 void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
1879 {
1880 	struct nvm_tgt_dev *dev = pblk->dev;
1881 	struct nvm_geo *geo = &dev->geo;
1882 	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1883 
1884 	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
1885 }
1886 
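/*
 * Per-request variant of pblk_down_page(): @lun_bitmap tracks which LUNs this
 * request already holds, so each LUN semaphore is taken at most once.
 */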
1887 void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1888 		  unsigned long *lun_bitmap)
1889 {
1890 	struct nvm_tgt_dev *dev = pblk->dev;
1891 	struct nvm_geo *geo = &dev->geo;
1892 	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1893 
1894 	/* If the LUN has been locked for this same request, do not attempt to
1895 	 * lock it again
1896 	 */
1897 	if (test_and_set_bit(pos, lun_bitmap))
1898 		return;
1899 
1900 	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
1901 }
1902 
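/* Release the write semaphore of the LUN addressed by @ppa_list */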
1903 void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
1904 {
1905 	struct nvm_tgt_dev *dev = pblk->dev;
1906 	struct nvm_geo *geo = &dev->geo;
1907 	struct pblk_lun *rlun;
1908 	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1909 
1910 #ifdef CONFIG_NVM_PBLK_DEBUG
1911 	int i;
1912 
1913 	for (i = 1; i < nr_ppas; i++)
1914 		WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
1915 				ppa_list[0].a.ch != ppa_list[i].a.ch);
1916 #endif
1917 
1918 	rlun = &pblk->luns[pos];
1919 	up(&rlun->wr_sem);
1920 }
1921 
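/* Release the write semaphore of every LUN marked in @lun_bitmap */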
1922 void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1923 		unsigned long *lun_bitmap)
1924 {
1925 	struct nvm_tgt_dev *dev = pblk->dev;
1926 	struct nvm_geo *geo = &dev->geo;
1927 	struct pblk_lun *rlun;
1928 	int num_lun = geo->all_luns;
1929 	int bit = -1;
1930 
1931 	while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
1932 		rlun = &pblk->luns[bit];
1933 		up(&rlun->wr_sem);
1934 	}
1935 }
1936 
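/*
 * Update the L2P entry for @lba and invalidate the previously mapped device
 * address, if any.
 */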
1937 void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1938 {
1939 	struct ppa_addr ppa_l2p;
1940 
1941 	/* logic error: lba out-of-bounds. Ignore update */
1942 	if (!(lba < pblk->rl.nr_secs)) {
1943 		WARN(1, "pblk: corrupted L2P map request\n");
1944 		return;
1945 	}
1946 
1947 	spin_lock(&pblk->trans_lock);
1948 	ppa_l2p = pblk_trans_map_get(pblk, lba);
1949 
1950 	if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
1951 		pblk_map_invalidate(pblk, ppa_l2p);
1952 
1953 	pblk_trans_map_set(pblk, lba, ppa);
1954 	spin_unlock(&pblk->trans_lock);
1955 }
1956 
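/* L2P update for user writes; @ppa must point to a write buffer entry */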
1957 void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1958 {
1959 
1960 #ifdef CONFIG_NVM_PBLK_DEBUG
1961 	/* Callers must ensure that the ppa points to a cache address */
1962 	BUG_ON(!pblk_addr_in_cache(ppa));
1963 	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
1964 #endif
1965 
1966 	pblk_update_map(pblk, lba, ppa);
1967 }
1968 
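/*
 * L2P update for GC writes. Returns 1 if the mapping was updated and 0 if the
 * sector was overwritten by user I/O while being garbage collected, in which
 * case the GC copy is dropped.
 */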
1969 int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
1970 		       struct pblk_line *gc_line, u64 paddr_gc)
1971 {
1972 	struct ppa_addr ppa_l2p, ppa_gc;
1973 	int ret = 1;
1974 
1975 #ifdef CONFIG_NVM_PBLK_DEBUG
1976 	/* Callers must ensure that the ppa points to a cache address */
1977 	BUG_ON(!pblk_addr_in_cache(ppa_new));
1978 	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
1979 #endif
1980 
1981 	/* logic error: lba out-of-bounds. Ignore update */
1982 	if (!(lba < pblk->rl.nr_secs)) {
1983 		WARN(1, "pblk: corrupted L2P map request\n");
1984 		return 0;
1985 	}
1986 
1987 	spin_lock(&pblk->trans_lock);
1988 	ppa_l2p = pblk_trans_map_get(pblk, lba);
1989 	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);
1990 
1991 	if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
1992 		spin_lock(&gc_line->lock);
1993 		WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
1994 						"pblk: corrupted GC update");
1995 		spin_unlock(&gc_line->lock);
1996 
1997 		ret = 0;
1998 		goto out;
1999 	}
2000 
2001 	pblk_trans_map_set(pblk, lba, ppa_new);
2002 out:
2003 	spin_unlock(&pblk->trans_lock);
2004 	return ret;
2005 }
2006 
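/*
 * L2P update on write completion: map @lba to its device address unless the
 * cache entry has been updated in the meantime, in which case the mapped ppa
 * is invalidated instead.
 */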
2007 void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
2008 			 struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
2009 {
2010 	struct ppa_addr ppa_l2p;
2011 
2012 #ifdef CONFIG_NVM_PBLK_DEBUG
2013 	/* Callers must ensure that the ppa points to a device address */
2014 	BUG_ON(pblk_addr_in_cache(ppa_mapped));
2015 #endif
2016 	/* Invalidate and discard padded entries */
2017 	if (lba == ADDR_EMPTY) {
2018 		atomic64_inc(&pblk->pad_wa);
2019 #ifdef CONFIG_NVM_PBLK_DEBUG
2020 		atomic_long_inc(&pblk->padded_wb);
2021 #endif
2022 		if (!pblk_ppa_empty(ppa_mapped))
2023 			pblk_map_invalidate(pblk, ppa_mapped);
2024 		return;
2025 	}
2026 
2027 	/* logic error: lba out-of-bounds. Ignore update */
2028 	if (!(lba < pblk->rl.nr_secs)) {
2029 		WARN(1, "pblk: corrupted L2P map request\n");
2030 		return;
2031 	}
2032 
2033 	spin_lock(&pblk->trans_lock);
2034 	ppa_l2p = pblk_trans_map_get(pblk, lba);
2035 
2036 	/* Do not update L2P if the cacheline has been updated. In this case,
2037 	 * the mapped ppa must be invalidated
2038 	 */
2039 	if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
2040 		if (!pblk_ppa_empty(ppa_mapped))
2041 			pblk_map_invalidate(pblk, ppa_mapped);
2042 		goto out;
2043 	}
2044 
2045 #ifdef CONFIG_NVM_PBLK_DEBUG
2046 	WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
2047 #endif
2048 
2049 	pblk_trans_map_set(pblk, lba, ppa_mapped);
2050 out:
2051 	spin_unlock(&pblk->trans_lock);
2052 }
2053 
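/*
 * Look up a sequential lba range. Takes a reference on every line backing a
 * mapped sector so the line is not freed while the read is in flight.
 */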
2054 void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
2055 			 sector_t blba, int nr_secs)
2056 {
2057 	int i;
2058 
2059 	spin_lock(&pblk->trans_lock);
2060 	for (i = 0; i < nr_secs; i++) {
2061 		struct ppa_addr ppa;
2062 
2063 		ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);
2064 
2065 		/* If the L2P entry maps to a line, the reference is valid */
2066 		if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
2067 			int line_id = pblk_ppa_to_line(ppa);
2068 			struct pblk_line *line = &pblk->lines[line_id];
2069 
2070 			kref_get(&line->ref);
2071 		}
2072 	}
2073 	spin_unlock(&pblk->trans_lock);
2074 }
2075 
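/*
 * Look up an arbitrary list of lbas (used by GC), skipping empty and
 * out-of-bounds entries.
 */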
2076 void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
2077 			  u64 *lba_list, int nr_secs)
2078 {
2079 	u64 lba;
2080 	int i;
2081 
2082 	spin_lock(&pblk->trans_lock);
2083 	for (i = 0; i < nr_secs; i++) {
2084 		lba = lba_list[i];
2085 		if (lba != ADDR_EMPTY) {
2086 			/* logic error: lba out-of-bounds. Ignore update */
2087 			if (!(lba < pblk->rl.nr_secs)) {
2088 				WARN(1, "pblk: corrupted L2P map request\n");
2089 				continue;
2090 			}
2091 			ppas[i] = pblk_trans_map_get(pblk, lba);
2092 		}
2093 	}
2094 	spin_unlock(&pblk->trans_lock);
2095 }
2096