/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from cache has not been updated and
 * resides at another location in the cache. We guarantee though that if the
 * value is read from the cache, it belongs to the mapped lba. To guarantee
 * that writes and reads are ordered, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
				sector_t lba, struct ppa_addr ppa,
				int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(pblk_ppa_empty(ppa));
	BUG_ON(!pblk_addr_in_cache(ppa));
#endif

	return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
						bio_iter, advanced_bio);
}

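/*
 * Map a multi-sector read: look up blba..blba + nr_secs - 1 in the L2P table.
 * Unmapped sectors and sectors resident in the write buffer are satisfied
 * immediately and marked in read_bitmap; the remaining sectors are collected
 * in rqd->ppa_list for a device read.
 */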
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
				 struct bio *bio, sector_t blba,
				 unsigned long *read_bitmap)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
	int nr_secs = rqd->nr_ppas;
	bool advanced_bio = false;
	int i, j = 0;

	pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr p = ppas[i];
		sector_t lba = blba + i;

retry:
		if (pblk_ppa_empty(p)) {
			WARN_ON(test_and_set_bit(i, read_bitmap));
			meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);

			if (unlikely(!advanced_bio)) {
				bio_advance(bio, i * PBLK_EXPOSED_PAGE_SIZE);
				advanced_bio = true;
			}

			goto next;
		}

		/* Try to read from write buffer. The address is later checked
		 * on the write buffer to prevent retrieving overwritten data.
		 */
		if (pblk_addr_in_cache(p)) {
			if (!pblk_read_from_cache(pblk, bio, lba, p, i,
								advanced_bio)) {
				pblk_lookup_l2p_seq(pblk, &p, lba, 1);
				goto retry;
			}
			WARN_ON(test_and_set_bit(i, read_bitmap));
			meta_list[i].lba = cpu_to_le64(lba);
			advanced_bio = true;
#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_long_inc(&pblk->cache_reads);
#endif
		} else {
			/* Read non-cached sectors from media */
			rqd->ppa_list[j++] = p;
		}

next:
		if (advanced_bio)
			bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

	if (pblk_io_aligned(pblk, nr_secs))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}

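/*
 * Sanity-check a completed sequential read: the lba recovered from each
 * sector's metadata must match the lba that sector was read for.
 */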
static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
				sector_t blba)
{
	struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
	int nr_lbas = rqd->nr_ppas;
	int i;

	for (i = 0; i < nr_lbas; i++) {
		u64 lba = le64_to_cpu(meta_lba_list[i].lba);

		if (lba == ADDR_EMPTY)
			continue;

		if (lba != blba + i) {
#ifdef CONFIG_NVM_PBLK_DEBUG
			struct ppa_addr *p;

			p = (nr_lbas == 1) ? &rqd->ppa_addr : &rqd->ppa_list[i];
			print_ppa(pblk, p, "seq", i);
#endif
			pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
							lba, (u64)blba + i);
			WARN_ON(1);
		}
	}
}

/*
 * There can be holes in the lba list.
 */
static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
				 u64 *lba_list, int nr_lbas)
{
	struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
	int i, j;

	for (i = 0, j = 0; i < nr_lbas; i++) {
		u64 lba = lba_list[i];
		u64 meta_lba;

		if (lba == ADDR_EMPTY)
			continue;

		meta_lba = le64_to_cpu(meta_lba_list[j].lba);

		if (lba != meta_lba) {
#ifdef CONFIG_NVM_PBLK_DEBUG
			struct ppa_addr *p;
			int nr_ppas = rqd->nr_ppas;

			p = (nr_ppas == 1) ? &rqd->ppa_addr : &rqd->ppa_list[j];
			print_ppa(pblk, p, "rand", j);
#endif
			pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
								lba, meta_lba);
			WARN_ON(1);
		}

		j++;
	}

	WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
}

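/*
 * Drop the line references taken for each PPA in the request; the final
 * reference is released through pblk_line_put_wq.
 */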
static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list;
	int i;

	ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;

	for (i = 0; i < rqd->nr_ppas; i++) {
		struct ppa_addr ppa = ppa_list[i];
		struct pblk_line *line;

		line = &pblk->lines[pblk_ppa_to_line(ppa)];
		kref_put(&line->ref, pblk_line_put_wq);
	}
}

static void pblk_end_user_read(struct bio *bio)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
#endif
	bio_endio(bio);
}

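/*
 * Common read completion: finish I/O accounting, log device errors, verify
 * the metadata lbas, release the internal bio and (optionally) the line
 * references, and free the request.
 */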
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
			       bool put_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *int_bio = rqd->bio;
	unsigned long start_time = r_ctx->start_time;

	generic_end_io_acct(dev->q, REQ_OP_READ, &pblk->disk->part0, start_time);

	if (rqd->error)
		pblk_log_read_err(pblk, rqd);

	pblk_read_check_seq(pblk, rqd, r_ctx->lba);

	if (int_bio)
		bio_put(int_bio);

	if (put_line)
		pblk_read_put_rqd_kref(pblk, rqd);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
	atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

	pblk_free_rqd(pblk, rqd, PBLK_READ);
	atomic_dec(&pblk->inflight_io);
}

static void pblk_end_io_read(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = (struct bio *)r_ctx->private;

	pblk_end_user_read(bio);
	__pblk_end_io_read(pblk, rqd, true);
}

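/*
 * Completion for a partial read: copy the sectors read from the device (the
 * "holes") out of the internal bio into the original bio, restore the
 * metadata lbas and the original request geometry, then complete the
 * original bio.
 */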
static void pblk_end_partial_read(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_pr_ctx *pr_ctx = r_ctx->private;
	struct bio *new_bio = rqd->bio;
	struct bio *bio = pr_ctx->orig_bio;
	struct bio_vec src_bv, dst_bv;
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	int bio_init_idx = pr_ctx->bio_init_idx;
	unsigned long *read_bitmap = pr_ctx->bitmap;
	int nr_secs = pr_ctx->orig_nr_secs;
	int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
	__le64 *lba_list_mem, *lba_list_media;
	void *src_p, *dst_p;
	int hole, i;

	if (unlikely(nr_holes == 1)) {
		struct ppa_addr ppa;

		ppa = rqd->ppa_addr;
		rqd->ppa_list = pr_ctx->ppa_ptr;
		rqd->dma_ppa_list = pr_ctx->dma_ppa_list;
		rqd->ppa_list[0] = ppa;
	}

	/* Re-use allocated memory for intermediate lbas */
	lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
	lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);

	for (i = 0; i < nr_secs; i++) {
		lba_list_media[i] = meta_list[i].lba;
		meta_list[i].lba = lba_list_mem[i];
	}

	/* Fill the holes in the original bio */
	i = 0;
	hole = find_first_zero_bit(read_bitmap, nr_secs);
	do {
		int line_id = pblk_ppa_to_line(rqd->ppa_list[i]);
		struct pblk_line *line = &pblk->lines[line_id];

		kref_put(&line->ref, pblk_line_put);

		meta_list[hole].lba = lba_list_media[i];

		src_bv = new_bio->bi_io_vec[i++];
		dst_bv = bio->bi_io_vec[bio_init_idx + hole];

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
			src_p + src_bv.bv_offset,
			PBLK_EXPOSED_PAGE_SIZE);

		kunmap_atomic(src_p);
		kunmap_atomic(dst_p);

		mempool_free(src_bv.bv_page, &pblk->page_bio_pool);

		hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
	} while (hole < nr_secs);

	bio_put(new_bio);
	kfree(pr_ctx);

	/* restore original request */
	rqd->bio = NULL;
	rqd->nr_ppas = nr_secs;

	bio_endio(bio);
	__pblk_end_io_read(pblk, rqd, false);
}

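/*
 * Prepare a partial read: allocate an internal bio with one page per hole
 * (sector that could not be served from the write buffer), save the original
 * bio and request state in a pblk_pr_ctx, and retarget the request so that
 * only the holes are read from the device.
 */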
static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
			    unsigned int bio_init_idx,
			    unsigned long *read_bitmap,
			    int nr_holes)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_pr_ctx *pr_ctx;
	struct bio *new_bio, *bio = r_ctx->private;
	__le64 *lba_list_mem;
	int nr_secs = rqd->nr_ppas;
	int i;

	/* Re-use allocated memory for intermediate lbas */
	lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);

	new_bio = bio_alloc(GFP_KERNEL, nr_holes);

	if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
		goto fail_bio_put;

	if (nr_holes != new_bio->bi_vcnt) {
		WARN_ONCE(1, "pblk: malformed bio\n");
		goto fail_free_pages;
	}

	pr_ctx = kmalloc(sizeof(struct pblk_pr_ctx), GFP_KERNEL);
	if (!pr_ctx)
		goto fail_free_pages;

	for (i = 0; i < nr_secs; i++)
		lba_list_mem[i] = meta_list[i].lba;

	new_bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(new_bio, REQ_OP_READ, 0);

	rqd->bio = new_bio;
	rqd->nr_ppas = nr_holes;
	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

	pr_ctx->ppa_ptr = NULL;
	pr_ctx->orig_bio = bio;
	bitmap_copy(pr_ctx->bitmap, read_bitmap, NVM_MAX_VLBA);
	pr_ctx->bio_init_idx = bio_init_idx;
	pr_ctx->orig_nr_secs = nr_secs;
	r_ctx->private = pr_ctx;

	if (unlikely(nr_holes == 1)) {
		pr_ctx->ppa_ptr = rqd->ppa_list;
		pr_ctx->dma_ppa_list = rqd->dma_ppa_list;
		rqd->ppa_addr = rqd->ppa_list[0];
	}
	return 0;

fail_free_pages:
	pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
fail_bio_put:
	bio_put(new_bio);

	return -ENOMEM;
}

static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
				 unsigned int bio_init_idx,
				 unsigned long *read_bitmap, int nr_secs)
{
	int nr_holes;
	int ret;

	nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);

	if (pblk_setup_partial_read(pblk, rqd, bio_init_idx, read_bitmap,
				    nr_holes))
		return NVM_IO_ERR;

	rqd->end_io = pblk_end_partial_read;

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		bio_put(rqd->bio);
		pblk_err(pblk, "partial read IO submission failed\n");
		goto err;
	}

	return NVM_IO_OK;

err:
	pblk_err(pblk, "failed to perform partial read\n");

	/* Free allocated pages in new bio */
	pblk_bio_free_pages(pblk, rqd->bio, 0, rqd->bio->bi_vcnt);
	__pblk_end_io_read(pblk, rqd, false);
	return NVM_IO_ERR;
}

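/*
 * Single-sector counterpart of pblk_read_ppalist_rq: resolve one lba and
 * either serve it from the write buffer or set up rqd->ppa_addr for a
 * device read.
 */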
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
			 sector_t lba, unsigned long *read_bitmap)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct ppa_addr ppa;

	pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
	if (pblk_ppa_empty(ppa)) {
		WARN_ON(test_and_set_bit(0, read_bitmap));
		meta_list[0].lba = cpu_to_le64(ADDR_EMPTY);
		return;
	}

	/* Try to read from write buffer. The address is later checked on the
	 * write buffer to prevent retrieving overwritten data.
	 */
	if (pblk_addr_in_cache(ppa)) {
		if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, true)) {
			pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
			goto retry;
		}

		WARN_ON(test_and_set_bit(0, read_bitmap));
		meta_list[0].lba = cpu_to_le64(lba);

#ifdef CONFIG_NVM_PBLK_DEBUG
		atomic_long_inc(&pblk->cache_reads);
#endif
	} else {
		rqd->ppa_addr = ppa;
	}

	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
}

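/*
 * Read entry point. Three outcomes are possible: every sector is satisfied
 * from the write buffer (or is unmapped) and the request completes
 * immediately; no sector is cached and the bio is cloned and submitted to
 * the device as-is; or the request is mixed and is handled by the partial
 * read path.
 */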
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct request_queue *q = dev->q;
	sector_t blba = pblk_get_lba(bio);
	unsigned int nr_secs = pblk_get_secs(bio);
	struct pblk_g_ctx *r_ctx;
	struct nvm_rq *rqd;
	unsigned int bio_init_idx;
	DECLARE_BITMAP(read_bitmap, NVM_MAX_VLBA);
	int ret = NVM_IO_ERR;

	/* logic error: lba out-of-bounds. Ignore read request */
	if (blba >= pblk->rl.nr_secs || nr_secs > PBLK_MAX_REQ_ADDRS) {
		WARN(1, "pblk: read lba out of bounds (lba:%llu, nr:%d)\n",
					(unsigned long long)blba, nr_secs);
		return NVM_IO_ERR;
	}

	generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
			      &pblk->disk->part0);

	bitmap_zero(read_bitmap, nr_secs);

	rqd = pblk_alloc_rqd(pblk, PBLK_READ);

	rqd->opcode = NVM_OP_PREAD;
	rqd->nr_ppas = nr_secs;
	rqd->bio = NULL; /* cloned bio if needed */
	rqd->private = pblk;
	rqd->end_io = pblk_end_io_read;

	r_ctx = nvm_rq_to_pdu(rqd);
	r_ctx->start_time = jiffies;
	r_ctx->lba = blba;
	r_ctx->private = bio; /* original bio */

	/* Save the index for this bio's start. This is needed in case
	 * we need to fill a partial read.
	 */
	bio_init_idx = pblk_get_bi_idx(bio);

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list) {
		pblk_err(pblk, "not able to allocate ppa list\n");
		goto fail_rqd_free;
	}

	if (nr_secs > 1) {
		rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
		rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

		pblk_read_ppalist_rq(pblk, rqd, bio, blba, read_bitmap);
	} else {
		pblk_read_rq(pblk, rqd, bio, blba, read_bitmap);
	}

	if (bitmap_full(read_bitmap, nr_secs)) {
		atomic_inc(&pblk->inflight_io);
		__pblk_end_io_read(pblk, rqd, false);
		return NVM_IO_DONE;
	}

	/* All sectors are to be read from the device */
	if (bitmap_empty(read_bitmap, rqd->nr_ppas)) {
		struct bio *int_bio = NULL;

		/* Clone read bio to deal with read errors internally */
		int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
		if (!int_bio) {
			pblk_err(pblk, "could not clone read bio\n");
			goto fail_end_io;
		}

		rqd->bio = int_bio;

		if (pblk_submit_io(pblk, rqd)) {
			pblk_err(pblk, "read IO submission failed\n");
			ret = NVM_IO_ERR;
			goto fail_end_io;
		}

		return NVM_IO_OK;
	}

	/* The read bio request could be partially filled by the write buffer,
	 * but there are some holes that need to be read from the drive.
	 */
	ret = pblk_partial_read_bio(pblk, rqd, bio_init_idx, read_bitmap,
				    nr_secs);
	if (ret)
		goto fail_meta_free;

	return NVM_IO_OK;

fail_meta_free:
	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
fail_rqd_free:
	pblk_free_rqd(pblk, rqd, PBLK_READ);
	return ret;
fail_end_io:
	__pblk_end_io_read(pblk, rqd, false);
	return ret;
}

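/*
 * GC multi-sector mapping: confirm through the L2P table that each candidate
 * sector still resides at the expected physical address on the victim line.
 * Sectors that have been overwritten in the meantime are dropped from the
 * GC request.
 */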
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
			      struct pblk_line *line, u64 *lba_list,
			      u64 *paddr_list_gc, unsigned int nr_secs)
{
	struct ppa_addr ppa_list_l2p[PBLK_MAX_REQ_ADDRS];
	struct ppa_addr ppa_gc;
	int valid_secs = 0;
	int i;

	pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		if (lba_list[i] == ADDR_EMPTY)
			continue;

		ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
		if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
			paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
			continue;
		}

		rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

	return valid_secs;
}

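/*
 * Single-sector counterpart of read_ppalist_rq_gc.
 */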
static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
		      struct pblk_line *line, sector_t lba,
		      u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int valid_secs = 0;

	if (lba == ADDR_EMPTY)
		goto out;

	/* logic error: lba out-of-bounds */
	if (lba >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lba out of bounds\n");
		goto out;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
	if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
		goto out;

	rqd->ppa_addr = ppa_l2p;
	valid_secs = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

out:
	return valid_secs;
}

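/*
 * Synchronously read the sectors that are still valid for a GC request into
 * gc_rq->data.
 */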
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct bio *bio;
	struct nvm_rq rqd;
	int data_len;
	int ret = NVM_IO_OK;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_meta_list);
	if (!rqd.meta_list)
		return -ENOMEM;

	if (gc_rq->nr_secs > 1) {
		rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
		rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

		gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list,
							gc_rq->paddr_list,
							gc_rq->nr_secs);
		if (gc_rq->secs_to_gc == 1)
			rqd.ppa_addr = rqd.ppa_list[0];
	} else {
		gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list[0],
							gc_rq->paddr_list[0]);
	}

	if (!(gc_rq->secs_to_gc))
		goto out;

	data_len = (gc_rq->secs_to_gc) * geo->csecs;
	bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
						PBLK_VMALLOC_META, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pblk_err(pblk, "could not allocate GC bio (%lu)\n",
				PTR_ERR(bio));
		ret = PTR_ERR(bio);
		goto err_free_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd.opcode = NVM_OP_PREAD;
	rqd.nr_ppas = gc_rq->secs_to_gc;
	rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
	rqd.bio = bio;

	if (pblk_submit_io_sync(pblk, &rqd)) {
		ret = -EIO;
		pblk_err(pblk, "GC read request failed\n");
		goto err_free_bio;
	}

	pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_PBLK_DEBUG
		pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
	atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
	atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif

out:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;

err_free_bio:
	bio_put(bio);
err_free_dma:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;
}