1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2012 Fusion-io All rights reserved.
4 * Copyright (C) 2012 Intel Corp. All rights reserved.
5 */
6
7 #include <linux/sched.h>
8 #include <linux/bio.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/raid/pq.h>
12 #include <linux/hash.h>
13 #include <linux/list_sort.h>
14 #include <linux/raid/xor.h>
15 #include <linux/mm.h>
16 #include "ctree.h"
17 #include "disk-io.h"
18 #include "volumes.h"
19 #include "raid56.h"
20 #include "async-thread.h"
21
22 /* set when additional merges to this rbio are not allowed */
23 #define RBIO_RMW_LOCKED_BIT 1
24
25 /*
26 * set when this rbio is sitting in the hash, but it is just a cache
27 * of past RMW
28 */
29 #define RBIO_CACHE_BIT 2
30
31 /*
32 * set when it is safe to trust the stripe_pages for caching
33 */
34 #define RBIO_CACHE_READY_BIT 3
35
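/* max number of rbios kept on the stripe cache LRU before we start pruning */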
36 #define RBIO_CACHE_SIZE 1024
37
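/* the kind of IO an rbio is carrying out, stored in btrfs_raid_bio::operation */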
38 enum btrfs_rbio_ops {
39 BTRFS_RBIO_WRITE,
40 BTRFS_RBIO_READ_REBUILD,
41 BTRFS_RBIO_PARITY_SCRUB,
42 BTRFS_RBIO_REBUILD_MISSING,
43 };
44
45 struct btrfs_raid_bio {
46 struct btrfs_fs_info *fs_info;
47 struct btrfs_bio *bbio;
48
49 /* while we're doing rmw on a stripe
50 * we put it into a hash table so we can
51 * lock the stripe and merge more rbios
52 * into it.
53 */
54 struct list_head hash_list;
55
56 /*
57 * LRU list for the stripe cache
58 */
59 struct list_head stripe_cache;
60
61 /*
62 * for scheduling work in the helper threads
63 */
64 struct btrfs_work work;
65
66 /*
67 * bio list and bio_list_lock are used
68 * to add more bios into the stripe
69 * in hopes of avoiding the full rmw
70 */
71 struct bio_list bio_list;
72 spinlock_t bio_list_lock;
73
74 /* also protected by the bio_list_lock, the
75 * plug list is used by the plugging code
76 * to collect partial bios while plugged. The
77 * stripe locking code also uses it to hand off
78 * the stripe lock to the next pending IO
79 */
80 struct list_head plug_list;
81
82 /*
83 * flags that tell us if it is safe to
84 * merge with this bio
85 */
86 unsigned long flags;
87
88 /* size of each individual stripe on disk */
89 int stripe_len;
90
91 /* number of data stripes (no p/q) */
92 int nr_data;
93
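/* number of stripes in the full stripe, including P/Q (num_stripes - num_tgtdevs) */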
94 int real_stripes;
95
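/* number of pages in each stripe: DIV_ROUND_UP(stripe_len, PAGE_SIZE) */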
96 int stripe_npages;
97 /*
98 * which operation this rbio is carrying out. A parity
99 * rebuild for a read from higher up is handled
100 * differently from a parity rebuild done as part
101 * of rmw.
102 */
103 enum btrfs_rbio_ops operation;
104
105 /* first bad stripe */
106 int faila;
107
108 /* second bad stripe (for raid6 use) */
109 int failb;
110
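/* stripe number of the parity being checked and repaired during a parity scrub */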
111 int scrubp;
112 /*
113 * number of pages needed to represent the full
114 * stripe
115 */
116 int nr_pages;
117
118 /*
119 * size of all the bios in the bio_list. This
120 * helps us decide if the rbio maps to a full
121 * stripe or not
122 */
123 int bio_list_bytes;
124
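/* number of bio counter references held on fs_info; dropped in rbio_orig_end_io() */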
125 int generic_bio_cnt;
126
127 refcount_t refs;
128
129 atomic_t stripes_pending;
130
131 atomic_t error;
132 /*
133 * The arrays below point into extra memory allocated past the
134 * end of this struct. Their locations are set up when the
135 * rbio is allocated (see the CONSUME_ALLOC block in alloc_rbio()).
136 */
137
138 /* pointers to pages that we allocated for
139 * reading/writing stripes directly from the disk (including P/Q)
140 */
141 struct page **stripe_pages;
142
143 /*
144 * pointers to the pages in the bio_list. Stored
145 * here for faster lookup
146 */
147 struct page **bio_pages;
148
149 /*
150 * bitmap to record which horizontal stripe has data
151 */
152 unsigned long *dbitmap;
153
154 /* allocated with real_stripes-many pointers for finish_*() calls */
155 void **finish_pointers;
156
157 /* allocated with stripe_npages-many bits for finish_*() calls */
158 unsigned long *finish_pbitmap;
159 };
160
161 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
162 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
163 static void rmw_work(struct btrfs_work *work);
164 static void read_rebuild_work(struct btrfs_work *work);
165 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
166 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
167 static void __free_raid_bio(struct btrfs_raid_bio *rbio);
168 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
169 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
170
171 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
172 int need_check);
173 static void scrub_parity_work(struct btrfs_work *work);
174
175 static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
176 {
177 btrfs_init_work(&rbio->work, btrfs_rmw_helper, work_func, NULL, NULL);
178 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
179 }
180
181 /*
182 * the stripe hash table is used for locking, and to collect
183 * bios in hopes of making a full stripe
184 */
185 int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
186 {
187 struct btrfs_stripe_hash_table *table;
188 struct btrfs_stripe_hash_table *x;
189 struct btrfs_stripe_hash *cur;
190 struct btrfs_stripe_hash *h;
191 int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
192 int i;
193 int table_size;
194
195 if (info->stripe_hash_table)
196 return 0;
197
198 /*
199 * The table is large: it starts at order 4 and can go as high as
200 * order 7 when lock debugging is turned on.
201 *
202 * Try harder to allocate and fall back to vmalloc to lower the chance
203 * of a failing mount.
204 */
205 table_size = sizeof(*table) + sizeof(*h) * num_entries;
206 table = kvzalloc(table_size, GFP_KERNEL);
207 if (!table)
208 return -ENOMEM;
209
210 spin_lock_init(&table->cache_lock);
211 INIT_LIST_HEAD(&table->stripe_cache);
212
213 h = table->table;
214
215 for (i = 0; i < num_entries; i++) {
216 cur = h + i;
217 INIT_LIST_HEAD(&cur->hash_list);
218 spin_lock_init(&cur->lock);
219 }
220
221 x = cmpxchg(&info->stripe_hash_table, NULL, table);
222 if (x)
223 kvfree(x);
224 return 0;
225 }
226
227 /*
228 * caching an rbio means copying anything from the
229 * bio_pages array into the stripe_pages array. We
230 * use the page uptodate bit in the stripe cache array
231 * to indicate if it has valid data
232 *
233 * once the caching is done, we set the cache ready
234 * bit.
235 */
236 static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
237 {
238 int i;
239 char *s;
240 char *d;
241 int ret;
242
243 ret = alloc_rbio_pages(rbio);
244 if (ret)
245 return;
246
247 for (i = 0; i < rbio->nr_pages; i++) {
248 if (!rbio->bio_pages[i])
249 continue;
250
251 s = kmap(rbio->bio_pages[i]);
252 d = kmap(rbio->stripe_pages[i]);
253
254 copy_page(d, s);
255
256 kunmap(rbio->bio_pages[i]);
257 kunmap(rbio->stripe_pages[i]);
258 SetPageUptodate(rbio->stripe_pages[i]);
259 }
260 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
261 }
262
263 /*
264 * we hash on the first logical address of the stripe
265 */
266 static int rbio_bucket(struct btrfs_raid_bio *rbio)
267 {
268 u64 num = rbio->bbio->raid_map[0];
269
270 /*
271 * we shift down quite a bit. We're using byte
272 * addressing, and most of the lower bits are zeros.
273 * This tends to upset hash_64, and it consistently
274 * returns just one or two different values.
275 *
276 * shifting off the lower bits fixes things.
277 */
278 return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
279 }
280
281 /*
282 * stealing an rbio means taking all the uptodate pages from the stripe
283 * array in the source rbio and putting them into the destination rbio
284 */
285 static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
286 {
287 int i;
288 struct page *s;
289 struct page *d;
290
291 if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
292 return;
293
294 for (i = 0; i < dest->nr_pages; i++) {
295 s = src->stripe_pages[i];
296 if (!s || !PageUptodate(s)) {
297 continue;
298 }
299
300 d = dest->stripe_pages[i];
301 if (d)
302 __free_page(d);
303
304 dest->stripe_pages[i] = s;
305 src->stripe_pages[i] = NULL;
306 }
307 }
308
309 /*
310 * merging means we take the bio_list from the victim and
311 * splice it into the destination. The victim should
312 * be discarded afterwards.
313 *
314 * must be called with dest->rbio_list_lock held
315 */
316 static void merge_rbio(struct btrfs_raid_bio *dest,
317 struct btrfs_raid_bio *victim)
318 {
319 bio_list_merge(&dest->bio_list, &victim->bio_list);
320 dest->bio_list_bytes += victim->bio_list_bytes;
321 /* Also inherit the bitmaps from @victim. */
322 bitmap_or(dest->dbitmap, victim->dbitmap, dest->dbitmap,
323 dest->stripe_npages);
324 dest->generic_bio_cnt += victim->generic_bio_cnt;
325 bio_list_init(&victim->bio_list);
326 }
327
328 /*
329 * used to prune items that are in the cache. The caller
330 * must hold the hash table lock.
331 */
332 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
333 {
334 int bucket = rbio_bucket(rbio);
335 struct btrfs_stripe_hash_table *table;
336 struct btrfs_stripe_hash *h;
337 int freeit = 0;
338
339 /*
340 * check the bit again under the hash table lock.
341 */
342 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
343 return;
344
345 table = rbio->fs_info->stripe_hash_table;
346 h = table->table + bucket;
347
348 /* hold the lock for the bucket because we may be
349 * removing it from the hash table
350 */
351 spin_lock(&h->lock);
352
353 /*
354 * hold the lock for the bio list because we need
355 * to make sure the bio list is empty
356 */
357 spin_lock(&rbio->bio_list_lock);
358
359 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
360 list_del_init(&rbio->stripe_cache);
361 table->cache_size -= 1;
362 freeit = 1;
363
364 /* if the bio list isn't empty, this rbio is
365 * still involved in an IO. We take it out
366 * of the cache list, and drop the ref that
367 * was held for the list.
368 *
369 * If the bio_list was empty, we also remove
370 * the rbio from the hash_table, and drop
371 * the corresponding ref
372 */
373 if (bio_list_empty(&rbio->bio_list)) {
374 if (!list_empty(&rbio->hash_list)) {
375 list_del_init(&rbio->hash_list);
376 refcount_dec(&rbio->refs);
377 BUG_ON(!list_empty(&rbio->plug_list));
378 }
379 }
380 }
381
382 spin_unlock(&rbio->bio_list_lock);
383 spin_unlock(&h->lock);
384
385 if (freeit)
386 __free_raid_bio(rbio);
387 }
388
389 /*
390 * prune a given rbio from the cache
391 */
392 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
393 {
394 struct btrfs_stripe_hash_table *table;
395 unsigned long flags;
396
397 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
398 return;
399
400 table = rbio->fs_info->stripe_hash_table;
401
402 spin_lock_irqsave(&table->cache_lock, flags);
403 __remove_rbio_from_cache(rbio);
404 spin_unlock_irqrestore(&table->cache_lock, flags);
405 }
406
407 /*
408 * remove everything in the cache
409 */
410 static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
411 {
412 struct btrfs_stripe_hash_table *table;
413 unsigned long flags;
414 struct btrfs_raid_bio *rbio;
415
416 table = info->stripe_hash_table;
417
418 spin_lock_irqsave(&table->cache_lock, flags);
419 while (!list_empty(&table->stripe_cache)) {
420 rbio = list_entry(table->stripe_cache.next,
421 struct btrfs_raid_bio,
422 stripe_cache);
423 __remove_rbio_from_cache(rbio);
424 }
425 spin_unlock_irqrestore(&table->cache_lock, flags);
426 }
427
428 /*
429 * remove all cached entries and free the hash table
430 * used by unmount
431 */
432 void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
433 {
434 if (!info->stripe_hash_table)
435 return;
436 btrfs_clear_rbio_cache(info);
437 kvfree(info->stripe_hash_table);
438 info->stripe_hash_table = NULL;
439 }
440
441 /*
442 * insert an rbio into the stripe cache. It
443 * must have already been prepared by calling
444 * cache_rbio_pages
445 *
446 * If this rbio was already cached, it gets
447 * moved to the front of the lru.
448 *
449 * If the size of the rbio cache is too big, we
450 * prune an item.
451 */
452 static void cache_rbio(struct btrfs_raid_bio *rbio)
453 {
454 struct btrfs_stripe_hash_table *table;
455 unsigned long flags;
456
457 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
458 return;
459
460 table = rbio->fs_info->stripe_hash_table;
461
462 spin_lock_irqsave(&table->cache_lock, flags);
463 spin_lock(&rbio->bio_list_lock);
464
465 /* bump our ref if we were not in the list before */
466 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
467 refcount_inc(&rbio->refs);
468
469 if (!list_empty(&rbio->stripe_cache)) {
470 list_move(&rbio->stripe_cache, &table->stripe_cache);
471 } else {
472 list_add(&rbio->stripe_cache, &table->stripe_cache);
473 table->cache_size += 1;
474 }
475
476 spin_unlock(&rbio->bio_list_lock);
477
478 if (table->cache_size > RBIO_CACHE_SIZE) {
479 struct btrfs_raid_bio *found;
480
481 found = list_entry(table->stripe_cache.prev,
482 struct btrfs_raid_bio,
483 stripe_cache);
484
485 if (found != rbio)
486 __remove_rbio_from_cache(found);
487 }
488
489 spin_unlock_irqrestore(&table->cache_lock, flags);
490 }
491
492 /*
493 * helper function to run the xor_blocks api. It is only
494 * able to do MAX_XOR_BLOCKS at a time, so we need to
495 * loop through.
496 */
497 static void run_xor(void **pages, int src_cnt, ssize_t len)
498 {
499 int src_off = 0;
500 int xor_src_cnt = 0;
501 void *dest = pages[src_cnt];
502
503 while (src_cnt > 0) {
504 xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
505 xor_blocks(xor_src_cnt, len, dest, pages + src_off);
506
507 src_cnt -= xor_src_cnt;
508 src_off += xor_src_cnt;
509 }
510 }
511
512 /*
513 * Returns true if the bio list inside this rbio covers an entire stripe (no
514 * rmw required).
515 */
516 static int rbio_is_full(struct btrfs_raid_bio *rbio)
517 {
518 unsigned long flags;
519 unsigned long size = rbio->bio_list_bytes;
520 int ret = 1;
521
522 spin_lock_irqsave(&rbio->bio_list_lock, flags);
523 if (size != rbio->nr_data * rbio->stripe_len)
524 ret = 0;
525 BUG_ON(size > rbio->nr_data * rbio->stripe_len);
526 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
527
528 return ret;
529 }
530
531 /*
532 * returns 1 if it is safe to merge two rbios together.
533 * The merging is safe if the two rbios correspond to
534 * the same stripe and if they are both going in the same
535 * direction (read vs write), and if neither one is
536 * locked for final IO
537 *
538 * The caller is responsible for locking such that
539 * rmw_locked is safe to test
540 */
541 static int rbio_can_merge(struct btrfs_raid_bio *last,
542 struct btrfs_raid_bio *cur)
543 {
544 if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
545 test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
546 return 0;
547
548 /*
549 * we can't merge with cached rbios, since the
550 * idea is that when we merge the destination
551 * rbio is going to run our IO for us. We can
552 * steal from cached rbios though, other functions
553 * handle that.
554 */
555 if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
556 test_bit(RBIO_CACHE_BIT, &cur->flags))
557 return 0;
558
559 if (last->bbio->raid_map[0] !=
560 cur->bbio->raid_map[0])
561 return 0;
562
563 /* we can't merge with different operations */
564 if (last->operation != cur->operation)
565 return 0;
566 /*
567 * A parity scrub needs to read the full stripe from the drive,
568 * check and repair the parity and write the new results.
569 *
570 * We're not allowed to add any new bios to the
571 * bio list here, anyone else that wants to
572 * change this stripe needs to do their own rmw.
573 */
574 if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
575 return 0;
576
577 if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
578 return 0;
579
580 if (last->operation == BTRFS_RBIO_READ_REBUILD) {
581 int fa = last->faila;
582 int fb = last->failb;
583 int cur_fa = cur->faila;
584 int cur_fb = cur->failb;
585
586 if (last->faila >= last->failb) {
587 fa = last->failb;
588 fb = last->faila;
589 }
590
591 if (cur->faila >= cur->failb) {
592 cur_fa = cur->failb;
593 cur_fb = cur->faila;
594 }
595
596 if (fa != cur_fa || fb != cur_fb)
597 return 0;
598 }
599 return 1;
600 }
601
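/* index into the stripe_pages array for a given stripe and page within it */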
602 static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
603 int index)
604 {
605 return stripe * rbio->stripe_npages + index;
606 }
607
608 /*
609 * these are just the pages from the rbio array, not from anything
610 * the FS sent down to us
611 */
612 static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
613 int index)
614 {
615 return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
616 }
617
618 /*
619 * helper to index into the pstripe
620 */
621 static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
622 {
623 return rbio_stripe_page(rbio, rbio->nr_data, index);
624 }
625
626 /*
627 * helper to index into the qstripe, returns null
628 * if there is no qstripe
629 */
630 static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
631 {
632 if (rbio->nr_data + 1 == rbio->real_stripes)
633 return NULL;
634 return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
635 }
636
637 /*
638 * The first stripe in the table for a logical address
639 * has the lock. rbios are added in one of three ways:
640 *
641 * 1) Nobody has the stripe locked yet. The rbio is given
642 * the lock and 0 is returned. The caller must start the IO
643 * themselves.
644 *
645 * 2) Someone has the stripe locked, but we're able to merge
646 * with the lock owner. The rbio is freed and the IO will
647 * start automatically along with the existing rbio. 1 is returned.
648 *
649 * 3) Someone has the stripe locked, but we're not able to merge.
650 * The rbio is added to the lock owner's plug list, or merged into
651 * an rbio already on the plug list. When the lock owner unlocks,
652 * the next rbio on the list is run and the IO is started automatically.
653 * 1 is returned
654 *
655 * If we return 0, the caller still owns the rbio and must continue with
656 * IO submission. If we return 1, the caller must assume the rbio has
657 * already been freed.
658 */
659 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
660 {
661 int bucket = rbio_bucket(rbio);
662 struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
663 struct btrfs_raid_bio *cur;
664 struct btrfs_raid_bio *pending;
665 unsigned long flags;
666 struct btrfs_raid_bio *freeit = NULL;
667 struct btrfs_raid_bio *cache_drop = NULL;
668 int ret = 0;
669
670 spin_lock_irqsave(&h->lock, flags);
671 list_for_each_entry(cur, &h->hash_list, hash_list) {
672 if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
673 spin_lock(&cur->bio_list_lock);
674
675 /* can we steal this cached rbio's pages? */
676 if (bio_list_empty(&cur->bio_list) &&
677 list_empty(&cur->plug_list) &&
678 test_bit(RBIO_CACHE_BIT, &cur->flags) &&
679 !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
680 list_del_init(&cur->hash_list);
681 refcount_dec(&cur->refs);
682
683 steal_rbio(cur, rbio);
684 cache_drop = cur;
685 spin_unlock(&cur->bio_list_lock);
686
687 goto lockit;
688 }
689
690 /* can we merge into the lock owner? */
691 if (rbio_can_merge(cur, rbio)) {
692 merge_rbio(cur, rbio);
693 spin_unlock(&cur->bio_list_lock);
694 freeit = rbio;
695 ret = 1;
696 goto out;
697 }
698
699
700 /*
701 * we couldn't merge with the running
702 * rbio, see if we can merge with the
703 * pending ones. We don't have to
704 * check for rmw_locked because there
705 * is no way they are inside finish_rmw
706 * right now
707 */
708 list_for_each_entry(pending, &cur->plug_list,
709 plug_list) {
710 if (rbio_can_merge(pending, rbio)) {
711 merge_rbio(pending, rbio);
712 spin_unlock(&cur->bio_list_lock);
713 freeit = rbio;
714 ret = 1;
715 goto out;
716 }
717 }
718
719 /* no merging, put us on the tail of the plug list,
720 * our rbio will be started when the currently
721 * running rbio unlocks
722 */
723 list_add_tail(&rbio->plug_list, &cur->plug_list);
724 spin_unlock(&cur->bio_list_lock);
725 ret = 1;
726 goto out;
727 }
728 }
729 lockit:
730 refcount_inc(&rbio->refs);
731 list_add(&rbio->hash_list, &h->hash_list);
732 out:
733 spin_unlock_irqrestore(&h->lock, flags);
734 if (cache_drop)
735 remove_rbio_from_cache(cache_drop);
736 if (freeit)
737 __free_raid_bio(freeit);
738 return ret;
739 }
740
741 /*
742 * called as rmw or parity rebuild is completed. If the plug list has more
743 * rbios waiting for this stripe, the next one on the list will be started
744 */
745 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
746 {
747 int bucket;
748 struct btrfs_stripe_hash *h;
749 unsigned long flags;
750 int keep_cache = 0;
751
752 bucket = rbio_bucket(rbio);
753 h = rbio->fs_info->stripe_hash_table->table + bucket;
754
755 if (list_empty(&rbio->plug_list))
756 cache_rbio(rbio);
757
758 spin_lock_irqsave(&h->lock, flags);
759 spin_lock(&rbio->bio_list_lock);
760
761 if (!list_empty(&rbio->hash_list)) {
762 /*
763 * if we're still cached and there is no other IO
764 * to perform, just leave this rbio here for others
765 * to steal from later
766 */
767 if (list_empty(&rbio->plug_list) &&
768 test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
769 keep_cache = 1;
770 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
771 BUG_ON(!bio_list_empty(&rbio->bio_list));
772 goto done;
773 }
774
775 list_del_init(&rbio->hash_list);
776 refcount_dec(&rbio->refs);
777
778 /*
779 * we use the plug list to hold all the rbios
780 * waiting for the chance to lock this stripe.
781 * hand the lock over to one of them.
782 */
783 if (!list_empty(&rbio->plug_list)) {
784 struct btrfs_raid_bio *next;
785 struct list_head *head = rbio->plug_list.next;
786
787 next = list_entry(head, struct btrfs_raid_bio,
788 plug_list);
789
790 list_del_init(&rbio->plug_list);
791
792 list_add(&next->hash_list, &h->hash_list);
793 refcount_inc(&next->refs);
794 spin_unlock(&rbio->bio_list_lock);
795 spin_unlock_irqrestore(&h->lock, flags);
796
797 if (next->operation == BTRFS_RBIO_READ_REBUILD)
798 start_async_work(next, read_rebuild_work);
799 else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
800 steal_rbio(rbio, next);
801 start_async_work(next, read_rebuild_work);
802 } else if (next->operation == BTRFS_RBIO_WRITE) {
803 steal_rbio(rbio, next);
804 start_async_work(next, rmw_work);
805 } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
806 steal_rbio(rbio, next);
807 start_async_work(next, scrub_parity_work);
808 }
809
810 goto done_nolock;
811 }
812 }
813 done:
814 spin_unlock(&rbio->bio_list_lock);
815 spin_unlock_irqrestore(&h->lock, flags);
816
817 done_nolock:
818 if (!keep_cache)
819 remove_rbio_from_cache(rbio);
820 }
821
822 static void __free_raid_bio(struct btrfs_raid_bio *rbio)
823 {
824 int i;
825
826 if (!refcount_dec_and_test(&rbio->refs))
827 return;
828
829 WARN_ON(!list_empty(&rbio->stripe_cache));
830 WARN_ON(!list_empty(&rbio->hash_list));
831 WARN_ON(!bio_list_empty(&rbio->bio_list));
832
833 for (i = 0; i < rbio->nr_pages; i++) {
834 if (rbio->stripe_pages[i]) {
835 __free_page(rbio->stripe_pages[i]);
836 rbio->stripe_pages[i] = NULL;
837 }
838 }
839
840 btrfs_put_bbio(rbio->bbio);
841 kfree(rbio);
842 }
843
844 static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
845 {
846 struct bio *next;
847
848 while (cur) {
849 next = cur->bi_next;
850 cur->bi_next = NULL;
851 cur->bi_status = err;
852 bio_endio(cur);
853 cur = next;
854 }
855 }
856
857 /*
858 * this frees the rbio and runs through all the bios in the
859 * bio_list and calls end_io on them
860 */
861 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
862 {
863 struct bio *cur = bio_list_get(&rbio->bio_list);
864 struct bio *extra;
865
866 if (rbio->generic_bio_cnt)
867 btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
868 /*
869 * Clear the data bitmap, as the rbio may be cached for later usage.
870 * Do this before unlock_stripe() so there will be no new bio
871 * for this rbio.
872 */
873 bitmap_clear(rbio->dbitmap, 0, rbio->stripe_npages);
874
875 /*
876 * At this moment, rbio->bio_list is empty, however since rbio does not
877 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
878 * hash list, rbio may be merged with others so that rbio->bio_list
879 * becomes non-empty.
880 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
881 * more and we can call bio_endio() on all queued bios.
882 */
883 unlock_stripe(rbio);
884 extra = bio_list_get(&rbio->bio_list);
885 __free_raid_bio(rbio);
886
887 rbio_endio_bio_list(cur, err);
888 if (extra)
889 rbio_endio_bio_list(extra, err);
890 }
891
892 /*
893 * end io function used by finish_rmw. When we finally
894 * get here, we've written a full stripe
895 */
896 static void raid_write_end_io(struct bio *bio)
897 {
898 struct btrfs_raid_bio *rbio = bio->bi_private;
899 blk_status_t err = bio->bi_status;
900 int max_errors;
901
902 if (err)
903 fail_bio_stripe(rbio, bio);
904
905 bio_put(bio);
906
907 if (!atomic_dec_and_test(&rbio->stripes_pending))
908 return;
909
910 err = BLK_STS_OK;
911
912 /* OK, we have written all the stripes we need to. */
913 max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
914 0 : rbio->bbio->max_errors;
915 if (atomic_read(&rbio->error) > max_errors)
916 err = BLK_STS_IOERR;
917
918 rbio_orig_end_io(rbio, err);
919 }
920
921 /*
922 * the read/modify/write code wants to use the original bio for
923 * any pages it included, and then use the rbio for everything
924 * else. This function decides if a given index (stripe number)
925 * and page number in that stripe fall inside the original bio
926 * or the rbio.
927 *
928 * if you set bio_list_only, you'll get a NULL back for any ranges
929 * that are outside the bio_list
930 *
931 * This doesn't take any refs on anything, you get a bare page pointer
932 * and the caller must bump refs as required.
933 *
934 * You must call index_rbio_pages once before you can trust
935 * the answers from this function.
936 */
937 static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
938 int index, int pagenr, int bio_list_only)
939 {
940 int chunk_page;
941 struct page *p = NULL;
942
943 chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
944
945 spin_lock_irq(&rbio->bio_list_lock);
946 p = rbio->bio_pages[chunk_page];
947 spin_unlock_irq(&rbio->bio_list_lock);
948
949 if (p || bio_list_only)
950 return p;
951
952 return rbio->stripe_pages[chunk_page];
953 }
954
955 /*
956 * number of pages we need for the entire stripe across all the
957 * drives
958 */
959 static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
960 {
961 return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
962 }
963
964 /*
965 * allocation and initial setup for the btrfs_raid_bio. Note that
966 * this does not allocate any pages for rbio->stripe_pages.
967 */
968 static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
969 struct btrfs_bio *bbio,
970 u64 stripe_len)
971 {
972 struct btrfs_raid_bio *rbio;
973 int nr_data = 0;
974 int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
975 int num_pages = rbio_nr_pages(stripe_len, real_stripes);
976 int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
977 void *p;
978
979 rbio = kzalloc(sizeof(*rbio) +
980 sizeof(*rbio->stripe_pages) * num_pages +
981 sizeof(*rbio->bio_pages) * num_pages +
982 sizeof(*rbio->finish_pointers) * real_stripes +
983 sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) +
984 sizeof(*rbio->finish_pbitmap) *
985 BITS_TO_LONGS(stripe_npages),
986 GFP_NOFS);
987 if (!rbio)
988 return ERR_PTR(-ENOMEM);
989
990 bio_list_init(&rbio->bio_list);
991 INIT_LIST_HEAD(&rbio->plug_list);
992 spin_lock_init(&rbio->bio_list_lock);
993 INIT_LIST_HEAD(&rbio->stripe_cache);
994 INIT_LIST_HEAD(&rbio->hash_list);
995 rbio->bbio = bbio;
996 rbio->fs_info = fs_info;
997 rbio->stripe_len = stripe_len;
998 rbio->nr_pages = num_pages;
999 rbio->real_stripes = real_stripes;
1000 rbio->stripe_npages = stripe_npages;
1001 rbio->faila = -1;
1002 rbio->failb = -1;
1003 refcount_set(&rbio->refs, 1);
1004 atomic_set(&rbio->error, 0);
1005 atomic_set(&rbio->stripes_pending, 0);
1006
1007 /*
1008 * the stripe_pages, bio_pages, etc arrays point to the extra
1009 * memory we allocated past the end of the rbio
1010 */
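/*
 * Allocation order past the struct: stripe_pages[num_pages],
 * bio_pages[num_pages], finish_pointers[real_stripes], then the
 * dbitmap and finish_pbitmap bitmaps (stripe_npages bits each).
 */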
1011 p = rbio + 1;
1012 #define CONSUME_ALLOC(ptr, count) do { \
1013 ptr = p; \
1014 p = (unsigned char *)p + sizeof(*(ptr)) * (count); \
1015 } while (0)
1016 CONSUME_ALLOC(rbio->stripe_pages, num_pages);
1017 CONSUME_ALLOC(rbio->bio_pages, num_pages);
1018 CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
1019 CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages));
1020 CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
1021 #undef CONSUME_ALLOC
1022
1023 if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1024 nr_data = real_stripes - 1;
1025 else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1026 nr_data = real_stripes - 2;
1027 else
1028 BUG();
1029
1030 rbio->nr_data = nr_data;
1031 return rbio;
1032 }
1033
1034 /* allocate pages for all the stripes in the bio, including parity */
1035 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
1036 {
1037 int i;
1038 struct page *page;
1039
1040 for (i = 0; i < rbio->nr_pages; i++) {
1041 if (rbio->stripe_pages[i])
1042 continue;
1043 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1044 if (!page)
1045 return -ENOMEM;
1046 rbio->stripe_pages[i] = page;
1047 }
1048 return 0;
1049 }
1050
1051 /* only allocate pages for p/q stripes */
1052 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
1053 {
1054 int i;
1055 struct page *page;
1056
1057 i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
1058
1059 for (; i < rbio->nr_pages; i++) {
1060 if (rbio->stripe_pages[i])
1061 continue;
1062 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1063 if (!page)
1064 return -ENOMEM;
1065 rbio->stripe_pages[i] = page;
1066 }
1067 return 0;
1068 }
1069
1070 /*
1071 * add a single page from a specific stripe into our list of bios for IO
1072 * this will try to merge into existing bios if possible, and returns
1073 * zero if all went well.
1074 */
1075 static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1076 struct bio_list *bio_list,
1077 struct page *page,
1078 int stripe_nr,
1079 unsigned long page_index,
1080 unsigned long bio_max_len)
1081 {
1082 struct bio *last = bio_list->tail;
1083 u64 last_end = 0;
1084 int ret;
1085 struct bio *bio;
1086 struct btrfs_bio_stripe *stripe;
1087 u64 disk_start;
1088
1089 stripe = &rbio->bbio->stripes[stripe_nr];
1090 disk_start = stripe->physical + (page_index << PAGE_SHIFT);
1091
1092 /* if the device is missing, just fail this stripe */
1093 if (!stripe->dev->bdev)
1094 return fail_rbio_index(rbio, stripe_nr);
1095
1096 /* see if we can add this page onto our existing bio */
1097 if (last) {
1098 last_end = (u64)last->bi_iter.bi_sector << 9;
1099 last_end += last->bi_iter.bi_size;
1100
1101 /*
1102 * we can't merge these if they are from different
1103 * devices or if they are not contiguous
1104 */
1105 if (last_end == disk_start && stripe->dev->bdev &&
1106 !last->bi_status &&
1107 last->bi_disk == stripe->dev->bdev->bd_disk &&
1108 last->bi_partno == stripe->dev->bdev->bd_partno) {
1109 ret = bio_add_page(last, page, PAGE_SIZE, 0);
1110 if (ret == PAGE_SIZE)
1111 return 0;
1112 }
1113 }
1114
1115 /* put a new bio on the list */
1116 bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
1117 bio->bi_iter.bi_size = 0;
1118 bio_set_dev(bio, stripe->dev->bdev);
1119 bio->bi_iter.bi_sector = disk_start >> 9;
1120
1121 bio_add_page(bio, page, PAGE_SIZE, 0);
1122 bio_list_add(bio_list, bio);
1123 return 0;
1124 }
1125
1126 /*
1127 * while we're doing the read/modify/write cycle, we could
1128 * have errors in reading pages off the disk. This checks
1129 * for errors and if we're not able to read the page it'll
1130 * trigger parity reconstruction. The rmw will be finished
1131 * after we've reconstructed the failed stripes
1132 */
1133 static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
1134 {
1135 if (rbio->faila >= 0 || rbio->failb >= 0) {
1136 BUG_ON(rbio->faila == rbio->real_stripes - 1);
1137 __raid56_parity_recover(rbio);
1138 } else {
1139 finish_rmw(rbio);
1140 }
1141 }
1142
1143 /*
1144 * helper function to walk our bio list and populate the bio_pages array with
1145 * the result. This seems expensive, but it is faster than constantly
1146 * searching through the bio list as we setup the IO in finish_rmw or stripe
1147 * reconstruction.
1148 *
1149 * This must be called before you trust the answers from page_in_rbio
1150 */
1151 static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1152 {
1153 struct bio *bio;
1154 u64 start;
1155 unsigned long stripe_offset;
1156 unsigned long page_index;
1157
1158 spin_lock_irq(&rbio->bio_list_lock);
1159 bio_list_for_each(bio, &rbio->bio_list) {
1160 struct bio_vec bvec;
1161 struct bvec_iter iter;
1162 int i = 0;
1163
1164 start = (u64)bio->bi_iter.bi_sector << 9;
1165 stripe_offset = start - rbio->bbio->raid_map[0];
1166 page_index = stripe_offset >> PAGE_SHIFT;
1167
1168 if (bio_flagged(bio, BIO_CLONED))
1169 bio->bi_iter = btrfs_io_bio(bio)->iter;
1170
1171 bio_for_each_segment(bvec, bio, iter) {
1172 rbio->bio_pages[page_index + i] = bvec.bv_page;
1173 i++;
1174 }
1175 }
1176 spin_unlock_irq(&rbio->bio_list_lock);
1177 }
1178
1179 /*
1180 * this is called from one of two situations. We either
1181 * have a full stripe from the higher layers, or we've read all
1182 * the missing bits off disk.
1183 *
1184 * This will calculate the parity and then send down any
1185 * changed blocks.
1186 */
1187 static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1188 {
1189 struct btrfs_bio *bbio = rbio->bbio;
1190 void **pointers = rbio->finish_pointers;
1191 int nr_data = rbio->nr_data;
1192 int stripe;
1193 int pagenr;
1194 bool has_qstripe;
1195 struct bio_list bio_list;
1196 struct bio *bio;
1197 int ret;
1198
1199 bio_list_init(&bio_list);
1200
1201 if (rbio->real_stripes - rbio->nr_data == 1)
1202 has_qstripe = false;
1203 else if (rbio->real_stripes - rbio->nr_data == 2)
1204 has_qstripe = true;
1205 else
1206 BUG();
1207
1208 /* We should have at least one data sector. */
1209 ASSERT(bitmap_weight(rbio->dbitmap, rbio->stripe_npages));
1210
1211 /* at this point we either have a full stripe,
1212 * or we've read the full stripe from the drive.
1213 * recalculate the parity and write the new results.
1214 *
1215 * We're not allowed to add any new bios to the
1216 * bio list here, anyone else that wants to
1217 * change this stripe needs to do their own rmw.
1218 */
1219 spin_lock_irq(&rbio->bio_list_lock);
1220 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1221 spin_unlock_irq(&rbio->bio_list_lock);
1222
1223 atomic_set(&rbio->error, 0);
1224
1225 /*
1226 * now that we've set rmw_locked, run through the
1227 * bio list one last time and map the page pointers
1228 *
1229 * We don't cache full rbios because we're assuming
1230 * the higher layers are unlikely to use this area of
1231 * the disk again soon. If they do use it again,
1232 * hopefully they will send another full bio.
1233 */
1234 index_rbio_pages(rbio);
1235 if (!rbio_is_full(rbio))
1236 cache_rbio_pages(rbio);
1237 else
1238 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1239
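/*
 * For each page-sized slice of the full stripe, build the parity:
 * RAID5 P is the XOR of the data blocks, while for RAID6 the
 * gen_syndrome() library call fills in both P and Q.
 */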
1240 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1241 struct page *p;
1242 /* first collect one page from each data stripe */
1243 for (stripe = 0; stripe < nr_data; stripe++) {
1244 p = page_in_rbio(rbio, stripe, pagenr, 0);
1245 pointers[stripe] = kmap(p);
1246 }
1247
1248 /* then add the parity stripe */
1249 p = rbio_pstripe_page(rbio, pagenr);
1250 SetPageUptodate(p);
1251 pointers[stripe++] = kmap(p);
1252
1253 if (has_qstripe) {
1254
1255 /*
1256 * raid6, add the qstripe and call the
1257 * library function to fill in our p/q
1258 */
1259 p = rbio_qstripe_page(rbio, pagenr);
1260 SetPageUptodate(p);
1261 pointers[stripe++] = kmap(p);
1262
1263 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
1264 pointers);
1265 } else {
1266 /* raid5 */
1267 copy_page(pointers[nr_data], pointers[0]);
1268 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
1269 }
1270
1271
1272 for (stripe = 0; stripe < rbio->real_stripes; stripe++)
1273 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
1274 }
1275
1276 /*
1277 * time to start writing. Make bios for everything from the
1278 * higher layers (the bio_list in our rbio) and our p/q. Ignore
1279 * everything else.
1280 */
1281 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1282 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1283 struct page *page;
1284
1285 /* This vertical stripe has no data, skip it. */
1286 if (!test_bit(pagenr, rbio->dbitmap))
1287 continue;
1288
1289 if (stripe < rbio->nr_data) {
1290 page = page_in_rbio(rbio, stripe, pagenr, 1);
1291 if (!page)
1292 continue;
1293 } else {
1294 page = rbio_stripe_page(rbio, stripe, pagenr);
1295 }
1296
1297 ret = rbio_add_io_page(rbio, &bio_list,
1298 page, stripe, pagenr, rbio->stripe_len);
1299 if (ret)
1300 goto cleanup;
1301 }
1302 }
1303
1304 if (likely(!bbio->num_tgtdevs))
1305 goto write_data;
1306
1307 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1308 if (!bbio->tgtdev_map[stripe])
1309 continue;
1310
1311 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1312 struct page *page;
1313
1314 /* This vertical stripe has no data, skip it. */
1315 if (!test_bit(pagenr, rbio->dbitmap))
1316 continue;
1317
1318 if (stripe < rbio->nr_data) {
1319 page = page_in_rbio(rbio, stripe, pagenr, 1);
1320 if (!page)
1321 continue;
1322 } else {
1323 page = rbio_stripe_page(rbio, stripe, pagenr);
1324 }
1325
1326 ret = rbio_add_io_page(rbio, &bio_list, page,
1327 rbio->bbio->tgtdev_map[stripe],
1328 pagenr, rbio->stripe_len);
1329 if (ret)
1330 goto cleanup;
1331 }
1332 }
1333
1334 write_data:
1335 atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1336 BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
1337
1338 while (1) {
1339 bio = bio_list_pop(&bio_list);
1340 if (!bio)
1341 break;
1342
1343 bio->bi_private = rbio;
1344 bio->bi_end_io = raid_write_end_io;
1345 bio->bi_opf = REQ_OP_WRITE;
1346
1347 submit_bio(bio);
1348 }
1349 return;
1350
1351 cleanup:
1352 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1353
1354 while ((bio = bio_list_pop(&bio_list)))
1355 bio_put(bio);
1356 }
1357
1358 /*
1359 * helper to find the stripe number for a given bio. Used to figure out which
1360 * stripe has failed. This expects the bio to correspond to a physical disk,
1361 * so it looks up based on physical sector numbers.
1362 */
1363 static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1364 struct bio *bio)
1365 {
1366 u64 physical = bio->bi_iter.bi_sector;
1367 u64 stripe_start;
1368 int i;
1369 struct btrfs_bio_stripe *stripe;
1370
1371 physical <<= 9;
1372
1373 for (i = 0; i < rbio->bbio->num_stripes; i++) {
1374 stripe = &rbio->bbio->stripes[i];
1375 stripe_start = stripe->physical;
1376 if (physical >= stripe_start &&
1377 physical < stripe_start + rbio->stripe_len &&
1378 stripe->dev->bdev &&
1379 bio->bi_disk == stripe->dev->bdev->bd_disk &&
1380 bio->bi_partno == stripe->dev->bdev->bd_partno) {
1381 return i;
1382 }
1383 }
1384 return -1;
1385 }
1386
1387 /*
1388 * helper to find the stripe number for a given
1389 * bio (before mapping). Used to figure out which stripe has
1390 * failed. This looks up based on logical block numbers.
1391 */
1392 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1393 struct bio *bio)
1394 {
1395 u64 logical = bio->bi_iter.bi_sector;
1396 u64 stripe_start;
1397 int i;
1398
1399 logical <<= 9;
1400
1401 for (i = 0; i < rbio->nr_data; i++) {
1402 stripe_start = rbio->bbio->raid_map[i];
1403 if (logical >= stripe_start &&
1404 logical < stripe_start + rbio->stripe_len) {
1405 return i;
1406 }
1407 }
1408 return -1;
1409 }
1410
1411 /*
1412 * returns -EIO if we had too many failures
1413 */
1414 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1415 {
1416 unsigned long flags;
1417 int ret = 0;
1418
1419 spin_lock_irqsave(&rbio->bio_list_lock, flags);
1420
1421 /* we already know this stripe is bad, move on */
1422 if (rbio->faila == failed || rbio->failb == failed)
1423 goto out;
1424
1425 if (rbio->faila == -1) {
1426 /* first failure on this rbio */
1427 rbio->faila = failed;
1428 atomic_inc(&rbio->error);
1429 } else if (rbio->failb == -1) {
1430 /* second failure on this rbio */
1431 rbio->failb = failed;
1432 atomic_inc(&rbio->error);
1433 } else {
1434 ret = -EIO;
1435 }
1436 out:
1437 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
1438
1439 return ret;
1440 }
1441
1442 /*
1443 * helper to fail a stripe based on a physical disk
1444 * bio.
1445 */
1446 static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
1447 struct bio *bio)
1448 {
1449 int failed = find_bio_stripe(rbio, bio);
1450
1451 if (failed < 0)
1452 return -EIO;
1453
1454 return fail_rbio_index(rbio, failed);
1455 }
1456
1457 /*
1458 * this sets each page in the bio uptodate. It should only be used on private
1459 * rbio pages, nothing that comes in from the higher layers
1460 */
1461 static void set_bio_pages_uptodate(struct bio *bio)
1462 {
1463 struct bio_vec *bvec;
1464 int i;
1465
1466 ASSERT(!bio_flagged(bio, BIO_CLONED));
1467
1468 bio_for_each_segment_all(bvec, bio, i)
1469 SetPageUptodate(bvec->bv_page);
1470 }
1471
1472 /*
1473 * end io for the read phase of the rmw cycle. All the bios here are physical
1474 * stripe bios we've read from the disk so we can recalculate the parity of the
1475 * stripe.
1476 *
1477 * This will usually kick off finish_rmw once all the bios are read in, but it
1478 * may trigger parity reconstruction if we had any errors along the way
1479 */
1480 static void raid_rmw_end_io(struct bio *bio)
1481 {
1482 struct btrfs_raid_bio *rbio = bio->bi_private;
1483
1484 if (bio->bi_status)
1485 fail_bio_stripe(rbio, bio);
1486 else
1487 set_bio_pages_uptodate(bio);
1488
1489 bio_put(bio);
1490
1491 if (!atomic_dec_and_test(&rbio->stripes_pending))
1492 return;
1493
1494 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1495 goto cleanup;
1496
1497 /*
1498 * this will normally call finish_rmw to start our write
1499 * but if there are any failed stripes we'll reconstruct
1500 * from parity first
1501 */
1502 validate_rbio_for_rmw(rbio);
1503 return;
1504
1505 cleanup:
1506
1507 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1508 }
1509
1510 /*
1511 * the stripe must be locked by the caller. It will
1512 * unlock after all the writes are done
1513 */
1514 static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1515 {
1516 int bios_to_read = 0;
1517 struct bio_list bio_list;
1518 int ret;
1519 int pagenr;
1520 int stripe;
1521 struct bio *bio;
1522
1523 bio_list_init(&bio_list);
1524
1525 ret = alloc_rbio_pages(rbio);
1526 if (ret)
1527 goto cleanup;
1528
1529 index_rbio_pages(rbio);
1530
1531 atomic_set(&rbio->error, 0);
1532 /*
1533 * build a list of bios to read all the missing parts of this
1534 * stripe
1535 */
1536 for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1537 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1538 struct page *page;
1539 /*
1540 * we want to find all the pages missing from
1541 * the rbio and read them from the disk. If
1542 * page_in_rbio finds a page in the bio list
1543 * we don't need to read it off the stripe.
1544 */
1545 page = page_in_rbio(rbio, stripe, pagenr, 1);
1546 if (page)
1547 continue;
1548
1549 page = rbio_stripe_page(rbio, stripe, pagenr);
1550 /*
1551 * the bio cache may have handed us an uptodate
1552 * page. If so, be happy and use it
1553 */
1554 if (PageUptodate(page))
1555 continue;
1556
1557 ret = rbio_add_io_page(rbio, &bio_list, page,
1558 stripe, pagenr, rbio->stripe_len);
1559 if (ret)
1560 goto cleanup;
1561 }
1562 }
1563
1564 bios_to_read = bio_list_size(&bio_list);
1565 if (!bios_to_read) {
1566 /*
1567 * this can happen if others have merged with
1568 * us, it means there is nothing left to read.
1569 * But if there are missing devices it may not be
1570 * safe to do the full stripe write yet.
1571 */
1572 goto finish;
1573 }
1574
1575 /*
1576 * the bbio may be freed once we submit the last bio. Make sure
1577 * not to touch it after that
1578 */
1579 atomic_set(&rbio->stripes_pending, bios_to_read);
1580 while (1) {
1581 bio = bio_list_pop(&bio_list);
1582 if (!bio)
1583 break;
1584
1585 bio->bi_private = rbio;
1586 bio->bi_end_io = raid_rmw_end_io;
1587 bio->bi_opf = REQ_OP_READ;
1588
1589 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
1590
1591 submit_bio(bio);
1592 }
1593 /* the actual write will happen once the reads are done */
1594 return 0;
1595
1596 cleanup:
1597 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1598
1599 while ((bio = bio_list_pop(&bio_list)))
1600 bio_put(bio);
1601
1602 return -EIO;
1603
1604 finish:
1605 validate_rbio_for_rmw(rbio);
1606 return 0;
1607 }
1608
1609 /*
1610 * if the upper layers pass in a full stripe, we thank them by only allocating
1611 * enough pages to hold the parity, and sending it all down quickly.
1612 */
1613 static int full_stripe_write(struct btrfs_raid_bio *rbio)
1614 {
1615 int ret;
1616
1617 ret = alloc_rbio_parity_pages(rbio);
1618 if (ret) {
1619 __free_raid_bio(rbio);
1620 return ret;
1621 }
1622
1623 ret = lock_stripe_add(rbio);
1624 if (ret == 0)
1625 finish_rmw(rbio);
1626 return 0;
1627 }
1628
1629 /*
1630 * partial stripe writes get handed over to async helpers.
1631 * We're really hoping to merge a few more writes into this
1632 * rbio before calculating new parity
1633 */
1634 static int partial_stripe_write(struct btrfs_raid_bio *rbio)
1635 {
1636 int ret;
1637
1638 ret = lock_stripe_add(rbio);
1639 if (ret == 0)
1640 start_async_work(rbio, rmw_work);
1641 return 0;
1642 }
1643
1644 /*
1645 * sometimes while we were reading from the drive to
1646 * recalculate parity, enough new bios come in to create
1647 * a full stripe. So we do a check here to see if we can
1648 * go directly to finish_rmw
1649 */
1650 static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
1651 {
1652 /* head off into rmw land if we don't have a full stripe */
1653 if (!rbio_is_full(rbio))
1654 return partial_stripe_write(rbio);
1655 return full_stripe_write(rbio);
1656 }
1657
1658 /*
1659 * We use plugging callbacks to collect full stripes.
1660 * Any time we get a partial stripe write while plugged
1661 * we collect it into a list. When the unplug comes down,
1662 * we sort the list by logical block number and merge
1663 * everything we can into the same rbios
1664 */
1665 struct btrfs_plug_cb {
1666 struct blk_plug_cb cb;
1667 struct btrfs_fs_info *info;
1668 struct list_head rbio_list;
1669 struct btrfs_work work;
1670 };
1671
1672 /*
1673 * rbios on the plug list are sorted for easier merging.
1674 */
1675 static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
1676 {
1677 struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1678 plug_list);
1679 struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1680 plug_list);
1681 u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1682 u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1683
1684 if (a_sector < b_sector)
1685 return -1;
1686 if (a_sector > b_sector)
1687 return 1;
1688 return 0;
1689 }
1690
1691 static void run_plug(struct btrfs_plug_cb *plug)
1692 {
1693 struct btrfs_raid_bio *cur;
1694 struct btrfs_raid_bio *last = NULL;
1695
1696 /*
1697 * sort our plug list then try to merge
1698 * everything we can in hopes of creating full
1699 * stripes.
1700 */
1701 list_sort(NULL, &plug->rbio_list, plug_cmp);
1702 while (!list_empty(&plug->rbio_list)) {
1703 cur = list_entry(plug->rbio_list.next,
1704 struct btrfs_raid_bio, plug_list);
1705 list_del_init(&cur->plug_list);
1706
1707 if (rbio_is_full(cur)) {
1708 int ret;
1709
1710 /* we have a full stripe, send it down */
1711 ret = full_stripe_write(cur);
1712 BUG_ON(ret);
1713 continue;
1714 }
1715 if (last) {
1716 if (rbio_can_merge(last, cur)) {
1717 merge_rbio(last, cur);
1718 __free_raid_bio(cur);
1719 continue;
1720
1721 }
1722 __raid56_parity_write(last);
1723 }
1724 last = cur;
1725 }
1726 if (last) {
1727 __raid56_parity_write(last);
1728 }
1729 kfree(plug);
1730 }
1731
1732 /*
1733 * if the unplug comes from schedule, we have to push the
1734 * work off to a helper thread
1735 */
1736 static void unplug_work(struct btrfs_work *work)
1737 {
1738 struct btrfs_plug_cb *plug;
1739 plug = container_of(work, struct btrfs_plug_cb, work);
1740 run_plug(plug);
1741 }
1742
1743 static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1744 {
1745 struct btrfs_plug_cb *plug;
1746 plug = container_of(cb, struct btrfs_plug_cb, cb);
1747
1748 if (from_schedule) {
1749 btrfs_init_work(&plug->work, btrfs_rmw_helper,
1750 unplug_work, NULL, NULL);
1751 btrfs_queue_work(plug->info->rmw_workers,
1752 &plug->work);
1753 return;
1754 }
1755 run_plug(plug);
1756 }
1757
1758 /* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
1759 static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
1760 {
1761 const struct btrfs_fs_info *fs_info = rbio->fs_info;
1762 const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT;
1763 const u64 full_stripe_start = rbio->bbio->raid_map[0];
1764 const u32 orig_len = orig_bio->bi_iter.bi_size;
1765 const u32 sectorsize = fs_info->sectorsize;
1766 u64 cur_logical;
1767
1768 ASSERT(orig_logical >= full_stripe_start &&
1769 orig_logical + orig_len <= full_stripe_start +
1770 rbio->nr_data * rbio->stripe_len);
1771
1772 bio_list_add(&rbio->bio_list, orig_bio);
1773 rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;
1774
1775 /* Update the dbitmap. */
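/*
 * One bit per vertical page position: the page offset inside the
 * full stripe is folded modulo stripe_npages, so data at the same
 * position in any data stripe sets the same bit.
 */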
1776 for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len;
1777 cur_logical += sectorsize) {
1778 int bit = ((u32)(cur_logical - full_stripe_start) >>
1779 PAGE_SHIFT) % rbio->stripe_npages;
1780
1781 set_bit(bit, rbio->dbitmap);
1782 }
1783 }
1784
1785 /*
1786 * our main entry point for writes from the rest of the FS.
1787 */
1788 int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
1789 struct btrfs_bio *bbio, u64 stripe_len)
1790 {
1791 struct btrfs_raid_bio *rbio;
1792 struct btrfs_plug_cb *plug = NULL;
1793 struct blk_plug_cb *cb;
1794 int ret;
1795
1796 rbio = alloc_rbio(fs_info, bbio, stripe_len);
1797 if (IS_ERR(rbio)) {
1798 btrfs_put_bbio(bbio);
1799 return PTR_ERR(rbio);
1800 }
1801 rbio->operation = BTRFS_RBIO_WRITE;
1802 rbio_add_bio(rbio, bio);
1803
1804 btrfs_bio_counter_inc_noblocked(fs_info);
1805 rbio->generic_bio_cnt = 1;
1806
1807 /*
1808 * don't plug on full rbios, just get them out the door
1809 * as quickly as we can
1810 */
1811 if (rbio_is_full(rbio)) {
1812 ret = full_stripe_write(rbio);
1813 if (ret)
1814 btrfs_bio_counter_dec(fs_info);
1815 return ret;
1816 }
1817
1818 cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
1819 if (cb) {
1820 plug = container_of(cb, struct btrfs_plug_cb, cb);
1821 if (!plug->info) {
1822 plug->info = fs_info;
1823 INIT_LIST_HEAD(&plug->rbio_list);
1824 }
1825 list_add_tail(&rbio->plug_list, &plug->rbio_list);
1826 ret = 0;
1827 } else {
1828 ret = __raid56_parity_write(rbio);
1829 if (ret)
1830 btrfs_bio_counter_dec(fs_info);
1831 }
1832 return ret;
1833 }
1834
1835 /*
1836 * all parity reconstruction happens here. We've read in everything
1837 * we can find from the drives and this does the heavy lifting of
1838 * sorting the good from the bad.
1839 */
1840 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1841 {
1842 int pagenr, stripe;
1843 void **pointers;
1844 int faila = -1, failb = -1;
1845 struct page *page;
1846 blk_status_t err;
1847 int i;
1848
1849 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1850 if (!pointers) {
1851 err = BLK_STS_RESOURCE;
1852 goto cleanup_io;
1853 }
1854
1855 faila = rbio->faila;
1856 failb = rbio->failb;
1857
1858 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1859 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1860 spin_lock_irq(&rbio->bio_list_lock);
1861 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1862 spin_unlock_irq(&rbio->bio_list_lock);
1863 }
1864
1865 index_rbio_pages(rbio);
1866
1867 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1868 /*
1869 * Now we just use bitmap to mark the horizontal stripes in
1870 * which we have data when doing parity scrub.
1871 */
1872 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1873 !test_bit(pagenr, rbio->dbitmap))
1874 continue;
1875
1876 /* setup our array of pointers with pages
1877 * from each stripe
1878 */
1879 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1880 /*
1881 * if we're rebuilding a read, we have to use
1882 * pages from the bio list
1883 */
1884 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1885 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1886 (stripe == faila || stripe == failb)) {
1887 page = page_in_rbio(rbio, stripe, pagenr, 0);
1888 } else {
1889 page = rbio_stripe_page(rbio, stripe, pagenr);
1890 }
1891 pointers[stripe] = kmap(page);
1892 }
1893
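/*
 * Decide how to rebuild: a failure that can be fixed from P alone
 * falls through to the pstripe: path below, while RAID6 failures
 * that need Q use raid6_datap_recov() or raid6_2data_recov().
 */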
1894 /* all raid6 handling here */
1895 if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1896 /*
1897 * single failure, rebuild from parity raid5
1898 * style
1899 */
1900 if (failb < 0) {
1901 if (faila == rbio->nr_data) {
1902 /*
1903 * Just the P stripe has failed, without
1904 * a bad data or Q stripe.
1905 * TODO, we should redo the xor here.
1906 */
1907 err = BLK_STS_IOERR;
1908 goto cleanup;
1909 }
1910 /*
1911 * a single failure in raid6 is rebuilt
1912 * in the pstripe code below
1913 */
1914 goto pstripe;
1915 }
1916
1917 /* make sure our ps and qs are in order */
1918 if (faila > failb) {
1919 int tmp = failb;
1920 failb = faila;
1921 faila = tmp;
1922 }
1923
1924 /* if the q stripe is failed, do a pstripe reconstruction
1925 * from the xors.
1926 * If both the q stripe and the P stripe are failed, we're
1927 * here due to a crc mismatch and we can't give them the
1928 * data they want
1929 */
1930 if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
1931 if (rbio->bbio->raid_map[faila] ==
1932 RAID5_P_STRIPE) {
1933 err = BLK_STS_IOERR;
1934 goto cleanup;
1935 }
1936 /*
1937 * otherwise we have one bad data stripe and
1938 * a good P stripe. raid5!
1939 */
1940 goto pstripe;
1941 }
1942
1943 if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
1944 raid6_datap_recov(rbio->real_stripes,
1945 PAGE_SIZE, faila, pointers);
1946 } else {
1947 raid6_2data_recov(rbio->real_stripes,
1948 PAGE_SIZE, faila, failb,
1949 pointers);
1950 }
1951 } else {
1952 void *p;
1953
1954 /* rebuild from P stripe here (raid5 or raid6) */
1955 BUG_ON(failb != -1);
1956 pstripe:
1957 /* Copy parity block into failed block to start with */
1958 copy_page(pointers[faila], pointers[rbio->nr_data]);
1959
1960 /* rearrange the pointer array */
1961 p = pointers[faila];
1962 for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1963 pointers[stripe] = pointers[stripe + 1];
1964 pointers[rbio->nr_data - 1] = p;
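			/*
			 * Illustration (hypothetical values): with
			 * nr_data == 4 and faila == 1, the parity was just
			 * copied into the failed page, and the rotation
			 * turns [D0, Pcopy, D2, D3] into [D0, D2, D3, Pcopy],
			 * so run_xor() below xors D0 ^ D2 ^ D3 into the last
			 * slot and recovers D1 in place.
			 */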
1965
1966 /* xor in the rest */
1967 run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
1968 }
1969 /* if we're doing this rebuild as part of an rmw, go through
1970 * and set all of our private rbio pages in the
1971 * failed stripes as uptodate. This way finish_rmw will
1972 * know they can be trusted. If this was a read reconstruction,
1973 * other endio functions will fiddle the uptodate bits
1974 */
1975 if (rbio->operation == BTRFS_RBIO_WRITE) {
1976 for (i = 0; i < rbio->stripe_npages; i++) {
1977 if (faila != -1) {
1978 page = rbio_stripe_page(rbio, faila, i);
1979 SetPageUptodate(page);
1980 }
1981 if (failb != -1) {
1982 page = rbio_stripe_page(rbio, failb, i);
1983 SetPageUptodate(page);
1984 }
1985 }
1986 }
1987 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1988 /*
1989 * if we're rebuilding a read, we have to use
1990 * pages from the bio list
1991 */
1992 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1993 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1994 (stripe == faila || stripe == failb)) {
1995 page = page_in_rbio(rbio, stripe, pagenr, 0);
1996 } else {
1997 page = rbio_stripe_page(rbio, stripe, pagenr);
1998 }
1999 kunmap(page);
2000 }
2001 }
2002
2003 err = BLK_STS_OK;
2004 cleanup:
2005 kfree(pointers);
2006
2007 cleanup_io:
2008 /*
2009 * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
2010 * valid rbio which is consistent with ondisk content, thus such a
2011 * valid rbio can be cached to avoid further disk reads.
2012 */
2013 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2014 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
2015 /*
2016 * - In case of two failures, where rbio->failb != -1:
2017 *
2018 * Do not cache this rbio since the above read reconstruction
2019 * (raid6_datap_recov() or raid6_2data_recov()) may have
2020 * changed some content of stripes which are not identical to
2021 * on-disk content any more, otherwise, a later write/recover
2022 * may steal stripe_pages from this rbio and end up with
2023 * corruptions or rebuild failures.
2024 *
2025 * - In case of single failure, where rbio->failb == -1:
2026 *
2027 * Cache this rbio iff the above read reconstruction is
	 * executed without problems.
2029 */
2030 if (err == BLK_STS_OK && rbio->failb < 0)
2031 cache_rbio_pages(rbio);
2032 else
2033 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2034
2035 rbio_orig_end_io(rbio, err);
2036 } else if (err == BLK_STS_OK) {
2037 rbio->faila = -1;
2038 rbio->failb = -1;
2039
2040 if (rbio->operation == BTRFS_RBIO_WRITE)
2041 finish_rmw(rbio);
2042 else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
2043 finish_parity_scrub(rbio, 0);
2044 else
2045 BUG();
2046 } else {
2047 rbio_orig_end_io(rbio, err);
2048 }
2049 }
2050
2051 /*
2052 * This is called only for stripes we've read from disk to
2053 * reconstruct the parity.
2054 */
static void raid_recover_end_io(struct bio *bio)
2056 {
2057 struct btrfs_raid_bio *rbio = bio->bi_private;
2058
2059 /*
2060 * we only read stripe pages off the disk, set them
2061 * up to date if there were no errors
2062 */
2063 if (bio->bi_status)
2064 fail_bio_stripe(rbio, bio);
2065 else
2066 set_bio_pages_uptodate(bio);
2067 bio_put(bio);
2068
2069 if (!atomic_dec_and_test(&rbio->stripes_pending))
2070 return;
2071
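	/*
	 * max_errors is 1 for RAID5 and 2 for RAID6; if more stripes than
	 * that failed to read, there is no way to reconstruct the data.
	 */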
2072 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2073 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2074 else
2075 __raid_recover_end_io(rbio);
2076 }
2077
2078 /*
2079 * reads everything we need off the disk to reconstruct
2080 * the parity. endio handlers trigger final reconstruction
2081 * when the IO is done.
2082 *
2083 * This is used both for reads from the higher layers and for
2084 * parity construction required to finish a rmw cycle.
2085 */
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2087 {
2088 int bios_to_read = 0;
2089 struct bio_list bio_list;
2090 int ret;
2091 int pagenr;
2092 int stripe;
2093 struct bio *bio;
2094
2095 bio_list_init(&bio_list);
2096
2097 ret = alloc_rbio_pages(rbio);
2098 if (ret)
2099 goto cleanup;
2100
2101 atomic_set(&rbio->error, 0);
2102
	/*
	 * Read everything that hasn't failed.  However, this time we will
	 * not trust any cached sector.
	 *
	 * A cached page may contain stale data that the higher layer is not
	 * going to read, so in the recovery path we always re-read
	 * everything from disk.
	 */
2111 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2112 if (rbio->faila == stripe || rbio->failb == stripe) {
2113 atomic_inc(&rbio->error);
2114 continue;
2115 }
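		/*
		 * The failed stripes are not read; counting them in
		 * rbio->error lets the end_io path compare against
		 * max_errors just like a real read failure.
		 */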
2116
2117 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
2118 ret = rbio_add_io_page(rbio, &bio_list,
2119 rbio_stripe_page(rbio, stripe, pagenr),
2120 stripe, pagenr, rbio->stripe_len);
2121 if (ret < 0)
2122 goto cleanup;
2123 }
2124 }
2125
2126 bios_to_read = bio_list_size(&bio_list);
2127 if (!bios_to_read) {
2128 /*
2129 * we might have no bios to read just because the pages
2130 * were up to date, or we might have no bios to read because
2131 * the devices were gone.
2132 */
2133 if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
2134 __raid_recover_end_io(rbio);
2135 goto out;
2136 } else {
2137 goto cleanup;
2138 }
2139 }
2140
2141 /*
2142 * the bbio may be freed once we submit the last bio. Make sure
2143 * not to touch it after that
2144 */
2145 atomic_set(&rbio->stripes_pending, bios_to_read);
2146 while (1) {
2147 bio = bio_list_pop(&bio_list);
2148 if (!bio)
2149 break;
2150
2151 bio->bi_private = rbio;
2152 bio->bi_end_io = raid_recover_end_io;
2153 bio->bi_opf = REQ_OP_READ;
2154
2155 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2156
2157 submit_bio(bio);
2158 }
2159 out:
2160 return 0;
2161
2162 cleanup:
2163 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2164 rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
2165 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2166
2167 while ((bio = bio_list_pop(&bio_list)))
2168 bio_put(bio);
2169
2170 return -EIO;
2171 }
2172
2173 /*
2174 * the main entry point for reads from the higher layers. This
2175 * is really only called when the normal read path had a failure,
2176 * so we assume the bio they send down corresponds to a failed part
2177 * of the drive.
2178 */
int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
2180 struct btrfs_bio *bbio, u64 stripe_len,
2181 int mirror_num, int generic_io)
2182 {
2183 struct btrfs_raid_bio *rbio;
2184 int ret;
2185
2186 if (generic_io) {
2187 ASSERT(bbio->mirror_num == mirror_num);
2188 btrfs_io_bio(bio)->mirror_num = mirror_num;
2189 }
2190
2191 rbio = alloc_rbio(fs_info, bbio, stripe_len);
2192 if (IS_ERR(rbio)) {
2193 if (generic_io)
2194 btrfs_put_bbio(bbio);
2195 return PTR_ERR(rbio);
2196 }
2197
2198 rbio->operation = BTRFS_RBIO_READ_REBUILD;
2199 rbio_add_bio(rbio, bio);
2200
2201 rbio->faila = find_logical_bio_stripe(rbio, bio);
2202 if (rbio->faila == -1) {
2203 btrfs_warn(fs_info,
2204 "%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
2205 __func__, (u64)bio->bi_iter.bi_sector << 9,
2206 (u64)bio->bi_iter.bi_size, bbio->map_type);
2207 if (generic_io)
2208 btrfs_put_bbio(bbio);
2209 kfree(rbio);
2210 return -EIO;
2211 }
2212
2213 if (generic_io) {
2214 btrfs_bio_counter_inc_noblocked(fs_info);
2215 rbio->generic_bio_cnt = 1;
2216 } else {
2217 btrfs_get_bbio(bbio);
2218 }
2219
	/*
	 * Loop retry:
	 * for 'mirror_num == 2', reconstruct from all other stripes.
	 * for 'mirror_num > 2', select a stripe to fail on every retry.
	 */
2225 if (mirror_num > 2) {
		/*
		 * 'mirror_num == 3' fails the P stripe and reconstructs from
		 * the Q stripe.  'mirror_num > 3' fails a data stripe and
		 * reconstructs from the P+Q stripes.
		 */
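		/*
		 * Illustration (hypothetical values): on a 4-device RAID6
		 * (nr_data == 2, real_stripes == 4), mirror_num == 3 gives
		 * failb = 4 - 2 = 2, i.e. the P stripe, and mirror_num == 4
		 * gives failb = 1, a data stripe; failb is then decremented
		 * below if it does not land above faila.
		 */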
2231 rbio->failb = rbio->real_stripes - (mirror_num - 1);
2232 ASSERT(rbio->failb > 0);
2233 if (rbio->failb <= rbio->faila)
2234 rbio->failb--;
2235 }
2236
2237 ret = lock_stripe_add(rbio);
2238
2239 /*
2240 * __raid56_parity_recover will end the bio with
2241 * any errors it hits. We don't want to return
2242 * its error value up the stack because our caller
2243 * will end up calling bio_endio with any nonzero
2244 * return
2245 */
2246 if (ret == 0)
2247 __raid56_parity_recover(rbio);
	/*
	 * Otherwise our rbio has been added to the list of rbios that
	 * will be handled after the current lock owner is done.
	 */
2253 return 0;
2254
2255 }
2256
static void rmw_work(struct btrfs_work *work)
2258 {
2259 struct btrfs_raid_bio *rbio;
2260
2261 rbio = container_of(work, struct btrfs_raid_bio, work);
2262 raid56_rmw_stripe(rbio);
2263 }
2264
static void read_rebuild_work(struct btrfs_work *work)
2266 {
2267 struct btrfs_raid_bio *rbio;
2268
2269 rbio = container_of(work, struct btrfs_raid_bio, work);
2270 __raid56_parity_recover(rbio);
2271 }
2272
2273 /*
2274 * The following code is used to scrub/replace the parity stripe
2275 *
2276 * Caller must have already increased bio_counter for getting @bbio.
2277 *
 * Note: We need to make sure all the pages that are added into the
 * scrub/replace raid bio are correct and are not changed during the
 * scrub/replace, i.e. those pages hold only metadata or file data with
 * checksums.
2281 */
2282
2283 struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2285 struct btrfs_bio *bbio, u64 stripe_len,
2286 struct btrfs_device *scrub_dev,
2287 unsigned long *dbitmap, int stripe_nsectors)
2288 {
2289 struct btrfs_raid_bio *rbio;
2290 int i;
2291
2292 rbio = alloc_rbio(fs_info, bbio, stripe_len);
2293 if (IS_ERR(rbio))
2294 return NULL;
2295 bio_list_add(&rbio->bio_list, bio);
2296 /*
2297 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other rbio types.
2299 */
2300 ASSERT(!bio->bi_iter.bi_size);
2301 rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2302
2303 /*
2304 * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
2305 * to the end position, so this search can start from the first parity
2306 * stripe.
2307 */
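	/*
	 * i.e. stripes[0..nr_data-1] are data, stripes[nr_data] is P and,
	 * for RAID6, stripes[nr_data + 1] is Q, so the device being
	 * scrubbed is expected to match one of the parity slots scanned
	 * below.
	 */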
2308 for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
2309 if (bbio->stripes[i].dev == scrub_dev) {
2310 rbio->scrubp = i;
2311 break;
2312 }
2313 }
2314 ASSERT(i < rbio->real_stripes);
2315
	/* For now we only support the case where sectorsize equals PAGE_SIZE */
2317 ASSERT(fs_info->sectorsize == PAGE_SIZE);
2318 ASSERT(rbio->stripe_npages == stripe_nsectors);
2319 bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
2320
2321 /*
2322 * We have already increased bio_counter when getting bbio, record it
2323 * so we can free it at rbio_orig_end_io().
2324 */
2325 rbio->generic_bio_cnt = 1;
2326
2327 return rbio;
2328 }
2329
2330 /* Used for both parity scrub and missing. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2332 u64 logical)
2333 {
2334 int stripe_offset;
2335 int index;
2336
2337 ASSERT(logical >= rbio->bbio->raid_map[0]);
2338 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2339 rbio->stripe_len * rbio->nr_data);
2340 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2341 index = stripe_offset >> PAGE_SHIFT;
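	/*
	 * Example (illustrative values): with 4K pages, a page at logical
	 * offset 72K from raid_map[0] lands at bio_pages[18]; bio_pages is
	 * indexed by logical offset across all the data stripes.
	 */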
2342 rbio->bio_pages[index] = page;
2343 }
2344
/*
 * We only scrub the parity for which we have correct data on the same
 * horizontal stripe, so we don't need to allocate all pages for all the
 * stripes.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2350 {
2351 int i;
2352 int bit;
2353 int index;
2354 struct page *page;
2355
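	/*
	 * stripe_pages is laid out stripe-major (all pages of stripe 0,
	 * then stripe 1, ...), hence index = i * stripe_npages + bit below.
	 */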
2356 for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2357 for (i = 0; i < rbio->real_stripes; i++) {
2358 index = i * rbio->stripe_npages + bit;
2359 if (rbio->stripe_pages[index])
2360 continue;
2361
2362 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2363 if (!page)
2364 return -ENOMEM;
2365 rbio->stripe_pages[index] = page;
2366 }
2367 }
2368 return 0;
2369 }
2370
static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2372 int need_check)
2373 {
2374 struct btrfs_bio *bbio = rbio->bbio;
2375 void **pointers = rbio->finish_pointers;
2376 unsigned long *pbitmap = rbio->finish_pbitmap;
2377 int nr_data = rbio->nr_data;
2378 int stripe;
2379 int pagenr;
2380 bool has_qstripe;
2381 struct page *p_page = NULL;
2382 struct page *q_page = NULL;
2383 struct bio_list bio_list;
2384 struct bio *bio;
2385 int is_replace = 0;
2386 int ret;
2387
2388 bio_list_init(&bio_list);
2389
2390 if (rbio->real_stripes - rbio->nr_data == 1)
2391 has_qstripe = false;
2392 else if (rbio->real_stripes - rbio->nr_data == 2)
2393 has_qstripe = true;
2394 else
2395 BUG();
2396
2397 if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
2398 is_replace = 1;
2399 bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
2400 }
2401
	/*
	 * The higher layers (the scrubber) are unlikely to use this area
	 * of the disk again soon, so don't cache it.
	 */
2407 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2408
2409 if (!need_check)
2410 goto writeback;
2411
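	/*
	 * For the check path we generate the expected parity into scratch
	 * pages (p_page, plus q_page for RAID6) instead of the on-disk
	 * stripe pages, so we can memcmp against what was read from disk
	 * and only write back the pages whose parity is actually wrong.
	 */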
2412 p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2413 if (!p_page)
2414 goto cleanup;
2415 SetPageUptodate(p_page);
2416
2417 if (has_qstripe) {
2418 /* RAID6, allocate and map temp space for the Q stripe */
2419 q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2420 if (!q_page) {
2421 __free_page(p_page);
2422 goto cleanup;
2423 }
2424 SetPageUptodate(q_page);
2425 pointers[rbio->real_stripes - 1] = kmap(q_page);
2426 }
2427
2428 atomic_set(&rbio->error, 0);
2429
2430 /* Map the parity stripe just once */
2431 pointers[nr_data] = kmap(p_page);
2432
2433 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2434 struct page *p;
2435 void *parity;
2436 /* first collect one page from each data stripe */
2437 for (stripe = 0; stripe < nr_data; stripe++) {
2438 p = page_in_rbio(rbio, stripe, pagenr, 0);
2439 pointers[stripe] = kmap(p);
2440 }
2441
2442 if (has_qstripe) {
2443 /* RAID6, call the library function to fill in our P/Q */
2444 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
2445 pointers);
2446 } else {
2447 /* raid5 */
2448 copy_page(pointers[nr_data], pointers[0]);
2449 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
2450 }
2451
2452 /* Check scrubbing parity and repair it */
2453 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2454 parity = kmap(p);
2455 if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
2456 copy_page(parity, pointers[rbio->scrubp]);
2457 else
2458 /* Parity is right, needn't writeback */
2459 bitmap_clear(rbio->dbitmap, pagenr, 1);
2460 kunmap(p);
2461
2462 for (stripe = 0; stripe < nr_data; stripe++)
2463 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2464 }
2465
2466 kunmap(p_page);
2467 __free_page(p_page);
2468 if (q_page) {
2469 kunmap(q_page);
2470 __free_page(q_page);
2471 }
2472
2473 writeback:
	/*
	 * Time to start writing.  Make bios for the pages of the scrubbed
	 * parity stripe that actually changed (the bits still set in
	 * dbitmap) and, for replace, the target device copies.  Ignore
	 * everything else.
	 */
2479 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2480 struct page *page;
2481
2482 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2483 ret = rbio_add_io_page(rbio, &bio_list,
2484 page, rbio->scrubp, pagenr, rbio->stripe_len);
2485 if (ret)
2486 goto cleanup;
2487 }
2488
2489 if (!is_replace)
2490 goto submit_write;
2491
2492 for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
2493 struct page *page;
2494
2495 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2496 ret = rbio_add_io_page(rbio, &bio_list, page,
2497 bbio->tgtdev_map[rbio->scrubp],
2498 pagenr, rbio->stripe_len);
2499 if (ret)
2500 goto cleanup;
2501 }
2502
2503 submit_write:
2504 nr_data = bio_list_size(&bio_list);
2505 if (!nr_data) {
2506 /* Every parity is right */
2507 rbio_orig_end_io(rbio, BLK_STS_OK);
2508 return;
2509 }
2510
2511 atomic_set(&rbio->stripes_pending, nr_data);
2512
2513 while (1) {
2514 bio = bio_list_pop(&bio_list);
2515 if (!bio)
2516 break;
2517
2518 bio->bi_private = rbio;
2519 bio->bi_end_io = raid_write_end_io;
2520 bio->bi_opf = REQ_OP_WRITE;
2521
2522 submit_bio(bio);
2523 }
2524 return;
2525
2526 cleanup:
2527 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2528
2529 while ((bio = bio_list_pop(&bio_list)))
2530 bio_put(bio);
2531 }
2532
static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2534 {
2535 if (stripe >= 0 && stripe < rbio->nr_data)
2536 return 1;
2537 return 0;
2538 }
2539
2540 /*
2541 * While we're doing the parity check and repair, we could have errors
2542 * in reading pages off the disk. This checks for errors and if we're
2543 * not able to read the page it'll trigger parity reconstruction. The
2544 * parity scrub will be finished after we've reconstructed the failed
2545 * stripes
2546 */
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2548 {
2549 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2550 goto cleanup;
2551
2552 if (rbio->faila >= 0 || rbio->failb >= 0) {
2553 int dfail = 0, failp = -1;
2554
2555 if (is_data_stripe(rbio, rbio->faila))
2556 dfail++;
2557 else if (is_parity_stripe(rbio->faila))
2558 failp = rbio->faila;
2559
2560 if (is_data_stripe(rbio, rbio->failb))
2561 dfail++;
2562 else if (is_parity_stripe(rbio->failb))
2563 failp = rbio->failb;
2564
		/*
		 * Because we cannot use the parity that is being scrubbed to
		 * repair data, our ability to repair is reduced.
		 * (In the case of RAID5, we cannot repair anything.)
		 */
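		/*
		 * max_errors is 1 for RAID5 and 2 for RAID6, so a failed
		 * data stripe (dfail == 1) can only be tolerated here on
		 * RAID6; dfail == 0 falls through to the parity-only repair
		 * below.
		 */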
2570 if (dfail > rbio->bbio->max_errors - 1)
2571 goto cleanup;
2572
		/*
		 * If all the data is good, only the parity has a problem;
		 * just repair the parity.
		 */
2577 if (dfail == 0) {
2578 finish_parity_scrub(rbio, 0);
2579 return;
2580 }
2581
		/*
		 * At this point we have one corrupted data stripe and one
		 * corrupted parity on RAID6.  If the corrupted parity is the
		 * one being scrubbed, we can luckily use the other parity to
		 * repair the data; otherwise we cannot repair the data
		 * stripe.
		 */
2588 if (failp != rbio->scrubp)
2589 goto cleanup;
2590
2591 __raid_recover_end_io(rbio);
2592 } else {
2593 finish_parity_scrub(rbio, 1);
2594 }
2595 return;
2596
2597 cleanup:
2598 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2599 }
2600
/*
 * end io for the read phase of the scrub/replace cycle.  All the bios here
 * are physical stripe bios we've read from the disk so we can recalculate
 * the parity of the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are read
 * in, but it may trigger parity reconstruction if we had any errors along
 * the way
 */
static void raid56_parity_scrub_end_io(struct bio *bio)
2610 {
2611 struct btrfs_raid_bio *rbio = bio->bi_private;
2612
2613 if (bio->bi_status)
2614 fail_bio_stripe(rbio, bio);
2615 else
2616 set_bio_pages_uptodate(bio);
2617
2618 bio_put(bio);
2619
2620 if (!atomic_dec_and_test(&rbio->stripes_pending))
2621 return;
2622
2623 /*
2624 * this will normally call finish_rmw to start our write
2625 * but if there are any failed stripes we'll reconstruct
2626 * from parity first
2627 */
2628 validate_rbio_for_parity_scrub(rbio);
2629 }
2630
static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2632 {
2633 int bios_to_read = 0;
2634 struct bio_list bio_list;
2635 int ret;
2636 int pagenr;
2637 int stripe;
2638 struct bio *bio;
2639
2640 bio_list_init(&bio_list);
2641
2642 ret = alloc_rbio_essential_pages(rbio);
2643 if (ret)
2644 goto cleanup;
2645
2646 atomic_set(&rbio->error, 0);
2647 /*
2648 * build a list of bios to read all the missing parts of this
2649 * stripe
2650 */
2651 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2652 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2653 struct page *page;
2654 /*
2655 * we want to find all the pages missing from
2656 * the rbio and read them from the disk. If
2657 * page_in_rbio finds a page in the bio list
2658 * we don't need to read it off the stripe.
2659 */
2660 page = page_in_rbio(rbio, stripe, pagenr, 1);
2661 if (page)
2662 continue;
2663
2664 page = rbio_stripe_page(rbio, stripe, pagenr);
2665 /*
2666 * the bio cache may have handed us an uptodate
2667 * page. If so, be happy and use it
2668 */
2669 if (PageUptodate(page))
2670 continue;
2671
2672 ret = rbio_add_io_page(rbio, &bio_list, page,
2673 stripe, pagenr, rbio->stripe_len);
2674 if (ret)
2675 goto cleanup;
2676 }
2677 }
2678
2679 bios_to_read = bio_list_size(&bio_list);
2680 if (!bios_to_read) {
2681 /*
2682 * this can happen if others have merged with
2683 * us, it means there is nothing left to read.
2684 * But if there are missing devices it may not be
2685 * safe to do the full stripe write yet.
2686 */
2687 goto finish;
2688 }
2689
2690 /*
2691 * the bbio may be freed once we submit the last bio. Make sure
2692 * not to touch it after that
2693 */
2694 atomic_set(&rbio->stripes_pending, bios_to_read);
2695 while (1) {
2696 bio = bio_list_pop(&bio_list);
2697 if (!bio)
2698 break;
2699
2700 bio->bi_private = rbio;
2701 bio->bi_end_io = raid56_parity_scrub_end_io;
2702 bio->bi_opf = REQ_OP_READ;
2703
2704 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2705
2706 submit_bio(bio);
2707 }
2708 /* the actual write will happen once the reads are done */
2709 return;
2710
2711 cleanup:
2712 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2713
2714 while ((bio = bio_list_pop(&bio_list)))
2715 bio_put(bio);
2716
2717 return;
2718
2719 finish:
2720 validate_rbio_for_parity_scrub(rbio);
2721 }
2722
static void scrub_parity_work(struct btrfs_work *work)
2724 {
2725 struct btrfs_raid_bio *rbio;
2726
2727 rbio = container_of(work, struct btrfs_raid_bio, work);
2728 raid56_parity_scrub_stripe(rbio);
2729 }
2730
void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2732 {
2733 if (!lock_stripe_add(rbio))
2734 start_async_work(rbio, scrub_parity_work);
2735 }
2736
2737 /* The following code is used for dev replace of a missing RAID 5/6 device. */
2738
2739 struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2741 struct btrfs_bio *bbio, u64 length)
2742 {
2743 struct btrfs_raid_bio *rbio;
2744
2745 rbio = alloc_rbio(fs_info, bbio, length);
2746 if (IS_ERR(rbio))
2747 return NULL;
2748
2749 rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2750 bio_list_add(&rbio->bio_list, bio);
2751 /*
2752 * This is a special bio which is used to hold the completion handler
	 * and make this rbio similar to the other rbio types.
2754 */
2755 ASSERT(!bio->bi_iter.bi_size);
2756
2757 rbio->faila = find_logical_bio_stripe(rbio, bio);
2758 if (rbio->faila == -1) {
2759 BUG();
2760 kfree(rbio);
2761 return NULL;
2762 }
2763
2764 /*
2765 * When we get bbio, we have already increased bio_counter, record it
2766 * so we can free it at rbio_orig_end_io()
2767 */
2768 rbio->generic_bio_cnt = 1;
2769
2770 return rbio;
2771 }
2772
void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2774 {
2775 if (!lock_stripe_add(rbio))
2776 start_async_work(rbio, read_rebuild_work);
2777 }
2778