Lines matching refs: rbio (fs/btrfs/raid56.c)
161 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
162 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
165 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
166 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
167 static void __free_raid_bio(struct btrfs_raid_bio *rbio);
168 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
169 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
171 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
175 static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func) in start_async_work() argument
177 btrfs_init_work(&rbio->work, btrfs_rmw_helper, work_func, NULL, NULL); in start_async_work()
178 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work); in start_async_work()
236 static void cache_rbio_pages(struct btrfs_raid_bio *rbio) in cache_rbio_pages() argument
243 ret = alloc_rbio_pages(rbio); in cache_rbio_pages()
247 for (i = 0; i < rbio->nr_pages; i++) { in cache_rbio_pages()
248 if (!rbio->bio_pages[i]) in cache_rbio_pages()
251 s = kmap(rbio->bio_pages[i]); in cache_rbio_pages()
252 d = kmap(rbio->stripe_pages[i]); in cache_rbio_pages()
256 kunmap(rbio->bio_pages[i]); in cache_rbio_pages()
257 kunmap(rbio->stripe_pages[i]); in cache_rbio_pages()
258 SetPageUptodate(rbio->stripe_pages[i]); in cache_rbio_pages()
260 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in cache_rbio_pages()
266 static int rbio_bucket(struct btrfs_raid_bio *rbio) in rbio_bucket() argument
268 u64 num = rbio->bbio->raid_map[0]; in rbio_bucket()
332 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio) in __remove_rbio_from_cache() argument
334 int bucket = rbio_bucket(rbio); in __remove_rbio_from_cache()
342 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in __remove_rbio_from_cache()
345 table = rbio->fs_info->stripe_hash_table; in __remove_rbio_from_cache()
357 spin_lock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
359 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) { in __remove_rbio_from_cache()
360 list_del_init(&rbio->stripe_cache); in __remove_rbio_from_cache()
373 if (bio_list_empty(&rbio->bio_list)) { in __remove_rbio_from_cache()
374 if (!list_empty(&rbio->hash_list)) { in __remove_rbio_from_cache()
375 list_del_init(&rbio->hash_list); in __remove_rbio_from_cache()
376 refcount_dec(&rbio->refs); in __remove_rbio_from_cache()
377 BUG_ON(!list_empty(&rbio->plug_list)); in __remove_rbio_from_cache()
382 spin_unlock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
386 __free_raid_bio(rbio); in __remove_rbio_from_cache()
392 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio) in remove_rbio_from_cache() argument
397 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in remove_rbio_from_cache()
400 table = rbio->fs_info->stripe_hash_table; in remove_rbio_from_cache()
403 __remove_rbio_from_cache(rbio); in remove_rbio_from_cache()
414 struct btrfs_raid_bio *rbio; in btrfs_clear_rbio_cache() local
420 rbio = list_entry(table->stripe_cache.next, in btrfs_clear_rbio_cache()
423 __remove_rbio_from_cache(rbio); in btrfs_clear_rbio_cache()
452 static void cache_rbio(struct btrfs_raid_bio *rbio) in cache_rbio() argument
457 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags)) in cache_rbio()
460 table = rbio->fs_info->stripe_hash_table; in cache_rbio()
463 spin_lock(&rbio->bio_list_lock); in cache_rbio()
466 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags)) in cache_rbio()
467 refcount_inc(&rbio->refs); in cache_rbio()
469 if (!list_empty(&rbio->stripe_cache)){ in cache_rbio()
470 list_move(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
472 list_add(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
476 spin_unlock(&rbio->bio_list_lock); in cache_rbio()
485 if (found != rbio) in cache_rbio()
516 static int rbio_is_full(struct btrfs_raid_bio *rbio) in rbio_is_full() argument
519 unsigned long size = rbio->bio_list_bytes; in rbio_is_full()
522 spin_lock_irqsave(&rbio->bio_list_lock, flags); in rbio_is_full()
523 if (size != rbio->nr_data * rbio->stripe_len) in rbio_is_full()
525 BUG_ON(size > rbio->nr_data * rbio->stripe_len); in rbio_is_full()
526 spin_unlock_irqrestore(&rbio->bio_list_lock, flags); in rbio_is_full()
602 static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe, in rbio_stripe_page_index() argument
605 return stripe * rbio->stripe_npages + index; in rbio_stripe_page_index()
612 static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, in rbio_stripe_page() argument
615 return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)]; in rbio_stripe_page()
621 static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index) in rbio_pstripe_page() argument
623 return rbio_stripe_page(rbio, rbio->nr_data, index); in rbio_pstripe_page()
630 static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index) in rbio_qstripe_page() argument
632 if (rbio->nr_data + 1 == rbio->real_stripes) in rbio_qstripe_page()
634 return rbio_stripe_page(rbio, rbio->nr_data + 1, index); in rbio_qstripe_page()
659 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) in lock_stripe_add() argument
661 int bucket = rbio_bucket(rbio); in lock_stripe_add()
662 struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket; in lock_stripe_add()
672 if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) { in lock_stripe_add()
683 steal_rbio(cur, rbio); in lock_stripe_add()
691 if (rbio_can_merge(cur, rbio)) { in lock_stripe_add()
692 merge_rbio(cur, rbio); in lock_stripe_add()
694 freeit = rbio; in lock_stripe_add()
710 if (rbio_can_merge(pending, rbio)) { in lock_stripe_add()
711 merge_rbio(pending, rbio); in lock_stripe_add()
713 freeit = rbio; in lock_stripe_add()
723 list_add_tail(&rbio->plug_list, &cur->plug_list); in lock_stripe_add()
730 refcount_inc(&rbio->refs); in lock_stripe_add()
731 list_add(&rbio->hash_list, &h->hash_list); in lock_stripe_add()
745 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio) in unlock_stripe() argument
752 bucket = rbio_bucket(rbio); in unlock_stripe()
753 h = rbio->fs_info->stripe_hash_table->table + bucket; in unlock_stripe()
755 if (list_empty(&rbio->plug_list)) in unlock_stripe()
756 cache_rbio(rbio); in unlock_stripe()
759 spin_lock(&rbio->bio_list_lock); in unlock_stripe()
761 if (!list_empty(&rbio->hash_list)) { in unlock_stripe()
767 if (list_empty(&rbio->plug_list) && in unlock_stripe()
768 test_bit(RBIO_CACHE_BIT, &rbio->flags)) { in unlock_stripe()
770 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in unlock_stripe()
771 BUG_ON(!bio_list_empty(&rbio->bio_list)); in unlock_stripe()
775 list_del_init(&rbio->hash_list); in unlock_stripe()
776 refcount_dec(&rbio->refs); in unlock_stripe()
783 if (!list_empty(&rbio->plug_list)) { in unlock_stripe()
785 struct list_head *head = rbio->plug_list.next; in unlock_stripe()
790 list_del_init(&rbio->plug_list); in unlock_stripe()
794 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
800 steal_rbio(rbio, next); in unlock_stripe()
803 steal_rbio(rbio, next); in unlock_stripe()
806 steal_rbio(rbio, next); in unlock_stripe()
814 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
819 remove_rbio_from_cache(rbio); in unlock_stripe()
822 static void __free_raid_bio(struct btrfs_raid_bio *rbio) in __free_raid_bio() argument
826 if (!refcount_dec_and_test(&rbio->refs)) in __free_raid_bio()
829 WARN_ON(!list_empty(&rbio->stripe_cache)); in __free_raid_bio()
830 WARN_ON(!list_empty(&rbio->hash_list)); in __free_raid_bio()
831 WARN_ON(!bio_list_empty(&rbio->bio_list)); in __free_raid_bio()
833 for (i = 0; i < rbio->nr_pages; i++) { in __free_raid_bio()
834 if (rbio->stripe_pages[i]) { in __free_raid_bio()
835 __free_page(rbio->stripe_pages[i]); in __free_raid_bio()
836 rbio->stripe_pages[i] = NULL; in __free_raid_bio()
840 btrfs_put_bbio(rbio->bbio); in __free_raid_bio()
841 kfree(rbio); in __free_raid_bio()
861 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err) in rbio_orig_end_io() argument
863 struct bio *cur = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
866 if (rbio->generic_bio_cnt) in rbio_orig_end_io()
867 btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt); in rbio_orig_end_io()
873 bitmap_clear(rbio->dbitmap, 0, rbio->stripe_npages); in rbio_orig_end_io()
883 unlock_stripe(rbio); in rbio_orig_end_io()
884 extra = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
885 __free_raid_bio(rbio); in rbio_orig_end_io()
898 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_write_end_io() local
903 fail_bio_stripe(rbio, bio); in raid_write_end_io()
907 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid_write_end_io()
913 max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ? in raid_write_end_io()
914 0 : rbio->bbio->max_errors; in raid_write_end_io()
915 if (atomic_read(&rbio->error) > max_errors) in raid_write_end_io()
918 rbio_orig_end_io(rbio, err); in raid_write_end_io()
937 static struct page *page_in_rbio(struct btrfs_raid_bio *rbio, in page_in_rbio() argument
943 chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr; in page_in_rbio()
945 spin_lock_irq(&rbio->bio_list_lock); in page_in_rbio()
946 p = rbio->bio_pages[chunk_page]; in page_in_rbio()
947 spin_unlock_irq(&rbio->bio_list_lock); in page_in_rbio()
952 return rbio->stripe_pages[chunk_page]; in page_in_rbio()
972 struct btrfs_raid_bio *rbio; in alloc_rbio() local
979 rbio = kzalloc(sizeof(*rbio) + in alloc_rbio()
980 sizeof(*rbio->stripe_pages) * num_pages + in alloc_rbio()
981 sizeof(*rbio->bio_pages) * num_pages + in alloc_rbio()
982 sizeof(*rbio->finish_pointers) * real_stripes + in alloc_rbio()
983 sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) + in alloc_rbio()
984 sizeof(*rbio->finish_pbitmap) * in alloc_rbio()
987 if (!rbio) in alloc_rbio()
990 bio_list_init(&rbio->bio_list); in alloc_rbio()
991 INIT_LIST_HEAD(&rbio->plug_list); in alloc_rbio()
992 spin_lock_init(&rbio->bio_list_lock); in alloc_rbio()
993 INIT_LIST_HEAD(&rbio->stripe_cache); in alloc_rbio()
994 INIT_LIST_HEAD(&rbio->hash_list); in alloc_rbio()
995 rbio->bbio = bbio; in alloc_rbio()
996 rbio->fs_info = fs_info; in alloc_rbio()
997 rbio->stripe_len = stripe_len; in alloc_rbio()
998 rbio->nr_pages = num_pages; in alloc_rbio()
999 rbio->real_stripes = real_stripes; in alloc_rbio()
1000 rbio->stripe_npages = stripe_npages; in alloc_rbio()
1001 rbio->faila = -1; in alloc_rbio()
1002 rbio->failb = -1; in alloc_rbio()
1003 refcount_set(&rbio->refs, 1); in alloc_rbio()
1004 atomic_set(&rbio->error, 0); in alloc_rbio()
1005 atomic_set(&rbio->stripes_pending, 0); in alloc_rbio()
1011 p = rbio + 1; in alloc_rbio()
1016 CONSUME_ALLOC(rbio->stripe_pages, num_pages); in alloc_rbio()
1017 CONSUME_ALLOC(rbio->bio_pages, num_pages); in alloc_rbio()
1018 CONSUME_ALLOC(rbio->finish_pointers, real_stripes); in alloc_rbio()
1019 CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages)); in alloc_rbio()
1020 CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages)); in alloc_rbio()
1030 rbio->nr_data = nr_data; in alloc_rbio()
1031 return rbio; in alloc_rbio()
1035 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_pages() argument
1040 for (i = 0; i < rbio->nr_pages; i++) { in alloc_rbio_pages()
1041 if (rbio->stripe_pages[i]) in alloc_rbio_pages()
1046 rbio->stripe_pages[i] = page; in alloc_rbio_pages()
1052 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_parity_pages() argument
1057 i = rbio_stripe_page_index(rbio, rbio->nr_data, 0); in alloc_rbio_parity_pages()
1059 for (; i < rbio->nr_pages; i++) { in alloc_rbio_parity_pages()
1060 if (rbio->stripe_pages[i]) in alloc_rbio_parity_pages()
1065 rbio->stripe_pages[i] = page; in alloc_rbio_parity_pages()
1075 static int rbio_add_io_page(struct btrfs_raid_bio *rbio, in rbio_add_io_page() argument
1089 stripe = &rbio->bbio->stripes[stripe_nr]; in rbio_add_io_page()
1094 return fail_rbio_index(rbio, stripe_nr); in rbio_add_io_page()
1133 static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio) in validate_rbio_for_rmw() argument
1135 if (rbio->faila >= 0 || rbio->failb >= 0) { in validate_rbio_for_rmw()
1136 BUG_ON(rbio->faila == rbio->real_stripes - 1); in validate_rbio_for_rmw()
1137 __raid56_parity_recover(rbio); in validate_rbio_for_rmw()
1139 finish_rmw(rbio); in validate_rbio_for_rmw()
1151 static void index_rbio_pages(struct btrfs_raid_bio *rbio) in index_rbio_pages() argument
1158 spin_lock_irq(&rbio->bio_list_lock); in index_rbio_pages()
1159 bio_list_for_each(bio, &rbio->bio_list) { in index_rbio_pages()
1165 stripe_offset = start - rbio->bbio->raid_map[0]; in index_rbio_pages()
1172 rbio->bio_pages[page_index + i] = bvec.bv_page; in index_rbio_pages()
1176 spin_unlock_irq(&rbio->bio_list_lock); in index_rbio_pages()
1187 static noinline void finish_rmw(struct btrfs_raid_bio *rbio) in finish_rmw() argument
1189 struct btrfs_bio *bbio = rbio->bbio; in finish_rmw()
1190 void **pointers = rbio->finish_pointers; in finish_rmw()
1191 int nr_data = rbio->nr_data; in finish_rmw()
1201 if (rbio->real_stripes - rbio->nr_data == 1) in finish_rmw()
1203 else if (rbio->real_stripes - rbio->nr_data == 2) in finish_rmw()
1209 ASSERT(bitmap_weight(rbio->dbitmap, rbio->stripe_npages)); in finish_rmw()
1219 spin_lock_irq(&rbio->bio_list_lock); in finish_rmw()
1220 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in finish_rmw()
1221 spin_unlock_irq(&rbio->bio_list_lock); in finish_rmw()
1223 atomic_set(&rbio->error, 0); in finish_rmw()
1234 index_rbio_pages(rbio); in finish_rmw()
1235 if (!rbio_is_full(rbio)) in finish_rmw()
1236 cache_rbio_pages(rbio); in finish_rmw()
1238 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in finish_rmw()
1240 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in finish_rmw()
1244 p = page_in_rbio(rbio, stripe, pagenr, 0); in finish_rmw()
1249 p = rbio_pstripe_page(rbio, pagenr); in finish_rmw()
1259 p = rbio_qstripe_page(rbio, pagenr); in finish_rmw()
1263 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE, in finish_rmw()
1272 for (stripe = 0; stripe < rbio->real_stripes; stripe++) in finish_rmw()
1273 kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); in finish_rmw()
1281 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in finish_rmw()
1282 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in finish_rmw()
1286 if (!test_bit(pagenr, rbio->dbitmap)) in finish_rmw()
1289 if (stripe < rbio->nr_data) { in finish_rmw()
1290 page = page_in_rbio(rbio, stripe, pagenr, 1); in finish_rmw()
1294 page = rbio_stripe_page(rbio, stripe, pagenr); in finish_rmw()
1297 ret = rbio_add_io_page(rbio, &bio_list, in finish_rmw()
1298 page, stripe, pagenr, rbio->stripe_len); in finish_rmw()
1307 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in finish_rmw()
1311 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in finish_rmw()
1315 if (!test_bit(pagenr, rbio->dbitmap)) in finish_rmw()
1318 if (stripe < rbio->nr_data) { in finish_rmw()
1319 page = page_in_rbio(rbio, stripe, pagenr, 1); in finish_rmw()
1323 page = rbio_stripe_page(rbio, stripe, pagenr); in finish_rmw()
1326 ret = rbio_add_io_page(rbio, &bio_list, page, in finish_rmw()
1327 rbio->bbio->tgtdev_map[stripe], in finish_rmw()
1328 pagenr, rbio->stripe_len); in finish_rmw()
1335 atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list)); in finish_rmw()
1336 BUG_ON(atomic_read(&rbio->stripes_pending) == 0); in finish_rmw()
1343 bio->bi_private = rbio; in finish_rmw()
1352 rbio_orig_end_io(rbio, BLK_STS_IOERR); in finish_rmw()
1363 static int find_bio_stripe(struct btrfs_raid_bio *rbio, in find_bio_stripe() argument
1373 for (i = 0; i < rbio->bbio->num_stripes; i++) { in find_bio_stripe()
1374 stripe = &rbio->bbio->stripes[i]; in find_bio_stripe()
1377 physical < stripe_start + rbio->stripe_len && in find_bio_stripe()
1392 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio, in find_logical_bio_stripe() argument
1401 for (i = 0; i < rbio->nr_data; i++) { in find_logical_bio_stripe()
1402 stripe_start = rbio->bbio->raid_map[i]; in find_logical_bio_stripe()
1404 logical < stripe_start + rbio->stripe_len) { in find_logical_bio_stripe()
1414 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed) in fail_rbio_index() argument
1419 spin_lock_irqsave(&rbio->bio_list_lock, flags); in fail_rbio_index()
1422 if (rbio->faila == failed || rbio->failb == failed) in fail_rbio_index()
1425 if (rbio->faila == -1) { in fail_rbio_index()
1427 rbio->faila = failed; in fail_rbio_index()
1428 atomic_inc(&rbio->error); in fail_rbio_index()
1429 } else if (rbio->failb == -1) { in fail_rbio_index()
1431 rbio->failb = failed; in fail_rbio_index()
1432 atomic_inc(&rbio->error); in fail_rbio_index()
1437 spin_unlock_irqrestore(&rbio->bio_list_lock, flags); in fail_rbio_index()
1446 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, in fail_bio_stripe() argument
1449 int failed = find_bio_stripe(rbio, bio); in fail_bio_stripe()
1454 return fail_rbio_index(rbio, failed); in fail_bio_stripe()
1482 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_rmw_end_io() local
1485 fail_bio_stripe(rbio, bio); in raid_rmw_end_io()
1491 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid_rmw_end_io()
1494 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) in raid_rmw_end_io()
1502 validate_rbio_for_rmw(rbio); in raid_rmw_end_io()
1507 rbio_orig_end_io(rbio, BLK_STS_IOERR); in raid_rmw_end_io()
1514 static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) in raid56_rmw_stripe() argument
1525 ret = alloc_rbio_pages(rbio); in raid56_rmw_stripe()
1529 index_rbio_pages(rbio); in raid56_rmw_stripe()
1531 atomic_set(&rbio->error, 0); in raid56_rmw_stripe()
1536 for (stripe = 0; stripe < rbio->nr_data; stripe++) { in raid56_rmw_stripe()
1537 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in raid56_rmw_stripe()
1545 page = page_in_rbio(rbio, stripe, pagenr, 1); in raid56_rmw_stripe()
1549 page = rbio_stripe_page(rbio, stripe, pagenr); in raid56_rmw_stripe()
1557 ret = rbio_add_io_page(rbio, &bio_list, page, in raid56_rmw_stripe()
1558 stripe, pagenr, rbio->stripe_len); in raid56_rmw_stripe()
1579 atomic_set(&rbio->stripes_pending, bios_to_read); in raid56_rmw_stripe()
1585 bio->bi_private = rbio; in raid56_rmw_stripe()
1589 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); in raid56_rmw_stripe()
1597 rbio_orig_end_io(rbio, BLK_STS_IOERR); in raid56_rmw_stripe()
1605 validate_rbio_for_rmw(rbio); in raid56_rmw_stripe()
1613 static int full_stripe_write(struct btrfs_raid_bio *rbio) in full_stripe_write() argument
1617 ret = alloc_rbio_parity_pages(rbio); in full_stripe_write()
1619 __free_raid_bio(rbio); in full_stripe_write()
1623 ret = lock_stripe_add(rbio); in full_stripe_write()
1625 finish_rmw(rbio); in full_stripe_write()
1634 static int partial_stripe_write(struct btrfs_raid_bio *rbio) in partial_stripe_write() argument
1638 ret = lock_stripe_add(rbio); in partial_stripe_write()
1640 start_async_work(rbio, rmw_work); in partial_stripe_write()
1650 static int __raid56_parity_write(struct btrfs_raid_bio *rbio) in __raid56_parity_write() argument
1653 if (!rbio_is_full(rbio)) in __raid56_parity_write()
1654 return partial_stripe_write(rbio); in __raid56_parity_write()
1655 return full_stripe_write(rbio); in __raid56_parity_write()
1759 static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio) in rbio_add_bio() argument
1761 const struct btrfs_fs_info *fs_info = rbio->fs_info; in rbio_add_bio()
1763 const u64 full_stripe_start = rbio->bbio->raid_map[0]; in rbio_add_bio()
1770 rbio->nr_data * rbio->stripe_len); in rbio_add_bio()
1772 bio_list_add(&rbio->bio_list, orig_bio); in rbio_add_bio()
1773 rbio->bio_list_bytes += orig_bio->bi_iter.bi_size; in rbio_add_bio()
1779 PAGE_SHIFT) % rbio->stripe_npages; in rbio_add_bio()
1781 set_bit(bit, rbio->dbitmap); in rbio_add_bio()
1791 struct btrfs_raid_bio *rbio; in raid56_parity_write() local
1796 rbio = alloc_rbio(fs_info, bbio, stripe_len); in raid56_parity_write()
1797 if (IS_ERR(rbio)) { in raid56_parity_write()
1799 return PTR_ERR(rbio); in raid56_parity_write()
1801 rbio->operation = BTRFS_RBIO_WRITE; in raid56_parity_write()
1802 rbio_add_bio(rbio, bio); in raid56_parity_write()
1805 rbio->generic_bio_cnt = 1; in raid56_parity_write()
1811 if (rbio_is_full(rbio)) { in raid56_parity_write()
1812 ret = full_stripe_write(rbio); in raid56_parity_write()
1825 list_add_tail(&rbio->plug_list, &plug->rbio_list); in raid56_parity_write()
1828 ret = __raid56_parity_write(rbio); in raid56_parity_write()
1840 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) in __raid_recover_end_io() argument
1849 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); in __raid_recover_end_io()
1855 faila = rbio->faila; in __raid_recover_end_io()
1856 failb = rbio->failb; in __raid_recover_end_io()
1858 if (rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
1859 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { in __raid_recover_end_io()
1860 spin_lock_irq(&rbio->bio_list_lock); in __raid_recover_end_io()
1861 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in __raid_recover_end_io()
1862 spin_unlock_irq(&rbio->bio_list_lock); in __raid_recover_end_io()
1865 index_rbio_pages(rbio); in __raid_recover_end_io()
1867 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in __raid_recover_end_io()
1872 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB && in __raid_recover_end_io()
1873 !test_bit(pagenr, rbio->dbitmap)) in __raid_recover_end_io()
1879 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in __raid_recover_end_io()
1884 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
1885 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) && in __raid_recover_end_io()
1887 page = page_in_rbio(rbio, stripe, pagenr, 0); in __raid_recover_end_io()
1889 page = rbio_stripe_page(rbio, stripe, pagenr); in __raid_recover_end_io()
1895 if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) { in __raid_recover_end_io()
1901 if (faila == rbio->nr_data) { in __raid_recover_end_io()
1930 if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) { in __raid_recover_end_io()
1931 if (rbio->bbio->raid_map[faila] == in __raid_recover_end_io()
1943 if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) { in __raid_recover_end_io()
1944 raid6_datap_recov(rbio->real_stripes, in __raid_recover_end_io()
1947 raid6_2data_recov(rbio->real_stripes, in __raid_recover_end_io()
1958 copy_page(pointers[faila], pointers[rbio->nr_data]); in __raid_recover_end_io()
1962 for (stripe = faila; stripe < rbio->nr_data - 1; stripe++) in __raid_recover_end_io()
1964 pointers[rbio->nr_data - 1] = p; in __raid_recover_end_io()
1967 run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE); in __raid_recover_end_io()
1975 if (rbio->operation == BTRFS_RBIO_WRITE) { in __raid_recover_end_io()
1976 for (i = 0; i < rbio->stripe_npages; i++) { in __raid_recover_end_io()
1978 page = rbio_stripe_page(rbio, faila, i); in __raid_recover_end_io()
1982 page = rbio_stripe_page(rbio, failb, i); in __raid_recover_end_io()
1987 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in __raid_recover_end_io()
1992 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
1993 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) && in __raid_recover_end_io()
1995 page = page_in_rbio(rbio, stripe, pagenr, 0); in __raid_recover_end_io()
1997 page = rbio_stripe_page(rbio, stripe, pagenr); in __raid_recover_end_io()
2013 if (rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
2014 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { in __raid_recover_end_io()
2030 if (err == BLK_STS_OK && rbio->failb < 0) in __raid_recover_end_io()
2031 cache_rbio_pages(rbio); in __raid_recover_end_io()
2033 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in __raid_recover_end_io()
2035 rbio_orig_end_io(rbio, err); in __raid_recover_end_io()
2037 rbio->faila = -1; in __raid_recover_end_io()
2038 rbio->failb = -1; in __raid_recover_end_io()
2040 if (rbio->operation == BTRFS_RBIO_WRITE) in __raid_recover_end_io()
2041 finish_rmw(rbio); in __raid_recover_end_io()
2042 else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) in __raid_recover_end_io()
2043 finish_parity_scrub(rbio, 0); in __raid_recover_end_io()
2047 rbio_orig_end_io(rbio, err); in __raid_recover_end_io()
2057 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_recover_end_io() local
2064 fail_bio_stripe(rbio, bio); in raid_recover_end_io()
2069 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid_recover_end_io()
2072 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) in raid_recover_end_io()
2073 rbio_orig_end_io(rbio, BLK_STS_IOERR); in raid_recover_end_io()
2075 __raid_recover_end_io(rbio); in raid_recover_end_io()
2086 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) in __raid56_parity_recover() argument
2097 ret = alloc_rbio_pages(rbio); in __raid56_parity_recover()
2101 atomic_set(&rbio->error, 0); in __raid56_parity_recover()
2111 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in __raid56_parity_recover()
2112 if (rbio->faila == stripe || rbio->failb == stripe) { in __raid56_parity_recover()
2113 atomic_inc(&rbio->error); in __raid56_parity_recover()
2117 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in __raid56_parity_recover()
2118 ret = rbio_add_io_page(rbio, &bio_list, in __raid56_parity_recover()
2119 rbio_stripe_page(rbio, stripe, pagenr), in __raid56_parity_recover()
2120 stripe, pagenr, rbio->stripe_len); in __raid56_parity_recover()
2133 if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) { in __raid56_parity_recover()
2134 __raid_recover_end_io(rbio); in __raid56_parity_recover()
2145 atomic_set(&rbio->stripes_pending, bios_to_read); in __raid56_parity_recover()
2151 bio->bi_private = rbio; in __raid56_parity_recover()
2155 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); in __raid56_parity_recover()
2163 if (rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid56_parity_recover()
2164 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) in __raid56_parity_recover()
2165 rbio_orig_end_io(rbio, BLK_STS_IOERR); in __raid56_parity_recover()
2183 struct btrfs_raid_bio *rbio; in raid56_parity_recover() local
2191 rbio = alloc_rbio(fs_info, bbio, stripe_len); in raid56_parity_recover()
2192 if (IS_ERR(rbio)) { in raid56_parity_recover()
2195 return PTR_ERR(rbio); in raid56_parity_recover()
2198 rbio->operation = BTRFS_RBIO_READ_REBUILD; in raid56_parity_recover()
2199 rbio_add_bio(rbio, bio); in raid56_parity_recover()
2201 rbio->faila = find_logical_bio_stripe(rbio, bio); in raid56_parity_recover()
2202 if (rbio->faila == -1) { in raid56_parity_recover()
2209 kfree(rbio); in raid56_parity_recover()
2215 rbio->generic_bio_cnt = 1; in raid56_parity_recover()
2231 rbio->failb = rbio->real_stripes - (mirror_num - 1); in raid56_parity_recover()
2232 ASSERT(rbio->failb > 0); in raid56_parity_recover()
2233 if (rbio->failb <= rbio->faila) in raid56_parity_recover()
2234 rbio->failb--; in raid56_parity_recover()
2237 ret = lock_stripe_add(rbio); in raid56_parity_recover()
2247 __raid56_parity_recover(rbio); in raid56_parity_recover()
2259 struct btrfs_raid_bio *rbio; in rmw_work() local
2261 rbio = container_of(work, struct btrfs_raid_bio, work); in rmw_work()
2262 raid56_rmw_stripe(rbio); in rmw_work()
2267 struct btrfs_raid_bio *rbio; in read_rebuild_work() local
2269 rbio = container_of(work, struct btrfs_raid_bio, work); in read_rebuild_work()
2270 __raid56_parity_recover(rbio); in read_rebuild_work()
2289 struct btrfs_raid_bio *rbio; in raid56_parity_alloc_scrub_rbio() local
2292 rbio = alloc_rbio(fs_info, bbio, stripe_len); in raid56_parity_alloc_scrub_rbio()
2293 if (IS_ERR(rbio)) in raid56_parity_alloc_scrub_rbio()
2295 bio_list_add(&rbio->bio_list, bio); in raid56_parity_alloc_scrub_rbio()
2301 rbio->operation = BTRFS_RBIO_PARITY_SCRUB; in raid56_parity_alloc_scrub_rbio()
2308 for (i = rbio->nr_data; i < rbio->real_stripes; i++) { in raid56_parity_alloc_scrub_rbio()
2310 rbio->scrubp = i; in raid56_parity_alloc_scrub_rbio()
2314 ASSERT(i < rbio->real_stripes); in raid56_parity_alloc_scrub_rbio()
2318 ASSERT(rbio->stripe_npages == stripe_nsectors); in raid56_parity_alloc_scrub_rbio()
2319 bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors); in raid56_parity_alloc_scrub_rbio()
2325 rbio->generic_bio_cnt = 1; in raid56_parity_alloc_scrub_rbio()
2327 return rbio; in raid56_parity_alloc_scrub_rbio()
2331 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page, in raid56_add_scrub_pages() argument
2337 ASSERT(logical >= rbio->bbio->raid_map[0]); in raid56_add_scrub_pages()
2338 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] + in raid56_add_scrub_pages()
2339 rbio->stripe_len * rbio->nr_data); in raid56_add_scrub_pages()
2340 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]); in raid56_add_scrub_pages()
2342 rbio->bio_pages[index] = page; in raid56_add_scrub_pages()
2349 static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_essential_pages() argument
2356 for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) { in alloc_rbio_essential_pages()
2357 for (i = 0; i < rbio->real_stripes; i++) { in alloc_rbio_essential_pages()
2358 index = i * rbio->stripe_npages + bit; in alloc_rbio_essential_pages()
2359 if (rbio->stripe_pages[index]) in alloc_rbio_essential_pages()
2365 rbio->stripe_pages[index] = page; in alloc_rbio_essential_pages()
2371 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, in finish_parity_scrub() argument
2374 struct btrfs_bio *bbio = rbio->bbio; in finish_parity_scrub()
2375 void **pointers = rbio->finish_pointers; in finish_parity_scrub()
2376 unsigned long *pbitmap = rbio->finish_pbitmap; in finish_parity_scrub()
2377 int nr_data = rbio->nr_data; in finish_parity_scrub()
2390 if (rbio->real_stripes - rbio->nr_data == 1) in finish_parity_scrub()
2392 else if (rbio->real_stripes - rbio->nr_data == 2) in finish_parity_scrub()
2397 if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) { in finish_parity_scrub()
2399 bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages); in finish_parity_scrub()
2407 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in finish_parity_scrub()
2425 pointers[rbio->real_stripes - 1] = kmap(q_page); in finish_parity_scrub()
2428 atomic_set(&rbio->error, 0); in finish_parity_scrub()
2433 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { in finish_parity_scrub()
2438 p = page_in_rbio(rbio, stripe, pagenr, 0); in finish_parity_scrub()
2444 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE, in finish_parity_scrub()
2453 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr); in finish_parity_scrub()
2455 if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE)) in finish_parity_scrub()
2456 copy_page(parity, pointers[rbio->scrubp]); in finish_parity_scrub()
2459 bitmap_clear(rbio->dbitmap, pagenr, 1); in finish_parity_scrub()
2463 kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); in finish_parity_scrub()
2479 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { in finish_parity_scrub()
2482 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr); in finish_parity_scrub()
2483 ret = rbio_add_io_page(rbio, &bio_list, in finish_parity_scrub()
2484 page, rbio->scrubp, pagenr, rbio->stripe_len); in finish_parity_scrub()
2492 for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) { in finish_parity_scrub()
2495 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr); in finish_parity_scrub()
2496 ret = rbio_add_io_page(rbio, &bio_list, page, in finish_parity_scrub()
2497 bbio->tgtdev_map[rbio->scrubp], in finish_parity_scrub()
2498 pagenr, rbio->stripe_len); in finish_parity_scrub()
2507 rbio_orig_end_io(rbio, BLK_STS_OK); in finish_parity_scrub()
2511 atomic_set(&rbio->stripes_pending, nr_data); in finish_parity_scrub()
2518 bio->bi_private = rbio; in finish_parity_scrub()
2527 rbio_orig_end_io(rbio, BLK_STS_IOERR); in finish_parity_scrub()
2533 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe) in is_data_stripe() argument
2535 if (stripe >= 0 && stripe < rbio->nr_data) in is_data_stripe()
2547 static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio) in validate_rbio_for_parity_scrub() argument
2549 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) in validate_rbio_for_parity_scrub()
2552 if (rbio->faila >= 0 || rbio->failb >= 0) { in validate_rbio_for_parity_scrub()
2555 if (is_data_stripe(rbio, rbio->faila)) in validate_rbio_for_parity_scrub()
2557 else if (is_parity_stripe(rbio->faila)) in validate_rbio_for_parity_scrub()
2558 failp = rbio->faila; in validate_rbio_for_parity_scrub()
2560 if (is_data_stripe(rbio, rbio->failb)) in validate_rbio_for_parity_scrub()
2562 else if (is_parity_stripe(rbio->failb)) in validate_rbio_for_parity_scrub()
2563 failp = rbio->failb; in validate_rbio_for_parity_scrub()
2570 if (dfail > rbio->bbio->max_errors - 1) in validate_rbio_for_parity_scrub()
2578 finish_parity_scrub(rbio, 0); in validate_rbio_for_parity_scrub()
2588 if (failp != rbio->scrubp) in validate_rbio_for_parity_scrub()
2591 __raid_recover_end_io(rbio); in validate_rbio_for_parity_scrub()
2593 finish_parity_scrub(rbio, 1); in validate_rbio_for_parity_scrub()
2598 rbio_orig_end_io(rbio, BLK_STS_IOERR); in validate_rbio_for_parity_scrub()
2611 struct btrfs_raid_bio *rbio = bio->bi_private; in raid56_parity_scrub_end_io() local
2614 fail_bio_stripe(rbio, bio); in raid56_parity_scrub_end_io()
2620 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid56_parity_scrub_end_io()
2628 validate_rbio_for_parity_scrub(rbio); in raid56_parity_scrub_end_io()
2631 static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio) in raid56_parity_scrub_stripe() argument
2642 ret = alloc_rbio_essential_pages(rbio); in raid56_parity_scrub_stripe()
2646 atomic_set(&rbio->error, 0); in raid56_parity_scrub_stripe()
2651 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in raid56_parity_scrub_stripe()
2652 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { in raid56_parity_scrub_stripe()
2660 page = page_in_rbio(rbio, stripe, pagenr, 1); in raid56_parity_scrub_stripe()
2664 page = rbio_stripe_page(rbio, stripe, pagenr); in raid56_parity_scrub_stripe()
2672 ret = rbio_add_io_page(rbio, &bio_list, page, in raid56_parity_scrub_stripe()
2673 stripe, pagenr, rbio->stripe_len); in raid56_parity_scrub_stripe()
2694 atomic_set(&rbio->stripes_pending, bios_to_read); in raid56_parity_scrub_stripe()
2700 bio->bi_private = rbio; in raid56_parity_scrub_stripe()
2704 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); in raid56_parity_scrub_stripe()
2712 rbio_orig_end_io(rbio, BLK_STS_IOERR); in raid56_parity_scrub_stripe()
2720 validate_rbio_for_parity_scrub(rbio); in raid56_parity_scrub_stripe()
2725 struct btrfs_raid_bio *rbio; in scrub_parity_work() local
2727 rbio = container_of(work, struct btrfs_raid_bio, work); in scrub_parity_work()
2728 raid56_parity_scrub_stripe(rbio); in scrub_parity_work()
2731 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio) in raid56_parity_submit_scrub_rbio() argument
2733 if (!lock_stripe_add(rbio)) in raid56_parity_submit_scrub_rbio()
2734 start_async_work(rbio, scrub_parity_work); in raid56_parity_submit_scrub_rbio()
2743 struct btrfs_raid_bio *rbio; in raid56_alloc_missing_rbio() local
2745 rbio = alloc_rbio(fs_info, bbio, length); in raid56_alloc_missing_rbio()
2746 if (IS_ERR(rbio)) in raid56_alloc_missing_rbio()
2749 rbio->operation = BTRFS_RBIO_REBUILD_MISSING; in raid56_alloc_missing_rbio()
2750 bio_list_add(&rbio->bio_list, bio); in raid56_alloc_missing_rbio()
2757 rbio->faila = find_logical_bio_stripe(rbio, bio); in raid56_alloc_missing_rbio()
2758 if (rbio->faila == -1) { in raid56_alloc_missing_rbio()
2760 kfree(rbio); in raid56_alloc_missing_rbio()
2768 rbio->generic_bio_cnt = 1; in raid56_alloc_missing_rbio()
2770 return rbio; in raid56_alloc_missing_rbio()
2773 void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio) in raid56_submit_missing_rbio() argument
2775 if (!lock_stripe_add(rbio)) in raid56_submit_missing_rbio()
2776 start_async_work(rbio, read_rebuild_work); in raid56_submit_missing_rbio()