1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/sched.h>
7 #include <linux/sched/mm.h>
8 #include <linux/bio.h>
9 #include <linux/slab.h>
10 #include <linux/buffer_head.h>
11 #include <linux/blkdev.h>
12 #include <linux/ratelimit.h>
13 #include <linux/kthread.h>
14 #include <linux/raid/pq.h>
15 #include <linux/semaphore.h>
16 #include <linux/uuid.h>
17 #include <linux/list_sort.h>
18 #include "ctree.h"
19 #include "extent_map.h"
20 #include "disk-io.h"
21 #include "transaction.h"
22 #include "print-tree.h"
23 #include "volumes.h"
24 #include "raid56.h"
25 #include "async-thread.h"
26 #include "check-integrity.h"
27 #include "rcu-string.h"
28 #include "math.h"
29 #include "dev-replace.h"
30 #include "sysfs.h"
31 #include "tree-checker.h"
32 
33 const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
34 	[BTRFS_RAID_RAID10] = {
35 		.sub_stripes	= 2,
36 		.dev_stripes	= 1,
37 		.devs_max	= 0,	/* 0 == as many as possible */
38 		.devs_min	= 4,
39 		.tolerated_failures = 1,
40 		.devs_increment	= 2,
41 		.ncopies	= 2,
42 		.raid_name	= "raid10",
43 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
44 		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
45 	},
46 	[BTRFS_RAID_RAID1] = {
47 		.sub_stripes	= 1,
48 		.dev_stripes	= 1,
49 		.devs_max	= 2,
50 		.devs_min	= 2,
51 		.tolerated_failures = 1,
52 		.devs_increment	= 2,
53 		.ncopies	= 2,
54 		.raid_name	= "raid1",
55 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
56 		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
57 	},
58 	[BTRFS_RAID_DUP] = {
59 		.sub_stripes	= 1,
60 		.dev_stripes	= 2,
61 		.devs_max	= 1,
62 		.devs_min	= 1,
63 		.tolerated_failures = 0,
64 		.devs_increment	= 1,
65 		.ncopies	= 2,
66 		.raid_name	= "dup",
67 		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
68 		.mindev_error	= 0,
69 	},
70 	[BTRFS_RAID_RAID0] = {
71 		.sub_stripes	= 1,
72 		.dev_stripes	= 1,
73 		.devs_max	= 0,
74 		.devs_min	= 2,
75 		.tolerated_failures = 0,
76 		.devs_increment	= 1,
77 		.ncopies	= 1,
78 		.raid_name	= "raid0",
79 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
80 		.mindev_error	= 0,
81 	},
82 	[BTRFS_RAID_SINGLE] = {
83 		.sub_stripes	= 1,
84 		.dev_stripes	= 1,
85 		.devs_max	= 1,
86 		.devs_min	= 1,
87 		.tolerated_failures = 0,
88 		.devs_increment	= 1,
89 		.ncopies	= 1,
90 		.raid_name	= "single",
91 		.bg_flag	= 0,
92 		.mindev_error	= 0,
93 	},
94 	[BTRFS_RAID_RAID5] = {
95 		.sub_stripes	= 1,
96 		.dev_stripes	= 1,
97 		.devs_max	= 0,
98 		.devs_min	= 2,
99 		.tolerated_failures = 1,
100 		.devs_increment	= 1,
101 		.ncopies	= 1,
102 		.raid_name	= "raid5",
103 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
104 		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
105 	},
106 	[BTRFS_RAID_RAID6] = {
107 		.sub_stripes	= 1,
108 		.dev_stripes	= 1,
109 		.devs_max	= 0,
110 		.devs_min	= 3,
111 		.tolerated_failures = 2,
112 		.devs_increment	= 1,
113 		.ncopies	= 1,
114 		.raid_name	= "raid6",
115 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
116 		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
117 	},
118 };
119 
get_raid_name(enum btrfs_raid_types type)120 const char *get_raid_name(enum btrfs_raid_types type)
121 {
122 	if (type >= BTRFS_NR_RAID_TYPES)
123 		return NULL;
124 
125 	return btrfs_raid_array[type].raid_name;
126 }
127 
128 static int init_first_rw_device(struct btrfs_trans_handle *trans,
129 				struct btrfs_fs_info *fs_info);
130 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
131 static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
132 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
133 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
134 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
135 			     enum btrfs_map_op op,
136 			     u64 logical, u64 *length,
137 			     struct btrfs_bio **bbio_ret,
138 			     int mirror_num, int need_raid_map);
139 
140 /*
141  * Device locking
142  * ==============
143  *
144  * There are several mutexes that protect manipulation of devices and low-level
145  * structures like chunks but not block groups, extents or files
146  *
147  * uuid_mutex (global lock)
148  * ------------------------
149  * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
150  * the SCAN_DEV ioctl registration or from mount either implicitly (the first
151  * device) or requested by the device= mount option
152  *
153  * the mutex can be very coarse and can cover long-running operations
154  *
155  * protects: updates to fs_devices counters like missing devices, rw devices,
156  * seeding, structure cloning, openning/closing devices at mount/umount time
157  *
158  * global::fs_devs - add, remove, updates to the global list
159  *
160  * does not protect: manipulation of the fs_devices::devices list in general
161  * but in mount context it could be used to exclude list modifications by eg.
162  * scan ioctl
163  *
164  * btrfs_device::name - renames (write side), read is RCU
165  *
166  * fs_devices::device_list_mutex (per-fs, with RCU)
167  * ------------------------------------------------
168  * protects updates to fs_devices::devices, ie. adding and deleting
169  *
170  * simple list traversal with read-only actions can be done with RCU protection
171  *
172  * may be used to exclude some operations from running concurrently without any
173  * modifications to the list (see write_all_supers)
174  *
175  * Is not required at mount and close times, because our device list is
176  * protected by the uuid_mutex at that point.
177  *
178  * balance_mutex
179  * -------------
180  * protects balance structures (status, state) and context accessed from
181  * several places (internally, ioctl)
182  *
183  * chunk_mutex
184  * -----------
185  * protects chunks, adding or removing during allocation, trim or when a new
186  * device is added/removed
187  *
188  * cleaner_mutex
189  * -------------
190  * a big lock that is held by the cleaner thread and prevents running subvolume
191  * cleaning together with relocation or delayed iputs
192  *
193  *
194  * Lock nesting
195  * ============
196  *
197  * uuid_mutex
198  *   volume_mutex
199  *     device_list_mutex
200  *       chunk_mutex
201  *     balance_mutex
202  *
203  *
204  * Exclusive operations, BTRFS_FS_EXCL_OP
205  * ======================================
206  *
207  * Maintains the exclusivity of the following operations that apply to the
208  * whole filesystem and cannot run in parallel.
209  *
210  * - Balance (*)
211  * - Device add
212  * - Device remove
213  * - Device replace (*)
214  * - Resize
215  *
216  * The device operations (as above) can be in one of the following states:
217  *
218  * - Running state
219  * - Paused state
220  * - Completed state
221  *
222  * Only device operations marked with (*) can go into the Paused state for the
223  * following reasons:
224  *
225  * - ioctl (only Balance can be Paused through ioctl)
226  * - filesystem remounted as read-only
227  * - filesystem unmounted and mounted as read-only
228  * - system power-cycle and filesystem mounted as read-only
229  * - filesystem or device errors leading to forced read-only
230  *
231  * BTRFS_FS_EXCL_OP flag is set and cleared using atomic operations.
232  * During the course of Paused state, the BTRFS_FS_EXCL_OP remains set.
233  * A device operation in Paused or Running state can be canceled or resumed
234  * either by ioctl (Balance only) or when remounted as read-write.
235  * BTRFS_FS_EXCL_OP flag is cleared when the device operation is canceled or
236  * completed.
237  */
238 
239 DEFINE_MUTEX(uuid_mutex);
240 static LIST_HEAD(fs_uuids);
btrfs_get_fs_uuids(void)241 struct list_head *btrfs_get_fs_uuids(void)
242 {
243 	return &fs_uuids;
244 }
245 
246 /*
247  * alloc_fs_devices - allocate struct btrfs_fs_devices
248  * @fsid:	if not NULL, copy the uuid to fs_devices::fsid
249  *
250  * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
251  * The returned struct is not linked onto any lists and can be destroyed with
252  * kfree() right away.
253  */
alloc_fs_devices(const u8 * fsid)254 static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
255 {
256 	struct btrfs_fs_devices *fs_devs;
257 
258 	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
259 	if (!fs_devs)
260 		return ERR_PTR(-ENOMEM);
261 
262 	mutex_init(&fs_devs->device_list_mutex);
263 
264 	INIT_LIST_HEAD(&fs_devs->devices);
265 	INIT_LIST_HEAD(&fs_devs->resized_devices);
266 	INIT_LIST_HEAD(&fs_devs->alloc_list);
267 	INIT_LIST_HEAD(&fs_devs->fs_list);
268 	if (fsid)
269 		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
270 
271 	return fs_devs;
272 }
273 
btrfs_free_device(struct btrfs_device * device)274 void btrfs_free_device(struct btrfs_device *device)
275 {
276 	rcu_string_free(device->name);
277 	bio_put(device->flush_bio);
278 	kfree(device);
279 }
280 
free_fs_devices(struct btrfs_fs_devices * fs_devices)281 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
282 {
283 	struct btrfs_device *device;
284 	WARN_ON(fs_devices->opened);
285 	while (!list_empty(&fs_devices->devices)) {
286 		device = list_entry(fs_devices->devices.next,
287 				    struct btrfs_device, dev_list);
288 		list_del(&device->dev_list);
289 		btrfs_free_device(device);
290 	}
291 	kfree(fs_devices);
292 }
293 
btrfs_kobject_uevent(struct block_device * bdev,enum kobject_action action)294 static void btrfs_kobject_uevent(struct block_device *bdev,
295 				 enum kobject_action action)
296 {
297 	int ret;
298 
299 	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
300 	if (ret)
301 		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
302 			action,
303 			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
304 			&disk_to_dev(bdev->bd_disk)->kobj);
305 }
306 
btrfs_cleanup_fs_uuids(void)307 void __exit btrfs_cleanup_fs_uuids(void)
308 {
309 	struct btrfs_fs_devices *fs_devices;
310 
311 	while (!list_empty(&fs_uuids)) {
312 		fs_devices = list_entry(fs_uuids.next,
313 					struct btrfs_fs_devices, fs_list);
314 		list_del(&fs_devices->fs_list);
315 		free_fs_devices(fs_devices);
316 	}
317 }
318 
319 /*
320  * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
321  * Returned struct is not linked onto any lists and must be destroyed using
322  * btrfs_free_device.
323  */
__alloc_device(void)324 static struct btrfs_device *__alloc_device(void)
325 {
326 	struct btrfs_device *dev;
327 
328 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
329 	if (!dev)
330 		return ERR_PTR(-ENOMEM);
331 
332 	/*
333 	 * Preallocate a bio that's always going to be used for flushing device
334 	 * barriers and matches the device lifespan
335 	 */
336 	dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
337 	if (!dev->flush_bio) {
338 		kfree(dev);
339 		return ERR_PTR(-ENOMEM);
340 	}
341 
342 	INIT_LIST_HEAD(&dev->dev_list);
343 	INIT_LIST_HEAD(&dev->dev_alloc_list);
344 	INIT_LIST_HEAD(&dev->resized_list);
345 
346 	spin_lock_init(&dev->io_lock);
347 
348 	atomic_set(&dev->reada_in_flight, 0);
349 	atomic_set(&dev->dev_stats_ccnt, 0);
350 	btrfs_device_data_ordered_init(dev);
351 	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
352 	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
353 
354 	return dev;
355 }
356 
find_fsid(u8 * fsid)357 static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
358 {
359 	struct btrfs_fs_devices *fs_devices;
360 
361 	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
362 		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
363 			return fs_devices;
364 	}
365 	return NULL;
366 }
367 
368 static int
btrfs_get_bdev_and_sb(const char * device_path,fmode_t flags,void * holder,int flush,struct block_device ** bdev,struct buffer_head ** bh)369 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
370 		      int flush, struct block_device **bdev,
371 		      struct buffer_head **bh)
372 {
373 	int ret;
374 
375 	*bdev = blkdev_get_by_path(device_path, flags, holder);
376 
377 	if (IS_ERR(*bdev)) {
378 		ret = PTR_ERR(*bdev);
379 		goto error;
380 	}
381 
382 	if (flush)
383 		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
384 	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
385 	if (ret) {
386 		blkdev_put(*bdev, flags);
387 		goto error;
388 	}
389 	invalidate_bdev(*bdev);
390 	*bh = btrfs_read_dev_super(*bdev);
391 	if (IS_ERR(*bh)) {
392 		ret = PTR_ERR(*bh);
393 		blkdev_put(*bdev, flags);
394 		goto error;
395 	}
396 
397 	return 0;
398 
399 error:
400 	*bdev = NULL;
401 	*bh = NULL;
402 	return ret;
403 }
404 
requeue_list(struct btrfs_pending_bios * pending_bios,struct bio * head,struct bio * tail)405 static void requeue_list(struct btrfs_pending_bios *pending_bios,
406 			struct bio *head, struct bio *tail)
407 {
408 
409 	struct bio *old_head;
410 
411 	old_head = pending_bios->head;
412 	pending_bios->head = head;
413 	if (pending_bios->tail)
414 		tail->bi_next = old_head;
415 	else
416 		pending_bios->tail = tail;
417 }
418 
419 /*
420  * we try to collect pending bios for a device so we don't get a large
421  * number of procs sending bios down to the same device.  This greatly
422  * improves the schedulers ability to collect and merge the bios.
423  *
424  * But, it also turns into a long list of bios to process and that is sure
425  * to eventually make the worker thread block.  The solution here is to
426  * make some progress and then put this work struct back at the end of
427  * the list if the block device is congested.  This way, multiple devices
428  * can make progress from a single worker thread.
429  */
run_scheduled_bios(struct btrfs_device * device)430 static noinline void run_scheduled_bios(struct btrfs_device *device)
431 {
432 	struct btrfs_fs_info *fs_info = device->fs_info;
433 	struct bio *pending;
434 	struct backing_dev_info *bdi;
435 	struct btrfs_pending_bios *pending_bios;
436 	struct bio *tail;
437 	struct bio *cur;
438 	int again = 0;
439 	unsigned long num_run;
440 	unsigned long batch_run = 0;
441 	unsigned long last_waited = 0;
442 	int force_reg = 0;
443 	int sync_pending = 0;
444 	struct blk_plug plug;
445 
446 	/*
447 	 * this function runs all the bios we've collected for
448 	 * a particular device.  We don't want to wander off to
449 	 * another device without first sending all of these down.
450 	 * So, setup a plug here and finish it off before we return
451 	 */
452 	blk_start_plug(&plug);
453 
454 	bdi = device->bdev->bd_bdi;
455 
456 loop:
457 	spin_lock(&device->io_lock);
458 
459 loop_lock:
460 	num_run = 0;
461 
462 	/* take all the bios off the list at once and process them
463 	 * later on (without the lock held).  But, remember the
464 	 * tail and other pointers so the bios can be properly reinserted
465 	 * into the list if we hit congestion
466 	 */
467 	if (!force_reg && device->pending_sync_bios.head) {
468 		pending_bios = &device->pending_sync_bios;
469 		force_reg = 1;
470 	} else {
471 		pending_bios = &device->pending_bios;
472 		force_reg = 0;
473 	}
474 
475 	pending = pending_bios->head;
476 	tail = pending_bios->tail;
477 	WARN_ON(pending && !tail);
478 
479 	/*
480 	 * if pending was null this time around, no bios need processing
481 	 * at all and we can stop.  Otherwise it'll loop back up again
482 	 * and do an additional check so no bios are missed.
483 	 *
484 	 * device->running_pending is used to synchronize with the
485 	 * schedule_bio code.
486 	 */
487 	if (device->pending_sync_bios.head == NULL &&
488 	    device->pending_bios.head == NULL) {
489 		again = 0;
490 		device->running_pending = 0;
491 	} else {
492 		again = 1;
493 		device->running_pending = 1;
494 	}
495 
496 	pending_bios->head = NULL;
497 	pending_bios->tail = NULL;
498 
499 	spin_unlock(&device->io_lock);
500 
501 	while (pending) {
502 
503 		rmb();
504 		/* we want to work on both lists, but do more bios on the
505 		 * sync list than the regular list
506 		 */
507 		if ((num_run > 32 &&
508 		    pending_bios != &device->pending_sync_bios &&
509 		    device->pending_sync_bios.head) ||
510 		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
511 		    device->pending_bios.head)) {
512 			spin_lock(&device->io_lock);
513 			requeue_list(pending_bios, pending, tail);
514 			goto loop_lock;
515 		}
516 
517 		cur = pending;
518 		pending = pending->bi_next;
519 		cur->bi_next = NULL;
520 
521 		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);
522 
523 		/*
524 		 * if we're doing the sync list, record that our
525 		 * plug has some sync requests on it
526 		 *
527 		 * If we're doing the regular list and there are
528 		 * sync requests sitting around, unplug before
529 		 * we add more
530 		 */
531 		if (pending_bios == &device->pending_sync_bios) {
532 			sync_pending = 1;
533 		} else if (sync_pending) {
534 			blk_finish_plug(&plug);
535 			blk_start_plug(&plug);
536 			sync_pending = 0;
537 		}
538 
539 		btrfsic_submit_bio(cur);
540 		num_run++;
541 		batch_run++;
542 
543 		cond_resched();
544 
545 		/*
546 		 * we made progress, there is more work to do and the bdi
547 		 * is now congested.  Back off and let other work structs
548 		 * run instead
549 		 */
550 		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
551 		    fs_info->fs_devices->open_devices > 1) {
552 			struct io_context *ioc;
553 
554 			ioc = current->io_context;
555 
556 			/*
557 			 * the main goal here is that we don't want to
558 			 * block if we're going to be able to submit
559 			 * more requests without blocking.
560 			 *
561 			 * This code does two great things, it pokes into
562 			 * the elevator code from a filesystem _and_
563 			 * it makes assumptions about how batching works.
564 			 */
565 			if (ioc && ioc->nr_batch_requests > 0 &&
566 			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
567 			    (last_waited == 0 ||
568 			     ioc->last_waited == last_waited)) {
569 				/*
570 				 * we want to go through our batch of
571 				 * requests and stop.  So, we copy out
572 				 * the ioc->last_waited time and test
573 				 * against it before looping
574 				 */
575 				last_waited = ioc->last_waited;
576 				cond_resched();
577 				continue;
578 			}
579 			spin_lock(&device->io_lock);
580 			requeue_list(pending_bios, pending, tail);
581 			device->running_pending = 1;
582 
583 			spin_unlock(&device->io_lock);
584 			btrfs_queue_work(fs_info->submit_workers,
585 					 &device->work);
586 			goto done;
587 		}
588 	}
589 
590 	cond_resched();
591 	if (again)
592 		goto loop;
593 
594 	spin_lock(&device->io_lock);
595 	if (device->pending_bios.head || device->pending_sync_bios.head)
596 		goto loop_lock;
597 	spin_unlock(&device->io_lock);
598 
599 done:
600 	blk_finish_plug(&plug);
601 }
602 
pending_bios_fn(struct btrfs_work * work)603 static void pending_bios_fn(struct btrfs_work *work)
604 {
605 	struct btrfs_device *device;
606 
607 	device = container_of(work, struct btrfs_device, work);
608 	run_scheduled_bios(device);
609 }
610 
611 /*
612  *  Search and remove all stale (devices which are not mounted) devices.
613  *  When both inputs are NULL, it will search and release all stale devices.
614  *  path:	Optional. When provided will it release all unmounted devices
615  *		matching this path only.
616  *  skip_dev:	Optional. Will skip this device when searching for the stale
617  *		devices.
618  */
btrfs_free_stale_devices(const char * path,struct btrfs_device * skip_device)619 static void btrfs_free_stale_devices(const char *path,
620 				     struct btrfs_device *skip_device)
621 {
622 	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
623 	struct btrfs_device *device, *tmp_device;
624 
625 	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
626 		mutex_lock(&fs_devices->device_list_mutex);
627 		if (fs_devices->opened) {
628 			mutex_unlock(&fs_devices->device_list_mutex);
629 			continue;
630 		}
631 
632 		list_for_each_entry_safe(device, tmp_device,
633 					 &fs_devices->devices, dev_list) {
634 			int not_found = 0;
635 
636 			if (skip_device && skip_device == device)
637 				continue;
638 			if (path && !device->name)
639 				continue;
640 
641 			rcu_read_lock();
642 			if (path)
643 				not_found = strcmp(rcu_str_deref(device->name),
644 						   path);
645 			rcu_read_unlock();
646 			if (not_found)
647 				continue;
648 
649 			/* delete the stale device */
650 			fs_devices->num_devices--;
651 			list_del(&device->dev_list);
652 			btrfs_free_device(device);
653 
654 			if (fs_devices->num_devices == 0)
655 				break;
656 		}
657 		mutex_unlock(&fs_devices->device_list_mutex);
658 		if (fs_devices->num_devices == 0) {
659 			btrfs_sysfs_remove_fsid(fs_devices);
660 			list_del(&fs_devices->fs_list);
661 			free_fs_devices(fs_devices);
662 		}
663 	}
664 }
665 
666 /*
667  * This is only used on mount, and we are protected from competing things
668  * messing with our fs_devices by the uuid_mutex, thus we do not need the
669  * fs_devices->device_list_mutex here.
670  */
btrfs_open_one_device(struct btrfs_fs_devices * fs_devices,struct btrfs_device * device,fmode_t flags,void * holder)671 static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
672 			struct btrfs_device *device, fmode_t flags,
673 			void *holder)
674 {
675 	struct request_queue *q;
676 	struct block_device *bdev;
677 	struct buffer_head *bh;
678 	struct btrfs_super_block *disk_super;
679 	u64 devid;
680 	int ret;
681 
682 	if (device->bdev)
683 		return -EINVAL;
684 	if (!device->name)
685 		return -EINVAL;
686 
687 	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
688 				    &bdev, &bh);
689 	if (ret)
690 		return ret;
691 
692 	disk_super = (struct btrfs_super_block *)bh->b_data;
693 	devid = btrfs_stack_device_id(&disk_super->dev_item);
694 	if (devid != device->devid)
695 		goto error_brelse;
696 
697 	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
698 		goto error_brelse;
699 
700 	device->generation = btrfs_super_generation(disk_super);
701 
702 	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
703 		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
704 		fs_devices->seeding = 1;
705 	} else {
706 		if (bdev_read_only(bdev))
707 			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
708 		else
709 			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
710 	}
711 
712 	q = bdev_get_queue(bdev);
713 	if (!blk_queue_nonrot(q))
714 		fs_devices->rotating = 1;
715 
716 	device->bdev = bdev;
717 	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
718 	device->mode = flags;
719 
720 	fs_devices->open_devices++;
721 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
722 	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
723 		fs_devices->rw_devices++;
724 		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
725 	}
726 	brelse(bh);
727 
728 	return 0;
729 
730 error_brelse:
731 	brelse(bh);
732 	blkdev_put(bdev, flags);
733 
734 	return -EINVAL;
735 }
736 
737 /*
738  * Add new device to list of registered devices
739  *
740  * Returns:
741  * device pointer which was just added or updated when successful
742  * error pointer when failed
743  */
device_list_add(const char * path,struct btrfs_super_block * disk_super,bool * new_device_added)744 static noinline struct btrfs_device *device_list_add(const char *path,
745 			   struct btrfs_super_block *disk_super,
746 			   bool *new_device_added)
747 {
748 	struct btrfs_device *device;
749 	struct btrfs_fs_devices *fs_devices;
750 	struct rcu_string *name;
751 	u64 found_transid = btrfs_super_generation(disk_super);
752 	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
753 
754 	fs_devices = find_fsid(disk_super->fsid);
755 	if (!fs_devices) {
756 		fs_devices = alloc_fs_devices(disk_super->fsid);
757 		if (IS_ERR(fs_devices))
758 			return ERR_CAST(fs_devices);
759 
760 		mutex_lock(&fs_devices->device_list_mutex);
761 		list_add(&fs_devices->fs_list, &fs_uuids);
762 
763 		device = NULL;
764 	} else {
765 		mutex_lock(&fs_devices->device_list_mutex);
766 		device = btrfs_find_device(fs_devices, devid,
767 				disk_super->dev_item.uuid, NULL, false);
768 	}
769 
770 	if (!device) {
771 		if (fs_devices->opened) {
772 			mutex_unlock(&fs_devices->device_list_mutex);
773 			return ERR_PTR(-EBUSY);
774 		}
775 
776 		device = btrfs_alloc_device(NULL, &devid,
777 					    disk_super->dev_item.uuid);
778 		if (IS_ERR(device)) {
779 			mutex_unlock(&fs_devices->device_list_mutex);
780 			/* we can safely leave the fs_devices entry around */
781 			return device;
782 		}
783 
784 		name = rcu_string_strdup(path, GFP_NOFS);
785 		if (!name) {
786 			btrfs_free_device(device);
787 			mutex_unlock(&fs_devices->device_list_mutex);
788 			return ERR_PTR(-ENOMEM);
789 		}
790 		rcu_assign_pointer(device->name, name);
791 
792 		list_add_rcu(&device->dev_list, &fs_devices->devices);
793 		fs_devices->num_devices++;
794 
795 		device->fs_devices = fs_devices;
796 		*new_device_added = true;
797 
798 		if (disk_super->label[0])
799 			pr_info("BTRFS: device label %s devid %llu transid %llu %s\n",
800 				disk_super->label, devid, found_transid, path);
801 		else
802 			pr_info("BTRFS: device fsid %pU devid %llu transid %llu %s\n",
803 				disk_super->fsid, devid, found_transid, path);
804 
805 	} else if (!device->name || strcmp(device->name->str, path)) {
806 		/*
807 		 * When FS is already mounted.
808 		 * 1. If you are here and if the device->name is NULL that
809 		 *    means this device was missing at time of FS mount.
810 		 * 2. If you are here and if the device->name is different
811 		 *    from 'path' that means either
812 		 *      a. The same device disappeared and reappeared with
813 		 *         different name. or
814 		 *      b. The missing-disk-which-was-replaced, has
815 		 *         reappeared now.
816 		 *
817 		 * We must allow 1 and 2a above. But 2b would be a spurious
818 		 * and unintentional.
819 		 *
820 		 * Further in case of 1 and 2a above, the disk at 'path'
821 		 * would have missed some transaction when it was away and
822 		 * in case of 2a the stale bdev has to be updated as well.
823 		 * 2b must not be allowed at all time.
824 		 */
825 
826 		/*
827 		 * For now, we do allow update to btrfs_fs_device through the
828 		 * btrfs dev scan cli after FS has been mounted.  We're still
829 		 * tracking a problem where systems fail mount by subvolume id
830 		 * when we reject replacement on a mounted FS.
831 		 */
832 		if (!fs_devices->opened && found_transid < device->generation) {
833 			/*
834 			 * That is if the FS is _not_ mounted and if you
835 			 * are here, that means there is more than one
836 			 * disk with same uuid and devid.We keep the one
837 			 * with larger generation number or the last-in if
838 			 * generation are equal.
839 			 */
840 			mutex_unlock(&fs_devices->device_list_mutex);
841 			return ERR_PTR(-EEXIST);
842 		}
843 
844 		/*
845 		 * We are going to replace the device path for a given devid,
846 		 * make sure it's the same device if the device is mounted
847 		 */
848 		if (device->bdev) {
849 			struct block_device *path_bdev;
850 
851 			path_bdev = lookup_bdev(path);
852 			if (IS_ERR(path_bdev)) {
853 				mutex_unlock(&fs_devices->device_list_mutex);
854 				return ERR_CAST(path_bdev);
855 			}
856 
857 			if (device->bdev != path_bdev) {
858 				bdput(path_bdev);
859 				mutex_unlock(&fs_devices->device_list_mutex);
860 				/*
861 				 * device->fs_info may not be reliable here, so
862 				 * pass in a NULL instead. This avoids a
863 				 * possible use-after-free when the fs_info and
864 				 * fs_info->sb are already torn down.
865 				 */
866 				btrfs_warn_in_rcu(NULL,
867 	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
868 						  path, devid, found_transid,
869 						  current->comm,
870 						  task_pid_nr(current));
871 				return ERR_PTR(-EEXIST);
872 			}
873 			bdput(path_bdev);
874 			btrfs_info_in_rcu(device->fs_info,
875 	"devid %llu device path %s changed to %s scanned by %s (%d)",
876 					  devid, rcu_str_deref(device->name),
877 					  path, current->comm,
878 					  task_pid_nr(current));
879 		}
880 
881 		name = rcu_string_strdup(path, GFP_NOFS);
882 		if (!name) {
883 			mutex_unlock(&fs_devices->device_list_mutex);
884 			return ERR_PTR(-ENOMEM);
885 		}
886 		rcu_string_free(device->name);
887 		rcu_assign_pointer(device->name, name);
888 		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
889 			fs_devices->missing_devices--;
890 			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
891 		}
892 	}
893 
894 	/*
895 	 * Unmount does not free the btrfs_device struct but would zero
896 	 * generation along with most of the other members. So just update
897 	 * it back. We need it to pick the disk with largest generation
898 	 * (as above).
899 	 */
900 	if (!fs_devices->opened)
901 		device->generation = found_transid;
902 
903 	fs_devices->total_devices = btrfs_super_num_devices(disk_super);
904 
905 	mutex_unlock(&fs_devices->device_list_mutex);
906 	return device;
907 }
908 
clone_fs_devices(struct btrfs_fs_devices * orig)909 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
910 {
911 	struct btrfs_fs_devices *fs_devices;
912 	struct btrfs_device *device;
913 	struct btrfs_device *orig_dev;
914 
915 	fs_devices = alloc_fs_devices(orig->fsid);
916 	if (IS_ERR(fs_devices))
917 		return fs_devices;
918 
919 	mutex_lock(&orig->device_list_mutex);
920 	fs_devices->total_devices = orig->total_devices;
921 
922 	/* We have held the volume lock, it is safe to get the devices. */
923 	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
924 		struct rcu_string *name;
925 
926 		device = btrfs_alloc_device(NULL, &orig_dev->devid,
927 					    orig_dev->uuid);
928 		if (IS_ERR(device))
929 			goto error;
930 
931 		/*
932 		 * This is ok to do without rcu read locked because we hold the
933 		 * uuid mutex so nothing we touch in here is going to disappear.
934 		 */
935 		if (orig_dev->name) {
936 			name = rcu_string_strdup(orig_dev->name->str,
937 					GFP_KERNEL);
938 			if (!name) {
939 				btrfs_free_device(device);
940 				goto error;
941 			}
942 			rcu_assign_pointer(device->name, name);
943 		}
944 
945 		list_add(&device->dev_list, &fs_devices->devices);
946 		device->fs_devices = fs_devices;
947 		fs_devices->num_devices++;
948 	}
949 	mutex_unlock(&orig->device_list_mutex);
950 	return fs_devices;
951 error:
952 	mutex_unlock(&orig->device_list_mutex);
953 	free_fs_devices(fs_devices);
954 	return ERR_PTR(-ENOMEM);
955 }
956 
957 /*
958  * After we have read the system tree and know devids belonging to
959  * this filesystem, remove the device which does not belong there.
960  */
btrfs_free_extra_devids(struct btrfs_fs_devices * fs_devices,int step)961 void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step)
962 {
963 	struct btrfs_device *device, *next;
964 	struct btrfs_device *latest_dev = NULL;
965 
966 	mutex_lock(&uuid_mutex);
967 again:
968 	/* This is the initialized path, it is safe to release the devices. */
969 	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
970 		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
971 							&device->dev_state)) {
972 			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
973 			     &device->dev_state) &&
974 			    !test_bit(BTRFS_DEV_STATE_MISSING,
975 				      &device->dev_state) &&
976 			     (!latest_dev ||
977 			      device->generation > latest_dev->generation)) {
978 				latest_dev = device;
979 			}
980 			continue;
981 		}
982 
983 		/*
984 		 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID,
985 		 * in btrfs_init_dev_replace() so just continue.
986 		 */
987 		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
988 			continue;
989 
990 		if (device->bdev) {
991 			blkdev_put(device->bdev, device->mode);
992 			device->bdev = NULL;
993 			fs_devices->open_devices--;
994 		}
995 		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
996 			list_del_init(&device->dev_alloc_list);
997 			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
998 			fs_devices->rw_devices--;
999 		}
1000 		list_del_init(&device->dev_list);
1001 		fs_devices->num_devices--;
1002 		btrfs_free_device(device);
1003 	}
1004 
1005 	if (fs_devices->seed) {
1006 		fs_devices = fs_devices->seed;
1007 		goto again;
1008 	}
1009 
1010 	fs_devices->latest_bdev = latest_dev->bdev;
1011 
1012 	mutex_unlock(&uuid_mutex);
1013 }
1014 
free_device_rcu(struct rcu_head * head)1015 static void free_device_rcu(struct rcu_head *head)
1016 {
1017 	struct btrfs_device *device;
1018 
1019 	device = container_of(head, struct btrfs_device, rcu);
1020 	btrfs_free_device(device);
1021 }
1022 
btrfs_close_bdev(struct btrfs_device * device)1023 static void btrfs_close_bdev(struct btrfs_device *device)
1024 {
1025 	if (!device->bdev)
1026 		return;
1027 
1028 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1029 		sync_blockdev(device->bdev);
1030 		invalidate_bdev(device->bdev);
1031 	}
1032 
1033 	blkdev_put(device->bdev, device->mode);
1034 }
1035 
btrfs_close_one_device(struct btrfs_device * device)1036 static void btrfs_close_one_device(struct btrfs_device *device)
1037 {
1038 	struct btrfs_fs_devices *fs_devices = device->fs_devices;
1039 	struct btrfs_device *new_device;
1040 	struct rcu_string *name;
1041 
1042 	if (device->bdev)
1043 		fs_devices->open_devices--;
1044 
1045 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
1046 	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
1047 		list_del_init(&device->dev_alloc_list);
1048 		fs_devices->rw_devices--;
1049 	}
1050 
1051 	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
1052 		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
1053 
1054 	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
1055 		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
1056 		fs_devices->missing_devices--;
1057 	}
1058 
1059 	btrfs_close_bdev(device);
1060 
1061 	new_device = btrfs_alloc_device(NULL, &device->devid,
1062 					device->uuid);
1063 	BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
1064 
1065 	/* Safe because we are under uuid_mutex */
1066 	if (device->name) {
1067 		name = rcu_string_strdup(device->name->str, GFP_NOFS);
1068 		BUG_ON(!name); /* -ENOMEM */
1069 		rcu_assign_pointer(new_device->name, name);
1070 	}
1071 
1072 	list_replace_rcu(&device->dev_list, &new_device->dev_list);
1073 	new_device->fs_devices = device->fs_devices;
1074 
1075 	call_rcu(&device->rcu, free_device_rcu);
1076 }
1077 
close_fs_devices(struct btrfs_fs_devices * fs_devices)1078 static int close_fs_devices(struct btrfs_fs_devices *fs_devices)
1079 {
1080 	struct btrfs_device *device, *tmp;
1081 
1082 	if (--fs_devices->opened > 0)
1083 		return 0;
1084 
1085 	mutex_lock(&fs_devices->device_list_mutex);
1086 	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
1087 		btrfs_close_one_device(device);
1088 	}
1089 	mutex_unlock(&fs_devices->device_list_mutex);
1090 
1091 	WARN_ON(fs_devices->open_devices);
1092 	WARN_ON(fs_devices->rw_devices);
1093 	fs_devices->opened = 0;
1094 	fs_devices->seeding = 0;
1095 
1096 	return 0;
1097 }
1098 
btrfs_close_devices(struct btrfs_fs_devices * fs_devices)1099 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
1100 {
1101 	struct btrfs_fs_devices *seed_devices = NULL;
1102 	int ret;
1103 
1104 	mutex_lock(&uuid_mutex);
1105 	ret = close_fs_devices(fs_devices);
1106 	if (!fs_devices->opened) {
1107 		seed_devices = fs_devices->seed;
1108 		fs_devices->seed = NULL;
1109 	}
1110 	mutex_unlock(&uuid_mutex);
1111 
1112 	while (seed_devices) {
1113 		fs_devices = seed_devices;
1114 		seed_devices = fs_devices->seed;
1115 		close_fs_devices(fs_devices);
1116 		free_fs_devices(fs_devices);
1117 	}
1118 	return ret;
1119 }
1120 
open_fs_devices(struct btrfs_fs_devices * fs_devices,fmode_t flags,void * holder)1121 static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
1122 				fmode_t flags, void *holder)
1123 {
1124 	struct btrfs_device *device;
1125 	struct btrfs_device *latest_dev = NULL;
1126 	int ret = 0;
1127 
1128 	flags |= FMODE_EXCL;
1129 
1130 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
1131 		/* Just open everything we can; ignore failures here */
1132 		if (btrfs_open_one_device(fs_devices, device, flags, holder))
1133 			continue;
1134 
1135 		if (!latest_dev ||
1136 		    device->generation > latest_dev->generation)
1137 			latest_dev = device;
1138 	}
1139 	if (fs_devices->open_devices == 0) {
1140 		ret = -EINVAL;
1141 		goto out;
1142 	}
1143 	fs_devices->opened = 1;
1144 	fs_devices->latest_bdev = latest_dev->bdev;
1145 	fs_devices->total_rw_bytes = 0;
1146 out:
1147 	return ret;
1148 }
1149 
devid_cmp(void * priv,struct list_head * a,struct list_head * b)1150 static int devid_cmp(void *priv, struct list_head *a, struct list_head *b)
1151 {
1152 	struct btrfs_device *dev1, *dev2;
1153 
1154 	dev1 = list_entry(a, struct btrfs_device, dev_list);
1155 	dev2 = list_entry(b, struct btrfs_device, dev_list);
1156 
1157 	if (dev1->devid < dev2->devid)
1158 		return -1;
1159 	else if (dev1->devid > dev2->devid)
1160 		return 1;
1161 	return 0;
1162 }
1163 
btrfs_open_devices(struct btrfs_fs_devices * fs_devices,fmode_t flags,void * holder)1164 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
1165 		       fmode_t flags, void *holder)
1166 {
1167 	int ret;
1168 
1169 	lockdep_assert_held(&uuid_mutex);
1170 	/*
1171 	 * The device_list_mutex cannot be taken here in case opening the
1172 	 * underlying device takes further locks like bd_mutex.
1173 	 *
1174 	 * We also don't need the lock here as this is called during mount and
1175 	 * exclusion is provided by uuid_mutex
1176 	 */
1177 
1178 	if (fs_devices->opened) {
1179 		fs_devices->opened++;
1180 		ret = 0;
1181 	} else {
1182 		list_sort(NULL, &fs_devices->devices, devid_cmp);
1183 		ret = open_fs_devices(fs_devices, flags, holder);
1184 	}
1185 
1186 	return ret;
1187 }
1188 
btrfs_release_disk_super(struct page * page)1189 static void btrfs_release_disk_super(struct page *page)
1190 {
1191 	kunmap(page);
1192 	put_page(page);
1193 }
1194 
btrfs_read_disk_super(struct block_device * bdev,u64 bytenr,struct page ** page,struct btrfs_super_block ** disk_super)1195 static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
1196 				 struct page **page,
1197 				 struct btrfs_super_block **disk_super)
1198 {
1199 	void *p;
1200 	pgoff_t index;
1201 
1202 	/* make sure our super fits in the device */
1203 	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
1204 		return 1;
1205 
1206 	/* make sure our super fits in the page */
1207 	if (sizeof(**disk_super) > PAGE_SIZE)
1208 		return 1;
1209 
1210 	/* make sure our super doesn't straddle pages on disk */
1211 	index = bytenr >> PAGE_SHIFT;
1212 	if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
1213 		return 1;
1214 
1215 	/* pull in the page with our super */
1216 	*page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
1217 				   index, GFP_KERNEL);
1218 
1219 	if (IS_ERR_OR_NULL(*page))
1220 		return 1;
1221 
1222 	p = kmap(*page);
1223 
1224 	/* align our pointer to the offset of the super block */
1225 	*disk_super = p + (bytenr & ~PAGE_MASK);
1226 
1227 	if (btrfs_super_bytenr(*disk_super) != bytenr ||
1228 	    btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
1229 		btrfs_release_disk_super(*page);
1230 		return 1;
1231 	}
1232 
1233 	if ((*disk_super)->label[0] &&
1234 		(*disk_super)->label[BTRFS_LABEL_SIZE - 1])
1235 		(*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';
1236 
1237 	return 0;
1238 }
1239 
1240 /*
1241  * Look for a btrfs signature on a device. This may be called out of the mount path
1242  * and we are not allowed to call set_blocksize during the scan. The superblock
1243  * is read via pagecache
1244  */
btrfs_scan_one_device(const char * path,fmode_t flags,void * holder)1245 struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
1246 					   void *holder)
1247 {
1248 	struct btrfs_super_block *disk_super;
1249 	bool new_device_added = false;
1250 	struct btrfs_device *device = NULL;
1251 	struct block_device *bdev;
1252 	struct page *page;
1253 	u64 bytenr;
1254 
1255 	lockdep_assert_held(&uuid_mutex);
1256 
1257 	/*
1258 	 * we would like to check all the supers, but that would make
1259 	 * a btrfs mount succeed after a mkfs from a different FS.
1260 	 * So, we need to add a special mount option to scan for
1261 	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
1262 	 */
1263 	bytenr = btrfs_sb_offset(0);
1264 	flags |= FMODE_EXCL;
1265 
1266 	bdev = blkdev_get_by_path(path, flags, holder);
1267 	if (IS_ERR(bdev))
1268 		return ERR_CAST(bdev);
1269 
1270 	if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) {
1271 		device = ERR_PTR(-EINVAL);
1272 		goto error_bdev_put;
1273 	}
1274 
1275 	device = device_list_add(path, disk_super, &new_device_added);
1276 	if (!IS_ERR(device)) {
1277 		if (new_device_added)
1278 			btrfs_free_stale_devices(path, device);
1279 	}
1280 
1281 	btrfs_release_disk_super(page);
1282 
1283 error_bdev_put:
1284 	blkdev_put(bdev, flags);
1285 
1286 	return device;
1287 }
1288 
contains_pending_extent(struct btrfs_transaction * transaction,struct btrfs_device * device,u64 * start,u64 len)1289 static int contains_pending_extent(struct btrfs_transaction *transaction,
1290 				   struct btrfs_device *device,
1291 				   u64 *start, u64 len)
1292 {
1293 	struct btrfs_fs_info *fs_info = device->fs_info;
1294 	struct extent_map *em;
1295 	struct list_head *search_list = &fs_info->pinned_chunks;
1296 	int ret = 0;
1297 	u64 physical_start = *start;
1298 
1299 	if (transaction)
1300 		search_list = &transaction->pending_chunks;
1301 again:
1302 	list_for_each_entry(em, search_list, list) {
1303 		struct map_lookup *map;
1304 		int i;
1305 
1306 		map = em->map_lookup;
1307 		for (i = 0; i < map->num_stripes; i++) {
1308 			u64 end;
1309 
1310 			if (map->stripes[i].dev != device)
1311 				continue;
1312 			if (map->stripes[i].physical >= physical_start + len ||
1313 			    map->stripes[i].physical + em->orig_block_len <=
1314 			    physical_start)
1315 				continue;
1316 			/*
1317 			 * Make sure that while processing the pinned list we do
1318 			 * not override our *start with a lower value, because
1319 			 * we can have pinned chunks that fall within this
1320 			 * device hole and that have lower physical addresses
1321 			 * than the pending chunks we processed before. If we
1322 			 * do not take this special care we can end up getting
1323 			 * 2 pending chunks that start at the same physical
1324 			 * device offsets because the end offset of a pinned
1325 			 * chunk can be equal to the start offset of some
1326 			 * pending chunk.
1327 			 */
1328 			end = map->stripes[i].physical + em->orig_block_len;
1329 			if (end > *start) {
1330 				*start = end;
1331 				ret = 1;
1332 			}
1333 		}
1334 	}
1335 	if (search_list != &fs_info->pinned_chunks) {
1336 		search_list = &fs_info->pinned_chunks;
1337 		goto again;
1338 	}
1339 
1340 	return ret;
1341 }
1342 
1343 
1344 /*
1345  * find_free_dev_extent_start - find free space in the specified device
1346  * @device:	  the device which we search the free space in
1347  * @num_bytes:	  the size of the free space that we need
1348  * @search_start: the position from which to begin the search
1349  * @start:	  store the start of the free space.
1350  * @len:	  the size of the free space. that we find, or the size
1351  *		  of the max free space if we don't find suitable free space
1352  *
1353  * this uses a pretty simple search, the expectation is that it is
1354  * called very infrequently and that a given device has a small number
1355  * of extents
1356  *
1357  * @start is used to store the start of the free space if we find. But if we
1358  * don't find suitable free space, it will be used to store the start position
1359  * of the max free space.
1360  *
1361  * @len is used to store the size of the free space that we find.
1362  * But if we don't find suitable free space, it is used to store the size of
1363  * the max free space.
1364  */
find_free_dev_extent_start(struct btrfs_transaction * transaction,struct btrfs_device * device,u64 num_bytes,u64 search_start,u64 * start,u64 * len)1365 int find_free_dev_extent_start(struct btrfs_transaction *transaction,
1366 			       struct btrfs_device *device, u64 num_bytes,
1367 			       u64 search_start, u64 *start, u64 *len)
1368 {
1369 	struct btrfs_fs_info *fs_info = device->fs_info;
1370 	struct btrfs_root *root = fs_info->dev_root;
1371 	struct btrfs_key key;
1372 	struct btrfs_dev_extent *dev_extent;
1373 	struct btrfs_path *path;
1374 	u64 hole_size;
1375 	u64 max_hole_start;
1376 	u64 max_hole_size;
1377 	u64 extent_end;
1378 	u64 search_end = device->total_bytes;
1379 	int ret;
1380 	int slot;
1381 	struct extent_buffer *l;
1382 
1383 	/*
1384 	 * We don't want to overwrite the superblock on the drive nor any area
1385 	 * used by the boot loader (grub for example), so we make sure to start
1386 	 * at an offset of at least 1MB.
1387 	 */
1388 	search_start = max_t(u64, search_start, SZ_1M);
1389 
1390 	path = btrfs_alloc_path();
1391 	if (!path)
1392 		return -ENOMEM;
1393 
1394 	max_hole_start = search_start;
1395 	max_hole_size = 0;
1396 
1397 again:
1398 	if (search_start >= search_end ||
1399 		test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1400 		ret = -ENOSPC;
1401 		goto out;
1402 	}
1403 
1404 	path->reada = READA_FORWARD;
1405 	path->search_commit_root = 1;
1406 	path->skip_locking = 1;
1407 
1408 	key.objectid = device->devid;
1409 	key.offset = search_start;
1410 	key.type = BTRFS_DEV_EXTENT_KEY;
1411 
1412 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1413 	if (ret < 0)
1414 		goto out;
1415 	if (ret > 0) {
1416 		ret = btrfs_previous_item(root, path, key.objectid, key.type);
1417 		if (ret < 0)
1418 			goto out;
1419 	}
1420 
1421 	while (search_start < search_end) {
1422 		l = path->nodes[0];
1423 		slot = path->slots[0];
1424 		if (slot >= btrfs_header_nritems(l)) {
1425 			ret = btrfs_next_leaf(root, path);
1426 			if (ret == 0)
1427 				continue;
1428 			if (ret < 0)
1429 				goto out;
1430 
1431 			break;
1432 		}
1433 		btrfs_item_key_to_cpu(l, &key, slot);
1434 
1435 		if (key.objectid < device->devid)
1436 			goto next;
1437 
1438 		if (key.objectid > device->devid)
1439 			break;
1440 
1441 		if (key.type != BTRFS_DEV_EXTENT_KEY)
1442 			goto next;
1443 
1444 		if (key.offset > search_end)
1445 			break;
1446 
1447 		if (key.offset > search_start) {
1448 			hole_size = key.offset - search_start;
1449 
1450 			/*
1451 			 * Have to check before we set max_hole_start, otherwise
1452 			 * we could end up sending back this offset anyway.
1453 			 */
1454 			if (contains_pending_extent(transaction, device,
1455 						    &search_start,
1456 						    hole_size)) {
1457 				if (key.offset >= search_start) {
1458 					hole_size = key.offset - search_start;
1459 				} else {
1460 					WARN_ON_ONCE(1);
1461 					hole_size = 0;
1462 				}
1463 			}
1464 
1465 			if (hole_size > max_hole_size) {
1466 				max_hole_start = search_start;
1467 				max_hole_size = hole_size;
1468 			}
1469 
1470 			/*
1471 			 * If this free space is greater than which we need,
1472 			 * it must be the max free space that we have found
1473 			 * until now, so max_hole_start must point to the start
1474 			 * of this free space and the length of this free space
1475 			 * is stored in max_hole_size. Thus, we return
1476 			 * max_hole_start and max_hole_size and go back to the
1477 			 * caller.
1478 			 */
1479 			if (hole_size >= num_bytes) {
1480 				ret = 0;
1481 				goto out;
1482 			}
1483 		}
1484 
1485 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1486 		extent_end = key.offset + btrfs_dev_extent_length(l,
1487 								  dev_extent);
1488 		if (extent_end > search_start)
1489 			search_start = extent_end;
1490 next:
1491 		path->slots[0]++;
1492 		cond_resched();
1493 	}
1494 
1495 	/*
1496 	 * At this point, search_start should be the end of
1497 	 * allocated dev extents, and when shrinking the device,
1498 	 * search_end may be smaller than search_start.
1499 	 */
1500 	if (search_end > search_start) {
1501 		hole_size = search_end - search_start;
1502 
1503 		if (contains_pending_extent(transaction, device, &search_start,
1504 					    hole_size)) {
1505 			btrfs_release_path(path);
1506 			goto again;
1507 		}
1508 
1509 		if (hole_size > max_hole_size) {
1510 			max_hole_start = search_start;
1511 			max_hole_size = hole_size;
1512 		}
1513 	}
1514 
1515 	/* See above. */
1516 	if (max_hole_size < num_bytes)
1517 		ret = -ENOSPC;
1518 	else
1519 		ret = 0;
1520 
1521 	ASSERT(max_hole_start + max_hole_size <= search_end);
1522 out:
1523 	btrfs_free_path(path);
1524 	*start = max_hole_start;
1525 	if (len)
1526 		*len = max_hole_size;
1527 	return ret;
1528 }
1529 
find_free_dev_extent(struct btrfs_trans_handle * trans,struct btrfs_device * device,u64 num_bytes,u64 * start,u64 * len)1530 int find_free_dev_extent(struct btrfs_trans_handle *trans,
1531 			 struct btrfs_device *device, u64 num_bytes,
1532 			 u64 *start, u64 *len)
1533 {
1534 	/* FIXME use last free of some kind */
1535 	return find_free_dev_extent_start(trans->transaction, device,
1536 					  num_bytes, 0, start, len);
1537 }
1538 
btrfs_free_dev_extent(struct btrfs_trans_handle * trans,struct btrfs_device * device,u64 start,u64 * dev_extent_len)1539 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1540 			  struct btrfs_device *device,
1541 			  u64 start, u64 *dev_extent_len)
1542 {
1543 	struct btrfs_fs_info *fs_info = device->fs_info;
1544 	struct btrfs_root *root = fs_info->dev_root;
1545 	int ret;
1546 	struct btrfs_path *path;
1547 	struct btrfs_key key;
1548 	struct btrfs_key found_key;
1549 	struct extent_buffer *leaf = NULL;
1550 	struct btrfs_dev_extent *extent = NULL;
1551 
1552 	path = btrfs_alloc_path();
1553 	if (!path)
1554 		return -ENOMEM;
1555 
1556 	key.objectid = device->devid;
1557 	key.offset = start;
1558 	key.type = BTRFS_DEV_EXTENT_KEY;
1559 again:
1560 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1561 	if (ret > 0) {
1562 		ret = btrfs_previous_item(root, path, key.objectid,
1563 					  BTRFS_DEV_EXTENT_KEY);
1564 		if (ret)
1565 			goto out;
1566 		leaf = path->nodes[0];
1567 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1568 		extent = btrfs_item_ptr(leaf, path->slots[0],
1569 					struct btrfs_dev_extent);
1570 		BUG_ON(found_key.offset > start || found_key.offset +
1571 		       btrfs_dev_extent_length(leaf, extent) < start);
1572 		key = found_key;
1573 		btrfs_release_path(path);
1574 		goto again;
1575 	} else if (ret == 0) {
1576 		leaf = path->nodes[0];
1577 		extent = btrfs_item_ptr(leaf, path->slots[0],
1578 					struct btrfs_dev_extent);
1579 	} else {
1580 		btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
1581 		goto out;
1582 	}
1583 
1584 	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);
1585 
1586 	ret = btrfs_del_item(trans, root, path);
1587 	if (ret) {
1588 		btrfs_handle_fs_error(fs_info, ret,
1589 				      "Failed to remove dev extent item");
1590 	} else {
1591 		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
1592 	}
1593 out:
1594 	btrfs_free_path(path);
1595 	return ret;
1596 }
1597 
btrfs_alloc_dev_extent(struct btrfs_trans_handle * trans,struct btrfs_device * device,u64 chunk_offset,u64 start,u64 num_bytes)1598 static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1599 				  struct btrfs_device *device,
1600 				  u64 chunk_offset, u64 start, u64 num_bytes)
1601 {
1602 	int ret;
1603 	struct btrfs_path *path;
1604 	struct btrfs_fs_info *fs_info = device->fs_info;
1605 	struct btrfs_root *root = fs_info->dev_root;
1606 	struct btrfs_dev_extent *extent;
1607 	struct extent_buffer *leaf;
1608 	struct btrfs_key key;
1609 
1610 	WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
1611 	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
1612 	path = btrfs_alloc_path();
1613 	if (!path)
1614 		return -ENOMEM;
1615 
1616 	key.objectid = device->devid;
1617 	key.offset = start;
1618 	key.type = BTRFS_DEV_EXTENT_KEY;
1619 	ret = btrfs_insert_empty_item(trans, root, path, &key,
1620 				      sizeof(*extent));
1621 	if (ret)
1622 		goto out;
1623 
1624 	leaf = path->nodes[0];
1625 	extent = btrfs_item_ptr(leaf, path->slots[0],
1626 				struct btrfs_dev_extent);
1627 	btrfs_set_dev_extent_chunk_tree(leaf, extent,
1628 					BTRFS_CHUNK_TREE_OBJECTID);
1629 	btrfs_set_dev_extent_chunk_objectid(leaf, extent,
1630 					    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
1631 	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1632 
1633 	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1634 	btrfs_mark_buffer_dirty(leaf);
1635 out:
1636 	btrfs_free_path(path);
1637 	return ret;
1638 }
1639 
find_next_chunk(struct btrfs_fs_info * fs_info)1640 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1641 {
1642 	struct extent_map_tree *em_tree;
1643 	struct extent_map *em;
1644 	struct rb_node *n;
1645 	u64 ret = 0;
1646 
1647 	em_tree = &fs_info->mapping_tree.map_tree;
1648 	read_lock(&em_tree->lock);
1649 	n = rb_last(&em_tree->map);
1650 	if (n) {
1651 		em = rb_entry(n, struct extent_map, rb_node);
1652 		ret = em->start + em->len;
1653 	}
1654 	read_unlock(&em_tree->lock);
1655 
1656 	return ret;
1657 }
1658 
find_next_devid(struct btrfs_fs_info * fs_info,u64 * devid_ret)1659 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1660 				    u64 *devid_ret)
1661 {
1662 	int ret;
1663 	struct btrfs_key key;
1664 	struct btrfs_key found_key;
1665 	struct btrfs_path *path;
1666 
1667 	path = btrfs_alloc_path();
1668 	if (!path)
1669 		return -ENOMEM;
1670 
1671 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1672 	key.type = BTRFS_DEV_ITEM_KEY;
1673 	key.offset = (u64)-1;
1674 
1675 	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1676 	if (ret < 0)
1677 		goto error;
1678 
1679 	BUG_ON(ret == 0); /* Corruption */
1680 
1681 	ret = btrfs_previous_item(fs_info->chunk_root, path,
1682 				  BTRFS_DEV_ITEMS_OBJECTID,
1683 				  BTRFS_DEV_ITEM_KEY);
1684 	if (ret) {
1685 		*devid_ret = 1;
1686 	} else {
1687 		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1688 				      path->slots[0]);
1689 		*devid_ret = found_key.offset + 1;
1690 	}
1691 	ret = 0;
1692 error:
1693 	btrfs_free_path(path);
1694 	return ret;
1695 }
1696 
1697 /*
1698  * the device information is stored in the chunk root
1699  * the btrfs_device struct should be fully filled in
1700  */
btrfs_add_dev_item(struct btrfs_trans_handle * trans,struct btrfs_device * device)1701 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
1702 			    struct btrfs_device *device)
1703 {
1704 	int ret;
1705 	struct btrfs_path *path;
1706 	struct btrfs_dev_item *dev_item;
1707 	struct extent_buffer *leaf;
1708 	struct btrfs_key key;
1709 	unsigned long ptr;
1710 
1711 	path = btrfs_alloc_path();
1712 	if (!path)
1713 		return -ENOMEM;
1714 
1715 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1716 	key.type = BTRFS_DEV_ITEM_KEY;
1717 	key.offset = device->devid;
1718 
1719 	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
1720 				      &key, sizeof(*dev_item));
1721 	if (ret)
1722 		goto out;
1723 
1724 	leaf = path->nodes[0];
1725 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1726 
1727 	btrfs_set_device_id(leaf, dev_item, device->devid);
1728 	btrfs_set_device_generation(leaf, dev_item, 0);
1729 	btrfs_set_device_type(leaf, dev_item, device->type);
1730 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1731 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1732 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1733 	btrfs_set_device_total_bytes(leaf, dev_item,
1734 				     btrfs_device_get_disk_total_bytes(device));
1735 	btrfs_set_device_bytes_used(leaf, dev_item,
1736 				    btrfs_device_get_bytes_used(device));
1737 	btrfs_set_device_group(leaf, dev_item, 0);
1738 	btrfs_set_device_seek_speed(leaf, dev_item, 0);
1739 	btrfs_set_device_bandwidth(leaf, dev_item, 0);
1740 	btrfs_set_device_start_offset(leaf, dev_item, 0);
1741 
1742 	ptr = btrfs_device_uuid(dev_item);
1743 	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1744 	ptr = btrfs_device_fsid(dev_item);
1745 	write_extent_buffer(leaf, trans->fs_info->fsid, ptr, BTRFS_FSID_SIZE);
1746 	btrfs_mark_buffer_dirty(leaf);
1747 
1748 	ret = 0;
1749 out:
1750 	btrfs_free_path(path);
1751 	return ret;
1752 }
1753 
1754 /*
1755  * Function to update ctime/mtime for a given device path.
1756  * Mainly used for ctime/mtime based probe like libblkid.
1757  */
update_dev_time(const char * path_name)1758 static void update_dev_time(const char *path_name)
1759 {
1760 	struct file *filp;
1761 
1762 	filp = filp_open(path_name, O_RDWR, 0);
1763 	if (IS_ERR(filp))
1764 		return;
1765 	file_update_time(filp);
1766 	filp_close(filp, NULL);
1767 }
1768 
btrfs_rm_dev_item(struct btrfs_fs_info * fs_info,struct btrfs_device * device)1769 static int btrfs_rm_dev_item(struct btrfs_fs_info *fs_info,
1770 			     struct btrfs_device *device)
1771 {
1772 	struct btrfs_root *root = fs_info->chunk_root;
1773 	int ret;
1774 	struct btrfs_path *path;
1775 	struct btrfs_key key;
1776 	struct btrfs_trans_handle *trans;
1777 
1778 	path = btrfs_alloc_path();
1779 	if (!path)
1780 		return -ENOMEM;
1781 
1782 	trans = btrfs_start_transaction(root, 0);
1783 	if (IS_ERR(trans)) {
1784 		btrfs_free_path(path);
1785 		return PTR_ERR(trans);
1786 	}
1787 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1788 	key.type = BTRFS_DEV_ITEM_KEY;
1789 	key.offset = device->devid;
1790 
1791 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1792 	if (ret) {
1793 		if (ret > 0)
1794 			ret = -ENOENT;
1795 		btrfs_abort_transaction(trans, ret);
1796 		btrfs_end_transaction(trans);
1797 		goto out;
1798 	}
1799 
1800 	ret = btrfs_del_item(trans, root, path);
1801 	if (ret) {
1802 		btrfs_abort_transaction(trans, ret);
1803 		btrfs_end_transaction(trans);
1804 	}
1805 
1806 out:
1807 	btrfs_free_path(path);
1808 	if (!ret)
1809 		ret = btrfs_commit_transaction(trans);
1810 	return ret;
1811 }
1812 
1813 /*
1814  * Verify that @num_devices satisfies the RAID profile constraints in the whole
1815  * filesystem. It's up to the caller to adjust that number regarding eg. device
1816  * replace.
1817  */
btrfs_check_raid_min_devices(struct btrfs_fs_info * fs_info,u64 num_devices)1818 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1819 		u64 num_devices)
1820 {
1821 	u64 all_avail;
1822 	unsigned seq;
1823 	int i;
1824 
1825 	do {
1826 		seq = read_seqbegin(&fs_info->profiles_lock);
1827 
1828 		all_avail = fs_info->avail_data_alloc_bits |
1829 			    fs_info->avail_system_alloc_bits |
1830 			    fs_info->avail_metadata_alloc_bits;
1831 	} while (read_seqretry(&fs_info->profiles_lock, seq));
1832 
1833 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1834 		if (!(all_avail & btrfs_raid_array[i].bg_flag))
1835 			continue;
1836 
1837 		if (num_devices < btrfs_raid_array[i].devs_min) {
1838 			int ret = btrfs_raid_array[i].mindev_error;
1839 
1840 			if (ret)
1841 				return ret;
1842 		}
1843 	}
1844 
1845 	return 0;
1846 }
1847 
btrfs_find_next_active_device(struct btrfs_fs_devices * fs_devs,struct btrfs_device * device)1848 static struct btrfs_device * btrfs_find_next_active_device(
1849 		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
1850 {
1851 	struct btrfs_device *next_device;
1852 
1853 	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
1854 		if (next_device != device &&
1855 		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
1856 		    && next_device->bdev)
1857 			return next_device;
1858 	}
1859 
1860 	return NULL;
1861 }
1862 
1863 /*
1864  * Helper function to check if the given device is part of s_bdev / latest_bdev
1865  * and replace it with the provided or the next active device, in the context
1866  * where this function called, there should be always be another device (or
1867  * this_dev) which is active.
1868  */
btrfs_assign_next_active_device(struct btrfs_device * device,struct btrfs_device * this_dev)1869 void btrfs_assign_next_active_device(struct btrfs_device *device,
1870 				     struct btrfs_device *this_dev)
1871 {
1872 	struct btrfs_fs_info *fs_info = device->fs_info;
1873 	struct btrfs_device *next_device;
1874 
1875 	if (this_dev)
1876 		next_device = this_dev;
1877 	else
1878 		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
1879 								device);
1880 	ASSERT(next_device);
1881 
1882 	if (fs_info->sb->s_bdev &&
1883 			(fs_info->sb->s_bdev == device->bdev))
1884 		fs_info->sb->s_bdev = next_device->bdev;
1885 
1886 	if (fs_info->fs_devices->latest_bdev == device->bdev)
1887 		fs_info->fs_devices->latest_bdev = next_device->bdev;
1888 }
1889 
btrfs_rm_device(struct btrfs_fs_info * fs_info,const char * device_path,u64 devid)1890 int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
1891 		u64 devid)
1892 {
1893 	struct btrfs_device *device;
1894 	struct btrfs_fs_devices *cur_devices;
1895 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
1896 	u64 num_devices;
1897 	int ret = 0;
1898 
1899 	mutex_lock(&uuid_mutex);
1900 
1901 	num_devices = fs_devices->num_devices;
1902 	btrfs_dev_replace_read_lock(&fs_info->dev_replace);
1903 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
1904 		WARN_ON(num_devices < 1);
1905 		num_devices--;
1906 	}
1907 	btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
1908 
1909 	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
1910 	if (ret)
1911 		goto out;
1912 
1913 	ret = btrfs_find_device_by_devspec(fs_info, devid, device_path,
1914 					   &device);
1915 	if (ret)
1916 		goto out;
1917 
1918 	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1919 		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
1920 		goto out;
1921 	}
1922 
1923 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
1924 	    fs_info->fs_devices->rw_devices == 1) {
1925 		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
1926 		goto out;
1927 	}
1928 
1929 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1930 		mutex_lock(&fs_info->chunk_mutex);
1931 		list_del_init(&device->dev_alloc_list);
1932 		device->fs_devices->rw_devices--;
1933 		mutex_unlock(&fs_info->chunk_mutex);
1934 	}
1935 
1936 	mutex_unlock(&uuid_mutex);
1937 	ret = btrfs_shrink_device(device, 0);
1938 	mutex_lock(&uuid_mutex);
1939 	if (ret)
1940 		goto error_undo;
1941 
1942 	/*
1943 	 * TODO: the superblock still includes this device in its num_devices
1944 	 * counter although write_all_supers() is not locked out. This
1945 	 * could give a filesystem state which requires a degraded mount.
1946 	 */
1947 	ret = btrfs_rm_dev_item(fs_info, device);
1948 	if (ret)
1949 		goto error_undo;
1950 
1951 	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
1952 	btrfs_scrub_cancel_dev(fs_info, device);
1953 
1954 	/*
1955 	 * the device list mutex makes sure that we don't change
1956 	 * the device list while someone else is writing out all
1957 	 * the device supers. Whoever is writing all supers, should
1958 	 * lock the device list mutex before getting the number of
1959 	 * devices in the super block (super_copy). Conversely,
1960 	 * whoever updates the number of devices in the super block
1961 	 * (super_copy) should hold the device list mutex.
1962 	 */
1963 
1964 	/*
1965 	 * In normal cases the cur_devices == fs_devices. But in case
1966 	 * of deleting a seed device, the cur_devices should point to
1967 	 * its own fs_devices listed under the fs_devices->seed.
1968 	 */
1969 	cur_devices = device->fs_devices;
1970 	mutex_lock(&fs_devices->device_list_mutex);
1971 	list_del_rcu(&device->dev_list);
1972 
1973 	cur_devices->num_devices--;
1974 	cur_devices->total_devices--;
1975 	/* Update total_devices of the parent fs_devices if it's seed */
1976 	if (cur_devices != fs_devices)
1977 		fs_devices->total_devices--;
1978 
1979 	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
1980 		cur_devices->missing_devices--;
1981 
1982 	btrfs_assign_next_active_device(device, NULL);
1983 
1984 	if (device->bdev) {
1985 		cur_devices->open_devices--;
1986 		/* remove sysfs entry */
1987 		btrfs_sysfs_rm_device_link(fs_devices, device);
1988 	}
1989 
1990 	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
1991 	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
1992 	mutex_unlock(&fs_devices->device_list_mutex);
1993 
1994 	/*
1995 	 * at this point, the device is zero sized and detached from
1996 	 * the devices list.  All that's left is to zero out the old
1997 	 * supers and free the device.
1998 	 */
1999 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2000 		btrfs_scratch_superblocks(device->bdev, device->name->str);
2001 
2002 	btrfs_close_bdev(device);
2003 	call_rcu(&device->rcu, free_device_rcu);
2004 
2005 	if (cur_devices->open_devices == 0) {
2006 		while (fs_devices) {
2007 			if (fs_devices->seed == cur_devices) {
2008 				fs_devices->seed = cur_devices->seed;
2009 				break;
2010 			}
2011 			fs_devices = fs_devices->seed;
2012 		}
2013 		cur_devices->seed = NULL;
2014 		close_fs_devices(cur_devices);
2015 		free_fs_devices(cur_devices);
2016 	}
2017 
2018 out:
2019 	mutex_unlock(&uuid_mutex);
2020 	return ret;
2021 
2022 error_undo:
2023 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2024 		mutex_lock(&fs_info->chunk_mutex);
2025 		list_add(&device->dev_alloc_list,
2026 			 &fs_devices->alloc_list);
2027 		device->fs_devices->rw_devices++;
2028 		mutex_unlock(&fs_info->chunk_mutex);
2029 	}
2030 	goto out;
2031 }
2032 
btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device * srcdev)2033 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2034 {
2035 	struct btrfs_fs_devices *fs_devices;
2036 
2037 	lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2038 
2039 	/*
2040 	 * in case of fs with no seed, srcdev->fs_devices will point
2041 	 * to fs_devices of fs_info. However when the dev being replaced is
2042 	 * a seed dev it will point to the seed's local fs_devices. In short
2043 	 * srcdev will have its correct fs_devices in both the cases.
2044 	 */
2045 	fs_devices = srcdev->fs_devices;
2046 
2047 	list_del_rcu(&srcdev->dev_list);
2048 	list_del(&srcdev->dev_alloc_list);
2049 	fs_devices->num_devices--;
2050 	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2051 		fs_devices->missing_devices--;
2052 
2053 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2054 		fs_devices->rw_devices--;
2055 
2056 	if (srcdev->bdev)
2057 		fs_devices->open_devices--;
2058 }
2059 
btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info * fs_info,struct btrfs_device * srcdev)2060 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
2061 				      struct btrfs_device *srcdev)
2062 {
2063 	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2064 
2065 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) {
2066 		/* zero out the old super if it is writable */
2067 		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
2068 	}
2069 
2070 	btrfs_close_bdev(srcdev);
2071 	call_rcu(&srcdev->rcu, free_device_rcu);
2072 
2073 	/* if this is no devs we rather delete the fs_devices */
2074 	if (!fs_devices->num_devices) {
2075 		struct btrfs_fs_devices *tmp_fs_devices;
2076 
2077 		/*
2078 		 * On a mounted FS, num_devices can't be zero unless it's a
2079 		 * seed. In case of a seed device being replaced, the replace
2080 		 * target added to the sprout FS, so there will be no more
2081 		 * device left under the seed FS.
2082 		 */
2083 		ASSERT(fs_devices->seeding);
2084 
2085 		tmp_fs_devices = fs_info->fs_devices;
2086 		while (tmp_fs_devices) {
2087 			if (tmp_fs_devices->seed == fs_devices) {
2088 				tmp_fs_devices->seed = fs_devices->seed;
2089 				break;
2090 			}
2091 			tmp_fs_devices = tmp_fs_devices->seed;
2092 		}
2093 		fs_devices->seed = NULL;
2094 		close_fs_devices(fs_devices);
2095 		free_fs_devices(fs_devices);
2096 	}
2097 }
2098 
btrfs_destroy_dev_replace_tgtdev(struct btrfs_device * tgtdev)2099 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2100 {
2101 	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2102 
2103 	WARN_ON(!tgtdev);
2104 	mutex_lock(&fs_devices->device_list_mutex);
2105 
2106 	btrfs_sysfs_rm_device_link(fs_devices, tgtdev);
2107 
2108 	if (tgtdev->bdev)
2109 		fs_devices->open_devices--;
2110 
2111 	fs_devices->num_devices--;
2112 
2113 	btrfs_assign_next_active_device(tgtdev, NULL);
2114 
2115 	list_del_rcu(&tgtdev->dev_list);
2116 
2117 	mutex_unlock(&fs_devices->device_list_mutex);
2118 
2119 	/*
2120 	 * The update_dev_time() with in btrfs_scratch_superblocks()
2121 	 * may lead to a call to btrfs_show_devname() which will try
2122 	 * to hold device_list_mutex. And here this device
2123 	 * is already out of device list, so we don't have to hold
2124 	 * the device_list_mutex lock.
2125 	 */
2126 	btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
2127 
2128 	btrfs_close_bdev(tgtdev);
2129 	call_rcu(&tgtdev->rcu, free_device_rcu);
2130 }
2131 
btrfs_find_device_by_path(struct btrfs_fs_info * fs_info,const char * device_path,struct btrfs_device ** device)2132 static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info,
2133 				     const char *device_path,
2134 				     struct btrfs_device **device)
2135 {
2136 	int ret = 0;
2137 	struct btrfs_super_block *disk_super;
2138 	u64 devid;
2139 	u8 *dev_uuid;
2140 	struct block_device *bdev;
2141 	struct buffer_head *bh;
2142 
2143 	*device = NULL;
2144 	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2145 				    fs_info->bdev_holder, 0, &bdev, &bh);
2146 	if (ret)
2147 		return ret;
2148 	disk_super = (struct btrfs_super_block *)bh->b_data;
2149 	devid = btrfs_stack_device_id(&disk_super->dev_item);
2150 	dev_uuid = disk_super->dev_item.uuid;
2151 	*device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2152 				    disk_super->fsid, true);
2153 	brelse(bh);
2154 	if (!*device)
2155 		ret = -ENOENT;
2156 	blkdev_put(bdev, FMODE_READ);
2157 	return ret;
2158 }
2159 
btrfs_find_device_missing_or_by_path(struct btrfs_fs_info * fs_info,const char * device_path,struct btrfs_device ** device)2160 int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
2161 					 const char *device_path,
2162 					 struct btrfs_device **device)
2163 {
2164 	*device = NULL;
2165 	if (strcmp(device_path, "missing") == 0) {
2166 		struct list_head *devices;
2167 		struct btrfs_device *tmp;
2168 
2169 		devices = &fs_info->fs_devices->devices;
2170 		list_for_each_entry(tmp, devices, dev_list) {
2171 			if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
2172 					&tmp->dev_state) && !tmp->bdev) {
2173 				*device = tmp;
2174 				break;
2175 			}
2176 		}
2177 
2178 		if (!*device)
2179 			return BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2180 
2181 		return 0;
2182 	} else {
2183 		return btrfs_find_device_by_path(fs_info, device_path, device);
2184 	}
2185 }
2186 
2187 /*
2188  * Lookup a device given by device id, or the path if the id is 0.
2189  */
btrfs_find_device_by_devspec(struct btrfs_fs_info * fs_info,u64 devid,const char * devpath,struct btrfs_device ** device)2190 int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid,
2191 				 const char *devpath,
2192 				 struct btrfs_device **device)
2193 {
2194 	int ret;
2195 
2196 	if (devid) {
2197 		ret = 0;
2198 		*device = btrfs_find_device(fs_info->fs_devices, devid,
2199 					    NULL, NULL, true);
2200 		if (!*device)
2201 			ret = -ENOENT;
2202 	} else {
2203 		if (!devpath || !devpath[0])
2204 			return -EINVAL;
2205 
2206 		ret = btrfs_find_device_missing_or_by_path(fs_info, devpath,
2207 							   device);
2208 	}
2209 	return ret;
2210 }
2211 
2212 /*
2213  * does all the dirty work required for changing file system's UUID.
2214  */
btrfs_prepare_sprout(struct btrfs_fs_info * fs_info)2215 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
2216 {
2217 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2218 	struct btrfs_fs_devices *old_devices;
2219 	struct btrfs_fs_devices *seed_devices;
2220 	struct btrfs_super_block *disk_super = fs_info->super_copy;
2221 	struct btrfs_device *device;
2222 	u64 super_flags;
2223 
2224 	lockdep_assert_held(&uuid_mutex);
2225 	if (!fs_devices->seeding)
2226 		return -EINVAL;
2227 
2228 	seed_devices = alloc_fs_devices(NULL);
2229 	if (IS_ERR(seed_devices))
2230 		return PTR_ERR(seed_devices);
2231 
2232 	old_devices = clone_fs_devices(fs_devices);
2233 	if (IS_ERR(old_devices)) {
2234 		kfree(seed_devices);
2235 		return PTR_ERR(old_devices);
2236 	}
2237 
2238 	list_add(&old_devices->fs_list, &fs_uuids);
2239 
2240 	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2241 	seed_devices->opened = 1;
2242 	INIT_LIST_HEAD(&seed_devices->devices);
2243 	INIT_LIST_HEAD(&seed_devices->alloc_list);
2244 	mutex_init(&seed_devices->device_list_mutex);
2245 
2246 	mutex_lock(&fs_devices->device_list_mutex);
2247 	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2248 			      synchronize_rcu);
2249 	list_for_each_entry(device, &seed_devices->devices, dev_list)
2250 		device->fs_devices = seed_devices;
2251 
2252 	mutex_lock(&fs_info->chunk_mutex);
2253 	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
2254 	mutex_unlock(&fs_info->chunk_mutex);
2255 
2256 	fs_devices->seeding = 0;
2257 	fs_devices->num_devices = 0;
2258 	fs_devices->open_devices = 0;
2259 	fs_devices->missing_devices = 0;
2260 	fs_devices->rotating = 0;
2261 	fs_devices->seed = seed_devices;
2262 
2263 	generate_random_uuid(fs_devices->fsid);
2264 	memcpy(fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2265 	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2266 	mutex_unlock(&fs_devices->device_list_mutex);
2267 
2268 	super_flags = btrfs_super_flags(disk_super) &
2269 		      ~BTRFS_SUPER_FLAG_SEEDING;
2270 	btrfs_set_super_flags(disk_super, super_flags);
2271 
2272 	return 0;
2273 }
2274 
2275 /*
2276  * Store the expected generation for seed devices in device items.
2277  */
btrfs_finish_sprout(struct btrfs_trans_handle * trans,struct btrfs_fs_info * fs_info)2278 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
2279 			       struct btrfs_fs_info *fs_info)
2280 {
2281 	struct btrfs_root *root = fs_info->chunk_root;
2282 	struct btrfs_path *path;
2283 	struct extent_buffer *leaf;
2284 	struct btrfs_dev_item *dev_item;
2285 	struct btrfs_device *device;
2286 	struct btrfs_key key;
2287 	u8 fs_uuid[BTRFS_FSID_SIZE];
2288 	u8 dev_uuid[BTRFS_UUID_SIZE];
2289 	u64 devid;
2290 	int ret;
2291 
2292 	path = btrfs_alloc_path();
2293 	if (!path)
2294 		return -ENOMEM;
2295 
2296 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2297 	key.offset = 0;
2298 	key.type = BTRFS_DEV_ITEM_KEY;
2299 
2300 	while (1) {
2301 		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2302 		if (ret < 0)
2303 			goto error;
2304 
2305 		leaf = path->nodes[0];
2306 next_slot:
2307 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2308 			ret = btrfs_next_leaf(root, path);
2309 			if (ret > 0)
2310 				break;
2311 			if (ret < 0)
2312 				goto error;
2313 			leaf = path->nodes[0];
2314 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2315 			btrfs_release_path(path);
2316 			continue;
2317 		}
2318 
2319 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2320 		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2321 		    key.type != BTRFS_DEV_ITEM_KEY)
2322 			break;
2323 
2324 		dev_item = btrfs_item_ptr(leaf, path->slots[0],
2325 					  struct btrfs_dev_item);
2326 		devid = btrfs_device_id(leaf, dev_item);
2327 		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2328 				   BTRFS_UUID_SIZE);
2329 		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2330 				   BTRFS_FSID_SIZE);
2331 		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2332 					   fs_uuid, true);
2333 		BUG_ON(!device); /* Logic error */
2334 
2335 		if (device->fs_devices->seeding) {
2336 			btrfs_set_device_generation(leaf, dev_item,
2337 						    device->generation);
2338 			btrfs_mark_buffer_dirty(leaf);
2339 		}
2340 
2341 		path->slots[0]++;
2342 		goto next_slot;
2343 	}
2344 	ret = 0;
2345 error:
2346 	btrfs_free_path(path);
2347 	return ret;
2348 }
2349 
btrfs_init_new_device(struct btrfs_fs_info * fs_info,const char * device_path)2350 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2351 {
2352 	struct btrfs_root *root = fs_info->dev_root;
2353 	struct request_queue *q;
2354 	struct btrfs_trans_handle *trans;
2355 	struct btrfs_device *device;
2356 	struct block_device *bdev;
2357 	struct super_block *sb = fs_info->sb;
2358 	struct rcu_string *name;
2359 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2360 	u64 orig_super_total_bytes;
2361 	u64 orig_super_num_devices;
2362 	int seeding_dev = 0;
2363 	int ret = 0;
2364 	bool unlocked = false;
2365 
2366 	if (sb_rdonly(sb) && !fs_devices->seeding)
2367 		return -EROFS;
2368 
2369 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2370 				  fs_info->bdev_holder);
2371 	if (IS_ERR(bdev))
2372 		return PTR_ERR(bdev);
2373 
2374 	if (fs_devices->seeding) {
2375 		seeding_dev = 1;
2376 		down_write(&sb->s_umount);
2377 		mutex_lock(&uuid_mutex);
2378 	}
2379 
2380 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2381 
2382 	mutex_lock(&fs_devices->device_list_mutex);
2383 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
2384 		if (device->bdev == bdev) {
2385 			ret = -EEXIST;
2386 			mutex_unlock(
2387 				&fs_devices->device_list_mutex);
2388 			goto error;
2389 		}
2390 	}
2391 	mutex_unlock(&fs_devices->device_list_mutex);
2392 
2393 	device = btrfs_alloc_device(fs_info, NULL, NULL);
2394 	if (IS_ERR(device)) {
2395 		/* we can safely leave the fs_devices entry around */
2396 		ret = PTR_ERR(device);
2397 		goto error;
2398 	}
2399 
2400 	name = rcu_string_strdup(device_path, GFP_KERNEL);
2401 	if (!name) {
2402 		ret = -ENOMEM;
2403 		goto error_free_device;
2404 	}
2405 	rcu_assign_pointer(device->name, name);
2406 
2407 	trans = btrfs_start_transaction(root, 0);
2408 	if (IS_ERR(trans)) {
2409 		ret = PTR_ERR(trans);
2410 		goto error_free_device;
2411 	}
2412 
2413 	q = bdev_get_queue(bdev);
2414 	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2415 	device->generation = trans->transid;
2416 	device->io_width = fs_info->sectorsize;
2417 	device->io_align = fs_info->sectorsize;
2418 	device->sector_size = fs_info->sectorsize;
2419 	device->total_bytes = round_down(i_size_read(bdev->bd_inode),
2420 					 fs_info->sectorsize);
2421 	device->disk_total_bytes = device->total_bytes;
2422 	device->commit_total_bytes = device->total_bytes;
2423 	device->fs_info = fs_info;
2424 	device->bdev = bdev;
2425 	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2426 	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2427 	device->mode = FMODE_EXCL;
2428 	device->dev_stats_valid = 1;
2429 	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2430 
2431 	if (seeding_dev) {
2432 		sb->s_flags &= ~SB_RDONLY;
2433 		ret = btrfs_prepare_sprout(fs_info);
2434 		if (ret) {
2435 			btrfs_abort_transaction(trans, ret);
2436 			goto error_trans;
2437 		}
2438 	}
2439 
2440 	device->fs_devices = fs_devices;
2441 
2442 	mutex_lock(&fs_devices->device_list_mutex);
2443 	mutex_lock(&fs_info->chunk_mutex);
2444 	list_add_rcu(&device->dev_list, &fs_devices->devices);
2445 	list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2446 	fs_devices->num_devices++;
2447 	fs_devices->open_devices++;
2448 	fs_devices->rw_devices++;
2449 	fs_devices->total_devices++;
2450 	fs_devices->total_rw_bytes += device->total_bytes;
2451 
2452 	atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2453 
2454 	if (!blk_queue_nonrot(q))
2455 		fs_devices->rotating = 1;
2456 
2457 	orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2458 	btrfs_set_super_total_bytes(fs_info->super_copy,
2459 		round_down(orig_super_total_bytes + device->total_bytes,
2460 			   fs_info->sectorsize));
2461 
2462 	orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2463 	btrfs_set_super_num_devices(fs_info->super_copy,
2464 				    orig_super_num_devices + 1);
2465 
2466 	/*
2467 	 * we've got more storage, clear any full flags on the space
2468 	 * infos
2469 	 */
2470 	btrfs_clear_space_info_full(fs_info);
2471 
2472 	mutex_unlock(&fs_info->chunk_mutex);
2473 
2474 	/* Add sysfs device entry */
2475 	btrfs_sysfs_add_device_link(fs_devices, device);
2476 
2477 	mutex_unlock(&fs_devices->device_list_mutex);
2478 
2479 	if (seeding_dev) {
2480 		mutex_lock(&fs_info->chunk_mutex);
2481 		ret = init_first_rw_device(trans, fs_info);
2482 		mutex_unlock(&fs_info->chunk_mutex);
2483 		if (ret) {
2484 			btrfs_abort_transaction(trans, ret);
2485 			goto error_sysfs;
2486 		}
2487 	}
2488 
2489 	ret = btrfs_add_dev_item(trans, device);
2490 	if (ret) {
2491 		btrfs_abort_transaction(trans, ret);
2492 		goto error_sysfs;
2493 	}
2494 
2495 	if (seeding_dev) {
2496 		char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
2497 
2498 		ret = btrfs_finish_sprout(trans, fs_info);
2499 		if (ret) {
2500 			btrfs_abort_transaction(trans, ret);
2501 			goto error_sysfs;
2502 		}
2503 
2504 		/* Sprouting would change fsid of the mounted root,
2505 		 * so rename the fsid on the sysfs
2506 		 */
2507 		snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
2508 						fs_info->fsid);
2509 		if (kobject_rename(&fs_devices->fsid_kobj, fsid_buf))
2510 			btrfs_warn(fs_info,
2511 				   "sysfs: failed to create fsid for sprout");
2512 	}
2513 
2514 	ret = btrfs_commit_transaction(trans);
2515 
2516 	if (seeding_dev) {
2517 		mutex_unlock(&uuid_mutex);
2518 		up_write(&sb->s_umount);
2519 		unlocked = true;
2520 
2521 		if (ret) /* transaction commit */
2522 			return ret;
2523 
2524 		ret = btrfs_relocate_sys_chunks(fs_info);
2525 		if (ret < 0)
2526 			btrfs_handle_fs_error(fs_info, ret,
2527 				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2528 		trans = btrfs_attach_transaction(root);
2529 		if (IS_ERR(trans)) {
2530 			if (PTR_ERR(trans) == -ENOENT)
2531 				return 0;
2532 			ret = PTR_ERR(trans);
2533 			trans = NULL;
2534 			goto error_sysfs;
2535 		}
2536 		ret = btrfs_commit_transaction(trans);
2537 	}
2538 
2539 	/* Update ctime/mtime for libblkid */
2540 	update_dev_time(device_path);
2541 	return ret;
2542 
2543 error_sysfs:
2544 	btrfs_sysfs_rm_device_link(fs_devices, device);
2545 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2546 	mutex_lock(&fs_info->chunk_mutex);
2547 	list_del_rcu(&device->dev_list);
2548 	list_del(&device->dev_alloc_list);
2549 	fs_info->fs_devices->num_devices--;
2550 	fs_info->fs_devices->open_devices--;
2551 	fs_info->fs_devices->rw_devices--;
2552 	fs_info->fs_devices->total_devices--;
2553 	fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2554 	atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2555 	btrfs_set_super_total_bytes(fs_info->super_copy,
2556 				    orig_super_total_bytes);
2557 	btrfs_set_super_num_devices(fs_info->super_copy,
2558 				    orig_super_num_devices);
2559 	mutex_unlock(&fs_info->chunk_mutex);
2560 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2561 error_trans:
2562 	if (seeding_dev)
2563 		sb->s_flags |= SB_RDONLY;
2564 	if (trans)
2565 		btrfs_end_transaction(trans);
2566 error_free_device:
2567 	btrfs_free_device(device);
2568 error:
2569 	blkdev_put(bdev, FMODE_EXCL);
2570 	if (seeding_dev && !unlocked) {
2571 		mutex_unlock(&uuid_mutex);
2572 		up_write(&sb->s_umount);
2573 	}
2574 	return ret;
2575 }
2576 
btrfs_update_device(struct btrfs_trans_handle * trans,struct btrfs_device * device)2577 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2578 					struct btrfs_device *device)
2579 {
2580 	int ret;
2581 	struct btrfs_path *path;
2582 	struct btrfs_root *root = device->fs_info->chunk_root;
2583 	struct btrfs_dev_item *dev_item;
2584 	struct extent_buffer *leaf;
2585 	struct btrfs_key key;
2586 
2587 	path = btrfs_alloc_path();
2588 	if (!path)
2589 		return -ENOMEM;
2590 
2591 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2592 	key.type = BTRFS_DEV_ITEM_KEY;
2593 	key.offset = device->devid;
2594 
2595 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2596 	if (ret < 0)
2597 		goto out;
2598 
2599 	if (ret > 0) {
2600 		ret = -ENOENT;
2601 		goto out;
2602 	}
2603 
2604 	leaf = path->nodes[0];
2605 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2606 
2607 	btrfs_set_device_id(leaf, dev_item, device->devid);
2608 	btrfs_set_device_type(leaf, dev_item, device->type);
2609 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2610 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2611 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2612 	btrfs_set_device_total_bytes(leaf, dev_item,
2613 				     btrfs_device_get_disk_total_bytes(device));
2614 	btrfs_set_device_bytes_used(leaf, dev_item,
2615 				    btrfs_device_get_bytes_used(device));
2616 	btrfs_mark_buffer_dirty(leaf);
2617 
2618 out:
2619 	btrfs_free_path(path);
2620 	return ret;
2621 }
2622 
btrfs_grow_device(struct btrfs_trans_handle * trans,struct btrfs_device * device,u64 new_size)2623 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2624 		      struct btrfs_device *device, u64 new_size)
2625 {
2626 	struct btrfs_fs_info *fs_info = device->fs_info;
2627 	struct btrfs_super_block *super_copy = fs_info->super_copy;
2628 	struct btrfs_fs_devices *fs_devices;
2629 	u64 old_total;
2630 	u64 diff;
2631 
2632 	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2633 		return -EACCES;
2634 
2635 	new_size = round_down(new_size, fs_info->sectorsize);
2636 
2637 	mutex_lock(&fs_info->chunk_mutex);
2638 	old_total = btrfs_super_total_bytes(super_copy);
2639 	diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2640 
2641 	if (new_size <= device->total_bytes ||
2642 	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2643 		mutex_unlock(&fs_info->chunk_mutex);
2644 		return -EINVAL;
2645 	}
2646 
2647 	fs_devices = fs_info->fs_devices;
2648 
2649 	btrfs_set_super_total_bytes(super_copy,
2650 			round_down(old_total + diff, fs_info->sectorsize));
2651 	device->fs_devices->total_rw_bytes += diff;
2652 
2653 	btrfs_device_set_total_bytes(device, new_size);
2654 	btrfs_device_set_disk_total_bytes(device, new_size);
2655 	btrfs_clear_space_info_full(device->fs_info);
2656 	if (list_empty(&device->resized_list))
2657 		list_add_tail(&device->resized_list,
2658 			      &fs_devices->resized_devices);
2659 	mutex_unlock(&fs_info->chunk_mutex);
2660 
2661 	return btrfs_update_device(trans, device);
2662 }
2663 
btrfs_free_chunk(struct btrfs_trans_handle * trans,u64 chunk_offset)2664 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2665 {
2666 	struct btrfs_fs_info *fs_info = trans->fs_info;
2667 	struct btrfs_root *root = fs_info->chunk_root;
2668 	int ret;
2669 	struct btrfs_path *path;
2670 	struct btrfs_key key;
2671 
2672 	path = btrfs_alloc_path();
2673 	if (!path)
2674 		return -ENOMEM;
2675 
2676 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2677 	key.offset = chunk_offset;
2678 	key.type = BTRFS_CHUNK_ITEM_KEY;
2679 
2680 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2681 	if (ret < 0)
2682 		goto out;
2683 	else if (ret > 0) { /* Logic error or corruption */
2684 		btrfs_handle_fs_error(fs_info, -ENOENT,
2685 				      "Failed lookup while freeing chunk.");
2686 		ret = -ENOENT;
2687 		goto out;
2688 	}
2689 
2690 	ret = btrfs_del_item(trans, root, path);
2691 	if (ret < 0)
2692 		btrfs_handle_fs_error(fs_info, ret,
2693 				      "Failed to delete chunk item.");
2694 out:
2695 	btrfs_free_path(path);
2696 	return ret;
2697 }
2698 
btrfs_del_sys_chunk(struct btrfs_fs_info * fs_info,u64 chunk_offset)2699 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2700 {
2701 	struct btrfs_super_block *super_copy = fs_info->super_copy;
2702 	struct btrfs_disk_key *disk_key;
2703 	struct btrfs_chunk *chunk;
2704 	u8 *ptr;
2705 	int ret = 0;
2706 	u32 num_stripes;
2707 	u32 array_size;
2708 	u32 len = 0;
2709 	u32 cur;
2710 	struct btrfs_key key;
2711 
2712 	mutex_lock(&fs_info->chunk_mutex);
2713 	array_size = btrfs_super_sys_array_size(super_copy);
2714 
2715 	ptr = super_copy->sys_chunk_array;
2716 	cur = 0;
2717 
2718 	while (cur < array_size) {
2719 		disk_key = (struct btrfs_disk_key *)ptr;
2720 		btrfs_disk_key_to_cpu(&key, disk_key);
2721 
2722 		len = sizeof(*disk_key);
2723 
2724 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2725 			chunk = (struct btrfs_chunk *)(ptr + len);
2726 			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2727 			len += btrfs_chunk_item_size(num_stripes);
2728 		} else {
2729 			ret = -EIO;
2730 			break;
2731 		}
2732 		if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
2733 		    key.offset == chunk_offset) {
2734 			memmove(ptr, ptr + len, array_size - (cur + len));
2735 			array_size -= len;
2736 			btrfs_set_super_sys_array_size(super_copy, array_size);
2737 		} else {
2738 			ptr += len;
2739 			cur += len;
2740 		}
2741 	}
2742 	mutex_unlock(&fs_info->chunk_mutex);
2743 	return ret;
2744 }
2745 
get_chunk_map(struct btrfs_fs_info * fs_info,u64 logical,u64 length)2746 static struct extent_map *get_chunk_map(struct btrfs_fs_info *fs_info,
2747 					u64 logical, u64 length)
2748 {
2749 	struct extent_map_tree *em_tree;
2750 	struct extent_map *em;
2751 
2752 	em_tree = &fs_info->mapping_tree.map_tree;
2753 	read_lock(&em_tree->lock);
2754 	em = lookup_extent_mapping(em_tree, logical, length);
2755 	read_unlock(&em_tree->lock);
2756 
2757 	if (!em) {
2758 		btrfs_crit(fs_info, "unable to find logical %llu length %llu",
2759 			   logical, length);
2760 		return ERR_PTR(-EINVAL);
2761 	}
2762 
2763 	if (em->start > logical || em->start + em->len < logical) {
2764 		btrfs_crit(fs_info,
2765 			   "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
2766 			   logical, length, em->start, em->start + em->len);
2767 		free_extent_map(em);
2768 		return ERR_PTR(-EINVAL);
2769 	}
2770 
2771 	/* callers are responsible for dropping em's ref. */
2772 	return em;
2773 }
2774 
btrfs_remove_chunk(struct btrfs_trans_handle * trans,u64 chunk_offset)2775 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2776 {
2777 	struct btrfs_fs_info *fs_info = trans->fs_info;
2778 	struct extent_map *em;
2779 	struct map_lookup *map;
2780 	u64 dev_extent_len = 0;
2781 	int i, ret = 0;
2782 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2783 
2784 	em = get_chunk_map(fs_info, chunk_offset, 1);
2785 	if (IS_ERR(em)) {
2786 		/*
2787 		 * This is a logic error, but we don't want to just rely on the
2788 		 * user having built with ASSERT enabled, so if ASSERT doesn't
2789 		 * do anything we still error out.
2790 		 */
2791 		ASSERT(0);
2792 		return PTR_ERR(em);
2793 	}
2794 	map = em->map_lookup;
2795 	mutex_lock(&fs_info->chunk_mutex);
2796 	check_system_chunk(trans, map->type);
2797 	mutex_unlock(&fs_info->chunk_mutex);
2798 
2799 	/*
2800 	 * Take the device list mutex to prevent races with the final phase of
2801 	 * a device replace operation that replaces the device object associated
2802 	 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
2803 	 */
2804 	mutex_lock(&fs_devices->device_list_mutex);
2805 	for (i = 0; i < map->num_stripes; i++) {
2806 		struct btrfs_device *device = map->stripes[i].dev;
2807 		ret = btrfs_free_dev_extent(trans, device,
2808 					    map->stripes[i].physical,
2809 					    &dev_extent_len);
2810 		if (ret) {
2811 			mutex_unlock(&fs_devices->device_list_mutex);
2812 			btrfs_abort_transaction(trans, ret);
2813 			goto out;
2814 		}
2815 
2816 		if (device->bytes_used > 0) {
2817 			mutex_lock(&fs_info->chunk_mutex);
2818 			btrfs_device_set_bytes_used(device,
2819 					device->bytes_used - dev_extent_len);
2820 			atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
2821 			btrfs_clear_space_info_full(fs_info);
2822 			mutex_unlock(&fs_info->chunk_mutex);
2823 		}
2824 
2825 		if (map->stripes[i].dev) {
2826 			ret = btrfs_update_device(trans, map->stripes[i].dev);
2827 			if (ret) {
2828 				mutex_unlock(&fs_devices->device_list_mutex);
2829 				btrfs_abort_transaction(trans, ret);
2830 				goto out;
2831 			}
2832 		}
2833 	}
2834 	mutex_unlock(&fs_devices->device_list_mutex);
2835 
2836 	ret = btrfs_free_chunk(trans, chunk_offset);
2837 	if (ret) {
2838 		btrfs_abort_transaction(trans, ret);
2839 		goto out;
2840 	}
2841 
2842 	trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
2843 
2844 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2845 		ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
2846 		if (ret) {
2847 			btrfs_abort_transaction(trans, ret);
2848 			goto out;
2849 		}
2850 	}
2851 
2852 	ret = btrfs_remove_block_group(trans, chunk_offset, em);
2853 	if (ret) {
2854 		btrfs_abort_transaction(trans, ret);
2855 		goto out;
2856 	}
2857 
2858 out:
2859 	/* once for us */
2860 	free_extent_map(em);
2861 	return ret;
2862 }
2863 
btrfs_relocate_chunk(struct btrfs_fs_info * fs_info,u64 chunk_offset)2864 static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2865 {
2866 	struct btrfs_root *root = fs_info->chunk_root;
2867 	struct btrfs_trans_handle *trans;
2868 	int ret;
2869 
2870 	/*
2871 	 * Prevent races with automatic removal of unused block groups.
2872 	 * After we relocate and before we remove the chunk with offset
2873 	 * chunk_offset, automatic removal of the block group can kick in,
2874 	 * resulting in a failure when calling btrfs_remove_chunk() below.
2875 	 *
2876 	 * Make sure to acquire this mutex before doing a tree search (dev
2877 	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
2878 	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
2879 	 * we release the path used to search the chunk/dev tree and before
2880 	 * the current task acquires this mutex and calls us.
2881 	 */
2882 	lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);
2883 
2884 	ret = btrfs_can_relocate(fs_info, chunk_offset);
2885 	if (ret)
2886 		return -ENOSPC;
2887 
2888 	/* step one, relocate all the extents inside this chunk */
2889 	btrfs_scrub_pause(fs_info);
2890 	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
2891 	btrfs_scrub_continue(fs_info);
2892 	if (ret)
2893 		return ret;
2894 
2895 	/*
2896 	 * We add the kobjects here (and after forcing data chunk creation)
2897 	 * since relocation is the only place we'll create chunks of a new
2898 	 * type at runtime.  The only place where we'll remove the last
2899 	 * chunk of a type is the call immediately below this one.  Even
2900 	 * so, we're protected against races with the cleaner thread since
2901 	 * we're covered by the delete_unused_bgs_mutex.
2902 	 */
2903 	btrfs_add_raid_kobjects(fs_info);
2904 
2905 	trans = btrfs_start_trans_remove_block_group(root->fs_info,
2906 						     chunk_offset);
2907 	if (IS_ERR(trans)) {
2908 		ret = PTR_ERR(trans);
2909 		btrfs_handle_fs_error(root->fs_info, ret, NULL);
2910 		return ret;
2911 	}
2912 
2913 	/*
2914 	 * step two, delete the device extents and the
2915 	 * chunk tree entries
2916 	 */
2917 	ret = btrfs_remove_chunk(trans, chunk_offset);
2918 	btrfs_end_transaction(trans);
2919 	return ret;
2920 }
2921 
btrfs_relocate_sys_chunks(struct btrfs_fs_info * fs_info)2922 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
2923 {
2924 	struct btrfs_root *chunk_root = fs_info->chunk_root;
2925 	struct btrfs_path *path;
2926 	struct extent_buffer *leaf;
2927 	struct btrfs_chunk *chunk;
2928 	struct btrfs_key key;
2929 	struct btrfs_key found_key;
2930 	u64 chunk_type;
2931 	bool retried = false;
2932 	int failed = 0;
2933 	int ret;
2934 
2935 	path = btrfs_alloc_path();
2936 	if (!path)
2937 		return -ENOMEM;
2938 
2939 again:
2940 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2941 	key.offset = (u64)-1;
2942 	key.type = BTRFS_CHUNK_ITEM_KEY;
2943 
2944 	while (1) {
2945 		mutex_lock(&fs_info->delete_unused_bgs_mutex);
2946 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2947 		if (ret < 0) {
2948 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
2949 			goto error;
2950 		}
2951 		BUG_ON(ret == 0); /* Corruption */
2952 
2953 		ret = btrfs_previous_item(chunk_root, path, key.objectid,
2954 					  key.type);
2955 		if (ret)
2956 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
2957 		if (ret < 0)
2958 			goto error;
2959 		if (ret > 0)
2960 			break;
2961 
2962 		leaf = path->nodes[0];
2963 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2964 
2965 		chunk = btrfs_item_ptr(leaf, path->slots[0],
2966 				       struct btrfs_chunk);
2967 		chunk_type = btrfs_chunk_type(leaf, chunk);
2968 		btrfs_release_path(path);
2969 
2970 		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2971 			ret = btrfs_relocate_chunk(fs_info, found_key.offset);
2972 			if (ret == -ENOSPC)
2973 				failed++;
2974 			else
2975 				BUG_ON(ret);
2976 		}
2977 		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
2978 
2979 		if (found_key.offset == 0)
2980 			break;
2981 		key.offset = found_key.offset - 1;
2982 	}
2983 	ret = 0;
2984 	if (failed && !retried) {
2985 		failed = 0;
2986 		retried = true;
2987 		goto again;
2988 	} else if (WARN_ON(failed && retried)) {
2989 		ret = -ENOSPC;
2990 	}
2991 error:
2992 	btrfs_free_path(path);
2993 	return ret;
2994 }
2995 
2996 /*
2997  * return 1 : allocate a data chunk successfully,
2998  * return <0: errors during allocating a data chunk,
2999  * return 0 : no need to allocate a data chunk.
3000  */
btrfs_may_alloc_data_chunk(struct btrfs_fs_info * fs_info,u64 chunk_offset)3001 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3002 				      u64 chunk_offset)
3003 {
3004 	struct btrfs_block_group_cache *cache;
3005 	u64 bytes_used;
3006 	u64 chunk_type;
3007 
3008 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3009 	ASSERT(cache);
3010 	chunk_type = cache->flags;
3011 	btrfs_put_block_group(cache);
3012 
3013 	if (chunk_type & BTRFS_BLOCK_GROUP_DATA) {
3014 		spin_lock(&fs_info->data_sinfo->lock);
3015 		bytes_used = fs_info->data_sinfo->bytes_used;
3016 		spin_unlock(&fs_info->data_sinfo->lock);
3017 
3018 		if (!bytes_used) {
3019 			struct btrfs_trans_handle *trans;
3020 			int ret;
3021 
3022 			trans =	btrfs_join_transaction(fs_info->tree_root);
3023 			if (IS_ERR(trans))
3024 				return PTR_ERR(trans);
3025 
3026 			ret = btrfs_force_chunk_alloc(trans,
3027 						      BTRFS_BLOCK_GROUP_DATA);
3028 			btrfs_end_transaction(trans);
3029 			if (ret < 0)
3030 				return ret;
3031 
3032 			btrfs_add_raid_kobjects(fs_info);
3033 
3034 			return 1;
3035 		}
3036 	}
3037 	return 0;
3038 }
3039 
insert_balance_item(struct btrfs_fs_info * fs_info,struct btrfs_balance_control * bctl)3040 static int insert_balance_item(struct btrfs_fs_info *fs_info,
3041 			       struct btrfs_balance_control *bctl)
3042 {
3043 	struct btrfs_root *root = fs_info->tree_root;
3044 	struct btrfs_trans_handle *trans;
3045 	struct btrfs_balance_item *item;
3046 	struct btrfs_disk_balance_args disk_bargs;
3047 	struct btrfs_path *path;
3048 	struct extent_buffer *leaf;
3049 	struct btrfs_key key;
3050 	int ret, err;
3051 
3052 	path = btrfs_alloc_path();
3053 	if (!path)
3054 		return -ENOMEM;
3055 
3056 	trans = btrfs_start_transaction(root, 0);
3057 	if (IS_ERR(trans)) {
3058 		btrfs_free_path(path);
3059 		return PTR_ERR(trans);
3060 	}
3061 
3062 	key.objectid = BTRFS_BALANCE_OBJECTID;
3063 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3064 	key.offset = 0;
3065 
3066 	ret = btrfs_insert_empty_item(trans, root, path, &key,
3067 				      sizeof(*item));
3068 	if (ret)
3069 		goto out;
3070 
3071 	leaf = path->nodes[0];
3072 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3073 
3074 	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3075 
3076 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3077 	btrfs_set_balance_data(leaf, item, &disk_bargs);
3078 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3079 	btrfs_set_balance_meta(leaf, item, &disk_bargs);
3080 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3081 	btrfs_set_balance_sys(leaf, item, &disk_bargs);
3082 
3083 	btrfs_set_balance_flags(leaf, item, bctl->flags);
3084 
3085 	btrfs_mark_buffer_dirty(leaf);
3086 out:
3087 	btrfs_free_path(path);
3088 	err = btrfs_commit_transaction(trans);
3089 	if (err && !ret)
3090 		ret = err;
3091 	return ret;
3092 }
3093 
del_balance_item(struct btrfs_fs_info * fs_info)3094 static int del_balance_item(struct btrfs_fs_info *fs_info)
3095 {
3096 	struct btrfs_root *root = fs_info->tree_root;
3097 	struct btrfs_trans_handle *trans;
3098 	struct btrfs_path *path;
3099 	struct btrfs_key key;
3100 	int ret, err;
3101 
3102 	path = btrfs_alloc_path();
3103 	if (!path)
3104 		return -ENOMEM;
3105 
3106 	trans = btrfs_start_transaction(root, 0);
3107 	if (IS_ERR(trans)) {
3108 		btrfs_free_path(path);
3109 		return PTR_ERR(trans);
3110 	}
3111 
3112 	key.objectid = BTRFS_BALANCE_OBJECTID;
3113 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3114 	key.offset = 0;
3115 
3116 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3117 	if (ret < 0)
3118 		goto out;
3119 	if (ret > 0) {
3120 		ret = -ENOENT;
3121 		goto out;
3122 	}
3123 
3124 	ret = btrfs_del_item(trans, root, path);
3125 out:
3126 	btrfs_free_path(path);
3127 	err = btrfs_commit_transaction(trans);
3128 	if (err && !ret)
3129 		ret = err;
3130 	return ret;
3131 }
3132 
3133 /*
3134  * This is a heuristic used to reduce the number of chunks balanced on
3135  * resume after balance was interrupted.
3136  */
update_balance_args(struct btrfs_balance_control * bctl)3137 static void update_balance_args(struct btrfs_balance_control *bctl)
3138 {
3139 	/*
3140 	 * Turn on soft mode for chunk types that were being converted.
3141 	 */
3142 	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3143 		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3144 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3145 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3146 	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3147 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3148 
3149 	/*
3150 	 * Turn on usage filter if is not already used.  The idea is
3151 	 * that chunks that we have already balanced should be
3152 	 * reasonably full.  Don't do it for chunks that are being
3153 	 * converted - that will keep us from relocating unconverted
3154 	 * (albeit full) chunks.
3155 	 */
3156 	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3157 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3158 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3159 		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3160 		bctl->data.usage = 90;
3161 	}
3162 	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3163 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3164 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3165 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3166 		bctl->sys.usage = 90;
3167 	}
3168 	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3169 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3170 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3171 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3172 		bctl->meta.usage = 90;
3173 	}
3174 }
3175 
3176 /*
3177  * Clear the balance status in fs_info and delete the balance item from disk.
3178  */
reset_balance_state(struct btrfs_fs_info * fs_info)3179 static void reset_balance_state(struct btrfs_fs_info *fs_info)
3180 {
3181 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3182 	int ret;
3183 
3184 	BUG_ON(!fs_info->balance_ctl);
3185 
3186 	spin_lock(&fs_info->balance_lock);
3187 	fs_info->balance_ctl = NULL;
3188 	spin_unlock(&fs_info->balance_lock);
3189 
3190 	kfree(bctl);
3191 	ret = del_balance_item(fs_info);
3192 	if (ret)
3193 		btrfs_handle_fs_error(fs_info, ret, NULL);
3194 }
3195 
3196 /*
3197  * Balance filters.  Return 1 if chunk should be filtered out
3198  * (should not be balanced).
3199  */
chunk_profiles_filter(u64 chunk_type,struct btrfs_balance_args * bargs)3200 static int chunk_profiles_filter(u64 chunk_type,
3201 				 struct btrfs_balance_args *bargs)
3202 {
3203 	chunk_type = chunk_to_extended(chunk_type) &
3204 				BTRFS_EXTENDED_PROFILE_MASK;
3205 
3206 	if (bargs->profiles & chunk_type)
3207 		return 0;
3208 
3209 	return 1;
3210 }
3211 
chunk_usage_range_filter(struct btrfs_fs_info * fs_info,u64 chunk_offset,struct btrfs_balance_args * bargs)3212 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3213 			      struct btrfs_balance_args *bargs)
3214 {
3215 	struct btrfs_block_group_cache *cache;
3216 	u64 chunk_used;
3217 	u64 user_thresh_min;
3218 	u64 user_thresh_max;
3219 	int ret = 1;
3220 
3221 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3222 	chunk_used = btrfs_block_group_used(&cache->item);
3223 
3224 	if (bargs->usage_min == 0)
3225 		user_thresh_min = 0;
3226 	else
3227 		user_thresh_min = div_factor_fine(cache->key.offset,
3228 					bargs->usage_min);
3229 
3230 	if (bargs->usage_max == 0)
3231 		user_thresh_max = 1;
3232 	else if (bargs->usage_max > 100)
3233 		user_thresh_max = cache->key.offset;
3234 	else
3235 		user_thresh_max = div_factor_fine(cache->key.offset,
3236 					bargs->usage_max);
3237 
3238 	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3239 		ret = 0;
3240 
3241 	btrfs_put_block_group(cache);
3242 	return ret;
3243 }
3244 
chunk_usage_filter(struct btrfs_fs_info * fs_info,u64 chunk_offset,struct btrfs_balance_args * bargs)3245 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3246 		u64 chunk_offset, struct btrfs_balance_args *bargs)
3247 {
3248 	struct btrfs_block_group_cache *cache;
3249 	u64 chunk_used, user_thresh;
3250 	int ret = 1;
3251 
3252 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3253 	chunk_used = btrfs_block_group_used(&cache->item);
3254 
3255 	if (bargs->usage_min == 0)
3256 		user_thresh = 1;
3257 	else if (bargs->usage > 100)
3258 		user_thresh = cache->key.offset;
3259 	else
3260 		user_thresh = div_factor_fine(cache->key.offset,
3261 					      bargs->usage);
3262 
3263 	if (chunk_used < user_thresh)
3264 		ret = 0;
3265 
3266 	btrfs_put_block_group(cache);
3267 	return ret;
3268 }
3269 
chunk_devid_filter(struct extent_buffer * leaf,struct btrfs_chunk * chunk,struct btrfs_balance_args * bargs)3270 static int chunk_devid_filter(struct extent_buffer *leaf,
3271 			      struct btrfs_chunk *chunk,
3272 			      struct btrfs_balance_args *bargs)
3273 {
3274 	struct btrfs_stripe *stripe;
3275 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3276 	int i;
3277 
3278 	for (i = 0; i < num_stripes; i++) {
3279 		stripe = btrfs_stripe_nr(chunk, i);
3280 		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3281 			return 0;
3282 	}
3283 
3284 	return 1;
3285 }
3286 
3287 /* [pstart, pend) */
chunk_drange_filter(struct extent_buffer * leaf,struct btrfs_chunk * chunk,struct btrfs_balance_args * bargs)3288 static int chunk_drange_filter(struct extent_buffer *leaf,
3289 			       struct btrfs_chunk *chunk,
3290 			       struct btrfs_balance_args *bargs)
3291 {
3292 	struct btrfs_stripe *stripe;
3293 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3294 	u64 stripe_offset;
3295 	u64 stripe_length;
3296 	int factor;
3297 	int i;
3298 
3299 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3300 		return 0;
3301 
3302 	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
3303 	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
3304 		factor = num_stripes / 2;
3305 	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
3306 		factor = num_stripes - 1;
3307 	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
3308 		factor = num_stripes - 2;
3309 	} else {
3310 		factor = num_stripes;
3311 	}
3312 
3313 	for (i = 0; i < num_stripes; i++) {
3314 		stripe = btrfs_stripe_nr(chunk, i);
3315 		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3316 			continue;
3317 
3318 		stripe_offset = btrfs_stripe_offset(leaf, stripe);
3319 		stripe_length = btrfs_chunk_length(leaf, chunk);
3320 		stripe_length = div_u64(stripe_length, factor);
3321 
3322 		if (stripe_offset < bargs->pend &&
3323 		    stripe_offset + stripe_length > bargs->pstart)
3324 			return 0;
3325 	}
3326 
3327 	return 1;
3328 }
3329 
3330 /* [vstart, vend) */
chunk_vrange_filter(struct extent_buffer * leaf,struct btrfs_chunk * chunk,u64 chunk_offset,struct btrfs_balance_args * bargs)3331 static int chunk_vrange_filter(struct extent_buffer *leaf,
3332 			       struct btrfs_chunk *chunk,
3333 			       u64 chunk_offset,
3334 			       struct btrfs_balance_args *bargs)
3335 {
3336 	if (chunk_offset < bargs->vend &&
3337 	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3338 		/* at least part of the chunk is inside this vrange */
3339 		return 0;
3340 
3341 	return 1;
3342 }
3343 
chunk_stripes_range_filter(struct extent_buffer * leaf,struct btrfs_chunk * chunk,struct btrfs_balance_args * bargs)3344 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3345 			       struct btrfs_chunk *chunk,
3346 			       struct btrfs_balance_args *bargs)
3347 {
3348 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3349 
3350 	if (bargs->stripes_min <= num_stripes
3351 			&& num_stripes <= bargs->stripes_max)
3352 		return 0;
3353 
3354 	return 1;
3355 }
3356 
chunk_soft_convert_filter(u64 chunk_type,struct btrfs_balance_args * bargs)3357 static int chunk_soft_convert_filter(u64 chunk_type,
3358 				     struct btrfs_balance_args *bargs)
3359 {
3360 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3361 		return 0;
3362 
3363 	chunk_type = chunk_to_extended(chunk_type) &
3364 				BTRFS_EXTENDED_PROFILE_MASK;
3365 
3366 	if (bargs->target == chunk_type)
3367 		return 1;
3368 
3369 	return 0;
3370 }
3371 
should_balance_chunk(struct btrfs_fs_info * fs_info,struct extent_buffer * leaf,struct btrfs_chunk * chunk,u64 chunk_offset)3372 static int should_balance_chunk(struct btrfs_fs_info *fs_info,
3373 				struct extent_buffer *leaf,
3374 				struct btrfs_chunk *chunk, u64 chunk_offset)
3375 {
3376 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3377 	struct btrfs_balance_args *bargs = NULL;
3378 	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3379 
3380 	/* type filter */
3381 	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3382 	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3383 		return 0;
3384 	}
3385 
3386 	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3387 		bargs = &bctl->data;
3388 	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3389 		bargs = &bctl->sys;
3390 	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3391 		bargs = &bctl->meta;
3392 
3393 	/* profiles filter */
3394 	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3395 	    chunk_profiles_filter(chunk_type, bargs)) {
3396 		return 0;
3397 	}
3398 
3399 	/* usage filter */
3400 	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3401 	    chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3402 		return 0;
3403 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3404 	    chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3405 		return 0;
3406 	}
3407 
3408 	/* devid filter */
3409 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3410 	    chunk_devid_filter(leaf, chunk, bargs)) {
3411 		return 0;
3412 	}
3413 
3414 	/* drange filter, makes sense only with devid filter */
3415 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3416 	    chunk_drange_filter(leaf, chunk, bargs)) {
3417 		return 0;
3418 	}
3419 
3420 	/* vrange filter */
3421 	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3422 	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3423 		return 0;
3424 	}
3425 
3426 	/* stripes filter */
3427 	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3428 	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
3429 		return 0;
3430 	}
3431 
3432 	/* soft profile changing mode */
3433 	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3434 	    chunk_soft_convert_filter(chunk_type, bargs)) {
3435 		return 0;
3436 	}
3437 
3438 	/*
3439 	 * limited by count, must be the last filter
3440 	 */
3441 	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3442 		if (bargs->limit == 0)
3443 			return 0;
3444 		else
3445 			bargs->limit--;
3446 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3447 		/*
3448 		 * Same logic as the 'limit' filter; the minimum cannot be
3449 		 * determined here because we do not have the global information
3450 		 * about the count of all chunks that satisfy the filters.
3451 		 */
3452 		if (bargs->limit_max == 0)
3453 			return 0;
3454 		else
3455 			bargs->limit_max--;
3456 	}
3457 
3458 	return 1;
3459 }
3460 
__btrfs_balance(struct btrfs_fs_info * fs_info)3461 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3462 {
3463 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3464 	struct btrfs_root *chunk_root = fs_info->chunk_root;
3465 	struct btrfs_root *dev_root = fs_info->dev_root;
3466 	struct list_head *devices;
3467 	struct btrfs_device *device;
3468 	u64 old_size;
3469 	u64 size_to_free;
3470 	u64 chunk_type;
3471 	struct btrfs_chunk *chunk;
3472 	struct btrfs_path *path = NULL;
3473 	struct btrfs_key key;
3474 	struct btrfs_key found_key;
3475 	struct btrfs_trans_handle *trans;
3476 	struct extent_buffer *leaf;
3477 	int slot;
3478 	int ret;
3479 	int enospc_errors = 0;
3480 	bool counting = true;
3481 	/* The single value limit and min/max limits use the same bytes in the */
3482 	u64 limit_data = bctl->data.limit;
3483 	u64 limit_meta = bctl->meta.limit;
3484 	u64 limit_sys = bctl->sys.limit;
3485 	u32 count_data = 0;
3486 	u32 count_meta = 0;
3487 	u32 count_sys = 0;
3488 	int chunk_reserved = 0;
3489 
3490 	/* step one make some room on all the devices */
3491 	devices = &fs_info->fs_devices->devices;
3492 	list_for_each_entry(device, devices, dev_list) {
3493 		old_size = btrfs_device_get_total_bytes(device);
3494 		size_to_free = div_factor(old_size, 1);
3495 		size_to_free = min_t(u64, size_to_free, SZ_1M);
3496 		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) ||
3497 		    btrfs_device_get_total_bytes(device) -
3498 		    btrfs_device_get_bytes_used(device) > size_to_free ||
3499 		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
3500 			continue;
3501 
3502 		ret = btrfs_shrink_device(device, old_size - size_to_free);
3503 		if (ret == -ENOSPC)
3504 			break;
3505 		if (ret) {
3506 			/* btrfs_shrink_device never returns ret > 0 */
3507 			WARN_ON(ret > 0);
3508 			goto error;
3509 		}
3510 
3511 		trans = btrfs_start_transaction(dev_root, 0);
3512 		if (IS_ERR(trans)) {
3513 			ret = PTR_ERR(trans);
3514 			btrfs_info_in_rcu(fs_info,
3515 		 "resize: unable to start transaction after shrinking device %s (error %d), old size %llu, new size %llu",
3516 					  rcu_str_deref(device->name), ret,
3517 					  old_size, old_size - size_to_free);
3518 			goto error;
3519 		}
3520 
3521 		ret = btrfs_grow_device(trans, device, old_size);
3522 		if (ret) {
3523 			btrfs_end_transaction(trans);
3524 			/* btrfs_grow_device never returns ret > 0 */
3525 			WARN_ON(ret > 0);
3526 			btrfs_info_in_rcu(fs_info,
3527 		 "resize: unable to grow device after shrinking device %s (error %d), old size %llu, new size %llu",
3528 					  rcu_str_deref(device->name), ret,
3529 					  old_size, old_size - size_to_free);
3530 			goto error;
3531 		}
3532 
3533 		btrfs_end_transaction(trans);
3534 	}
3535 
3536 	/* step two, relocate all the chunks */
3537 	path = btrfs_alloc_path();
3538 	if (!path) {
3539 		ret = -ENOMEM;
3540 		goto error;
3541 	}
3542 
3543 	/* zero out stat counters */
3544 	spin_lock(&fs_info->balance_lock);
3545 	memset(&bctl->stat, 0, sizeof(bctl->stat));
3546 	spin_unlock(&fs_info->balance_lock);
3547 again:
3548 	if (!counting) {
3549 		/*
3550 		 * The single value limit and min/max limits use the same bytes
3551 		 * in the
3552 		 */
3553 		bctl->data.limit = limit_data;
3554 		bctl->meta.limit = limit_meta;
3555 		bctl->sys.limit = limit_sys;
3556 	}
3557 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3558 	key.offset = (u64)-1;
3559 	key.type = BTRFS_CHUNK_ITEM_KEY;
3560 
3561 	while (1) {
3562 		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3563 		    atomic_read(&fs_info->balance_cancel_req)) {
3564 			ret = -ECANCELED;
3565 			goto error;
3566 		}
3567 
3568 		mutex_lock(&fs_info->delete_unused_bgs_mutex);
3569 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3570 		if (ret < 0) {
3571 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3572 			goto error;
3573 		}
3574 
3575 		/*
3576 		 * this shouldn't happen, it means the last relocate
3577 		 * failed
3578 		 */
3579 		if (ret == 0)
3580 			BUG(); /* FIXME break ? */
3581 
3582 		ret = btrfs_previous_item(chunk_root, path, 0,
3583 					  BTRFS_CHUNK_ITEM_KEY);
3584 		if (ret) {
3585 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3586 			ret = 0;
3587 			break;
3588 		}
3589 
3590 		leaf = path->nodes[0];
3591 		slot = path->slots[0];
3592 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3593 
3594 		if (found_key.objectid != key.objectid) {
3595 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3596 			break;
3597 		}
3598 
3599 		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3600 		chunk_type = btrfs_chunk_type(leaf, chunk);
3601 
3602 		if (!counting) {
3603 			spin_lock(&fs_info->balance_lock);
3604 			bctl->stat.considered++;
3605 			spin_unlock(&fs_info->balance_lock);
3606 		}
3607 
3608 		ret = should_balance_chunk(fs_info, leaf, chunk,
3609 					   found_key.offset);
3610 
3611 		btrfs_release_path(path);
3612 		if (!ret) {
3613 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3614 			goto loop;
3615 		}
3616 
3617 		if (counting) {
3618 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3619 			spin_lock(&fs_info->balance_lock);
3620 			bctl->stat.expected++;
3621 			spin_unlock(&fs_info->balance_lock);
3622 
3623 			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3624 				count_data++;
3625 			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3626 				count_sys++;
3627 			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3628 				count_meta++;
3629 
3630 			goto loop;
3631 		}
3632 
3633 		/*
3634 		 * Apply limit_min filter, no need to check if the LIMITS
3635 		 * filter is used, limit_min is 0 by default
3636 		 */
3637 		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3638 					count_data < bctl->data.limit_min)
3639 				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3640 					count_meta < bctl->meta.limit_min)
3641 				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3642 					count_sys < bctl->sys.limit_min)) {
3643 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3644 			goto loop;
3645 		}
3646 
3647 		if (!chunk_reserved) {
3648 			/*
3649 			 * We may be relocating the only data chunk we have,
3650 			 * which could potentially end up with losing data's
3651 			 * raid profile, so lets allocate an empty one in
3652 			 * advance.
3653 			 */
3654 			ret = btrfs_may_alloc_data_chunk(fs_info,
3655 							 found_key.offset);
3656 			if (ret < 0) {
3657 				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3658 				goto error;
3659 			} else if (ret == 1) {
3660 				chunk_reserved = 1;
3661 			}
3662 		}
3663 
3664 		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3665 		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3666 		if (ret && ret != -ENOSPC)
3667 			goto error;
3668 		if (ret == -ENOSPC) {
3669 			enospc_errors++;
3670 		} else {
3671 			spin_lock(&fs_info->balance_lock);
3672 			bctl->stat.completed++;
3673 			spin_unlock(&fs_info->balance_lock);
3674 		}
3675 loop:
3676 		if (found_key.offset == 0)
3677 			break;
3678 		key.offset = found_key.offset - 1;
3679 	}
3680 
3681 	if (counting) {
3682 		btrfs_release_path(path);
3683 		counting = false;
3684 		goto again;
3685 	}
3686 error:
3687 	btrfs_free_path(path);
3688 	if (enospc_errors) {
3689 		btrfs_info(fs_info, "%d enospc errors during balance",
3690 			   enospc_errors);
3691 		if (!ret)
3692 			ret = -ENOSPC;
3693 	}
3694 
3695 	return ret;
3696 }
3697 
3698 /**
3699  * alloc_profile_is_valid - see if a given profile is valid and reduced
3700  * @flags: profile to validate
3701  * @extended: if true @flags is treated as an extended profile
3702  */
alloc_profile_is_valid(u64 flags,int extended)3703 static int alloc_profile_is_valid(u64 flags, int extended)
3704 {
3705 	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3706 			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
3707 
3708 	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3709 
3710 	/* 1) check that all other bits are zeroed */
3711 	if (flags & ~mask)
3712 		return 0;
3713 
3714 	/* 2) see if profile is reduced */
3715 	if (flags == 0)
3716 		return !extended; /* "0" is valid for usual profiles */
3717 
3718 	/* true if exactly one bit set */
3719 	return (flags & (flags - 1)) == 0;
3720 }
3721 
balance_need_close(struct btrfs_fs_info * fs_info)3722 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3723 {
3724 	/* cancel requested || normal exit path */
3725 	return atomic_read(&fs_info->balance_cancel_req) ||
3726 		(atomic_read(&fs_info->balance_pause_req) == 0 &&
3727 		 atomic_read(&fs_info->balance_cancel_req) == 0);
3728 }
3729 
3730 /* Non-zero return value signifies invalidity */
validate_convert_profile(struct btrfs_balance_args * bctl_arg,u64 allowed)3731 static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
3732 		u64 allowed)
3733 {
3734 	return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3735 		(!alloc_profile_is_valid(bctl_arg->target, 1) ||
3736 		 (bctl_arg->target & ~allowed)));
3737 }
3738 
3739 /*
3740  * Should be called with balance mutexe held
3741  */
btrfs_balance(struct btrfs_fs_info * fs_info,struct btrfs_balance_control * bctl,struct btrfs_ioctl_balance_args * bargs)3742 int btrfs_balance(struct btrfs_fs_info *fs_info,
3743 		  struct btrfs_balance_control *bctl,
3744 		  struct btrfs_ioctl_balance_args *bargs)
3745 {
3746 	u64 meta_target, data_target;
3747 	u64 allowed;
3748 	int mixed = 0;
3749 	int ret;
3750 	u64 num_devices;
3751 	unsigned seq;
3752 	bool reducing_integrity;
3753 
3754 	if (btrfs_fs_closing(fs_info) ||
3755 	    atomic_read(&fs_info->balance_pause_req) ||
3756 	    atomic_read(&fs_info->balance_cancel_req)) {
3757 		ret = -EINVAL;
3758 		goto out;
3759 	}
3760 
3761 	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3762 	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3763 		mixed = 1;
3764 
3765 	/*
3766 	 * In case of mixed groups both data and meta should be picked,
3767 	 * and identical options should be given for both of them.
3768 	 */
3769 	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3770 	if (mixed && (bctl->flags & allowed)) {
3771 		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3772 		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3773 		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3774 			btrfs_err(fs_info,
3775 	  "balance: mixed groups data and metadata options must be the same");
3776 			ret = -EINVAL;
3777 			goto out;
3778 		}
3779 	}
3780 
3781 	num_devices = fs_info->fs_devices->num_devices;
3782 	btrfs_dev_replace_read_lock(&fs_info->dev_replace);
3783 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3784 		BUG_ON(num_devices < 1);
3785 		num_devices--;
3786 	}
3787 	btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
3788 	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE | BTRFS_BLOCK_GROUP_DUP;
3789 	if (num_devices > 1)
3790 		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3791 	if (num_devices > 2)
3792 		allowed |= BTRFS_BLOCK_GROUP_RAID5;
3793 	if (num_devices > 3)
3794 		allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3795 			    BTRFS_BLOCK_GROUP_RAID6);
3796 	if (validate_convert_profile(&bctl->data, allowed)) {
3797 		int index = btrfs_bg_flags_to_raid_index(bctl->data.target);
3798 
3799 		btrfs_err(fs_info,
3800 			  "balance: invalid convert data profile %s",
3801 			  get_raid_name(index));
3802 		ret = -EINVAL;
3803 		goto out;
3804 	}
3805 	if (validate_convert_profile(&bctl->meta, allowed)) {
3806 		int index = btrfs_bg_flags_to_raid_index(bctl->meta.target);
3807 
3808 		btrfs_err(fs_info,
3809 			  "balance: invalid convert metadata profile %s",
3810 			  get_raid_name(index));
3811 		ret = -EINVAL;
3812 		goto out;
3813 	}
3814 	if (validate_convert_profile(&bctl->sys, allowed)) {
3815 		int index = btrfs_bg_flags_to_raid_index(bctl->sys.target);
3816 
3817 		btrfs_err(fs_info,
3818 			  "balance: invalid convert system profile %s",
3819 			  get_raid_name(index));
3820 		ret = -EINVAL;
3821 		goto out;
3822 	}
3823 
3824 	/* allow to reduce meta or sys integrity only if force set */
3825 	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3826 			BTRFS_BLOCK_GROUP_RAID10 |
3827 			BTRFS_BLOCK_GROUP_RAID5 |
3828 			BTRFS_BLOCK_GROUP_RAID6;
3829 	do {
3830 		seq = read_seqbegin(&fs_info->profiles_lock);
3831 
3832 		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3833 		     (fs_info->avail_system_alloc_bits & allowed) &&
3834 		     !(bctl->sys.target & allowed)) ||
3835 		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3836 		     (fs_info->avail_metadata_alloc_bits & allowed) &&
3837 		     !(bctl->meta.target & allowed)))
3838 			reducing_integrity = true;
3839 		else
3840 			reducing_integrity = false;
3841 
3842 		/* if we're not converting, the target field is uninitialized */
3843 		meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
3844 			bctl->meta.target : fs_info->avail_metadata_alloc_bits;
3845 		data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
3846 			bctl->data.target : fs_info->avail_data_alloc_bits;
3847 	} while (read_seqretry(&fs_info->profiles_lock, seq));
3848 
3849 	if (reducing_integrity) {
3850 		if (bctl->flags & BTRFS_BALANCE_FORCE) {
3851 			btrfs_info(fs_info,
3852 				   "balance: force reducing metadata integrity");
3853 		} else {
3854 			btrfs_err(fs_info,
3855 	  "balance: reduces metadata integrity, use --force if you want this");
3856 			ret = -EINVAL;
3857 			goto out;
3858 		}
3859 	}
3860 
3861 	if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
3862 		btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
3863 		int meta_index = btrfs_bg_flags_to_raid_index(meta_target);
3864 		int data_index = btrfs_bg_flags_to_raid_index(data_target);
3865 
3866 		btrfs_warn(fs_info,
3867 	"balance: metadata profile %s has lower redundancy than data profile %s",
3868 			   get_raid_name(meta_index), get_raid_name(data_index));
3869 	}
3870 
3871 	ret = insert_balance_item(fs_info, bctl);
3872 	if (ret && ret != -EEXIST)
3873 		goto out;
3874 
3875 	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3876 		BUG_ON(ret == -EEXIST);
3877 		BUG_ON(fs_info->balance_ctl);
3878 		spin_lock(&fs_info->balance_lock);
3879 		fs_info->balance_ctl = bctl;
3880 		spin_unlock(&fs_info->balance_lock);
3881 	} else {
3882 		BUG_ON(ret != -EEXIST);
3883 		spin_lock(&fs_info->balance_lock);
3884 		update_balance_args(bctl);
3885 		spin_unlock(&fs_info->balance_lock);
3886 	}
3887 
3888 	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
3889 	set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
3890 	mutex_unlock(&fs_info->balance_mutex);
3891 
3892 	ret = __btrfs_balance(fs_info);
3893 
3894 	mutex_lock(&fs_info->balance_mutex);
3895 	clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
3896 
3897 	if (bargs) {
3898 		memset(bargs, 0, sizeof(*bargs));
3899 		btrfs_update_ioctl_balance_args(fs_info, bargs);
3900 	}
3901 
3902 	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3903 	    balance_need_close(fs_info)) {
3904 		reset_balance_state(fs_info);
3905 		clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
3906 	}
3907 
3908 	wake_up(&fs_info->balance_wait_q);
3909 
3910 	return ret;
3911 out:
3912 	if (bctl->flags & BTRFS_BALANCE_RESUME)
3913 		reset_balance_state(fs_info);
3914 	else
3915 		kfree(bctl);
3916 	clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
3917 
3918 	return ret;
3919 }
3920 
balance_kthread(void * data)3921 static int balance_kthread(void *data)
3922 {
3923 	struct btrfs_fs_info *fs_info = data;
3924 	int ret = 0;
3925 
3926 	mutex_lock(&fs_info->balance_mutex);
3927 	if (fs_info->balance_ctl) {
3928 		btrfs_info(fs_info, "balance: resuming");
3929 		ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
3930 	}
3931 	mutex_unlock(&fs_info->balance_mutex);
3932 
3933 	return ret;
3934 }
3935 
btrfs_resume_balance_async(struct btrfs_fs_info * fs_info)3936 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3937 {
3938 	struct task_struct *tsk;
3939 
3940 	mutex_lock(&fs_info->balance_mutex);
3941 	if (!fs_info->balance_ctl) {
3942 		mutex_unlock(&fs_info->balance_mutex);
3943 		return 0;
3944 	}
3945 	mutex_unlock(&fs_info->balance_mutex);
3946 
3947 	if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
3948 		btrfs_info(fs_info, "balance: resume skipped");
3949 		return 0;
3950 	}
3951 
3952 	/*
3953 	 * A ro->rw remount sequence should continue with the paused balance
3954 	 * regardless of who pauses it, system or the user as of now, so set
3955 	 * the resume flag.
3956 	 */
3957 	spin_lock(&fs_info->balance_lock);
3958 	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
3959 	spin_unlock(&fs_info->balance_lock);
3960 
3961 	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3962 	return PTR_ERR_OR_ZERO(tsk);
3963 }
3964 
btrfs_recover_balance(struct btrfs_fs_info * fs_info)3965 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3966 {
3967 	struct btrfs_balance_control *bctl;
3968 	struct btrfs_balance_item *item;
3969 	struct btrfs_disk_balance_args disk_bargs;
3970 	struct btrfs_path *path;
3971 	struct extent_buffer *leaf;
3972 	struct btrfs_key key;
3973 	int ret;
3974 
3975 	path = btrfs_alloc_path();
3976 	if (!path)
3977 		return -ENOMEM;
3978 
3979 	key.objectid = BTRFS_BALANCE_OBJECTID;
3980 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3981 	key.offset = 0;
3982 
3983 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3984 	if (ret < 0)
3985 		goto out;
3986 	if (ret > 0) { /* ret = -ENOENT; */
3987 		ret = 0;
3988 		goto out;
3989 	}
3990 
3991 	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3992 	if (!bctl) {
3993 		ret = -ENOMEM;
3994 		goto out;
3995 	}
3996 
3997 	leaf = path->nodes[0];
3998 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3999 
4000 	bctl->flags = btrfs_balance_flags(leaf, item);
4001 	bctl->flags |= BTRFS_BALANCE_RESUME;
4002 
4003 	btrfs_balance_data(leaf, item, &disk_bargs);
4004 	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4005 	btrfs_balance_meta(leaf, item, &disk_bargs);
4006 	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4007 	btrfs_balance_sys(leaf, item, &disk_bargs);
4008 	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4009 
4010 	/*
4011 	 * This should never happen, as the paused balance state is recovered
4012 	 * during mount without any chance of other exclusive ops to collide.
4013 	 *
4014 	 * This gives the exclusive op status to balance and keeps in paused
4015 	 * state until user intervention (cancel or umount). If the ownership
4016 	 * cannot be assigned, show a message but do not fail. The balance
4017 	 * is in a paused state and must have fs_info::balance_ctl properly
4018 	 * set up.
4019 	 */
4020 	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags))
4021 		btrfs_warn(fs_info,
4022 	"balance: cannot set exclusive op status, resume manually");
4023 
4024 	btrfs_release_path(path);
4025 
4026 	mutex_lock(&fs_info->balance_mutex);
4027 	BUG_ON(fs_info->balance_ctl);
4028 	spin_lock(&fs_info->balance_lock);
4029 	fs_info->balance_ctl = bctl;
4030 	spin_unlock(&fs_info->balance_lock);
4031 	mutex_unlock(&fs_info->balance_mutex);
4032 out:
4033 	btrfs_free_path(path);
4034 	return ret;
4035 }
4036 
btrfs_pause_balance(struct btrfs_fs_info * fs_info)4037 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4038 {
4039 	int ret = 0;
4040 
4041 	mutex_lock(&fs_info->balance_mutex);
4042 	if (!fs_info->balance_ctl) {
4043 		mutex_unlock(&fs_info->balance_mutex);
4044 		return -ENOTCONN;
4045 	}
4046 
4047 	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4048 		atomic_inc(&fs_info->balance_pause_req);
4049 		mutex_unlock(&fs_info->balance_mutex);
4050 
4051 		wait_event(fs_info->balance_wait_q,
4052 			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4053 
4054 		mutex_lock(&fs_info->balance_mutex);
4055 		/* we are good with balance_ctl ripped off from under us */
4056 		BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4057 		atomic_dec(&fs_info->balance_pause_req);
4058 	} else {
4059 		ret = -ENOTCONN;
4060 	}
4061 
4062 	mutex_unlock(&fs_info->balance_mutex);
4063 	return ret;
4064 }
4065 
btrfs_cancel_balance(struct btrfs_fs_info * fs_info)4066 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4067 {
4068 	mutex_lock(&fs_info->balance_mutex);
4069 	if (!fs_info->balance_ctl) {
4070 		mutex_unlock(&fs_info->balance_mutex);
4071 		return -ENOTCONN;
4072 	}
4073 
4074 	/*
4075 	 * A paused balance with the item stored on disk can be resumed at
4076 	 * mount time if the mount is read-write. Otherwise it's still paused
4077 	 * and we must not allow cancelling as it deletes the item.
4078 	 */
4079 	if (sb_rdonly(fs_info->sb)) {
4080 		mutex_unlock(&fs_info->balance_mutex);
4081 		return -EROFS;
4082 	}
4083 
4084 	atomic_inc(&fs_info->balance_cancel_req);
4085 	/*
4086 	 * if we are running just wait and return, balance item is
4087 	 * deleted in btrfs_balance in this case
4088 	 */
4089 	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4090 		mutex_unlock(&fs_info->balance_mutex);
4091 		wait_event(fs_info->balance_wait_q,
4092 			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4093 		mutex_lock(&fs_info->balance_mutex);
4094 	} else {
4095 		mutex_unlock(&fs_info->balance_mutex);
4096 		/*
4097 		 * Lock released to allow other waiters to continue, we'll
4098 		 * reexamine the status again.
4099 		 */
4100 		mutex_lock(&fs_info->balance_mutex);
4101 
4102 		if (fs_info->balance_ctl) {
4103 			reset_balance_state(fs_info);
4104 			clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
4105 			btrfs_info(fs_info, "balance: canceled");
4106 		}
4107 	}
4108 
4109 	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4110 	atomic_dec(&fs_info->balance_cancel_req);
4111 	mutex_unlock(&fs_info->balance_mutex);
4112 	return 0;
4113 }
4114 
btrfs_uuid_scan_kthread(void * data)4115 static int btrfs_uuid_scan_kthread(void *data)
4116 {
4117 	struct btrfs_fs_info *fs_info = data;
4118 	struct btrfs_root *root = fs_info->tree_root;
4119 	struct btrfs_key key;
4120 	struct btrfs_path *path = NULL;
4121 	int ret = 0;
4122 	struct extent_buffer *eb;
4123 	int slot;
4124 	struct btrfs_root_item root_item;
4125 	u32 item_size;
4126 	struct btrfs_trans_handle *trans = NULL;
4127 
4128 	path = btrfs_alloc_path();
4129 	if (!path) {
4130 		ret = -ENOMEM;
4131 		goto out;
4132 	}
4133 
4134 	key.objectid = 0;
4135 	key.type = BTRFS_ROOT_ITEM_KEY;
4136 	key.offset = 0;
4137 
4138 	while (1) {
4139 		ret = btrfs_search_forward(root, &key, path,
4140 				BTRFS_OLDEST_GENERATION);
4141 		if (ret) {
4142 			if (ret > 0)
4143 				ret = 0;
4144 			break;
4145 		}
4146 
4147 		if (key.type != BTRFS_ROOT_ITEM_KEY ||
4148 		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4149 		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4150 		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
4151 			goto skip;
4152 
4153 		eb = path->nodes[0];
4154 		slot = path->slots[0];
4155 		item_size = btrfs_item_size_nr(eb, slot);
4156 		if (item_size < sizeof(root_item))
4157 			goto skip;
4158 
4159 		read_extent_buffer(eb, &root_item,
4160 				   btrfs_item_ptr_offset(eb, slot),
4161 				   (int)sizeof(root_item));
4162 		if (btrfs_root_refs(&root_item) == 0)
4163 			goto skip;
4164 
4165 		if (!btrfs_is_empty_uuid(root_item.uuid) ||
4166 		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
4167 			if (trans)
4168 				goto update_tree;
4169 
4170 			btrfs_release_path(path);
4171 			/*
4172 			 * 1 - subvol uuid item
4173 			 * 1 - received_subvol uuid item
4174 			 */
4175 			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4176 			if (IS_ERR(trans)) {
4177 				ret = PTR_ERR(trans);
4178 				break;
4179 			}
4180 			continue;
4181 		} else {
4182 			goto skip;
4183 		}
4184 update_tree:
4185 		btrfs_release_path(path);
4186 		if (!btrfs_is_empty_uuid(root_item.uuid)) {
4187 			ret = btrfs_uuid_tree_add(trans, root_item.uuid,
4188 						  BTRFS_UUID_KEY_SUBVOL,
4189 						  key.objectid);
4190 			if (ret < 0) {
4191 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4192 					ret);
4193 				break;
4194 			}
4195 		}
4196 
4197 		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4198 			ret = btrfs_uuid_tree_add(trans,
4199 						  root_item.received_uuid,
4200 						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4201 						  key.objectid);
4202 			if (ret < 0) {
4203 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4204 					ret);
4205 				break;
4206 			}
4207 		}
4208 
4209 skip:
4210 		btrfs_release_path(path);
4211 		if (trans) {
4212 			ret = btrfs_end_transaction(trans);
4213 			trans = NULL;
4214 			if (ret)
4215 				break;
4216 		}
4217 
4218 		if (key.offset < (u64)-1) {
4219 			key.offset++;
4220 		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4221 			key.offset = 0;
4222 			key.type = BTRFS_ROOT_ITEM_KEY;
4223 		} else if (key.objectid < (u64)-1) {
4224 			key.offset = 0;
4225 			key.type = BTRFS_ROOT_ITEM_KEY;
4226 			key.objectid++;
4227 		} else {
4228 			break;
4229 		}
4230 		cond_resched();
4231 	}
4232 
4233 out:
4234 	btrfs_free_path(path);
4235 	if (trans && !IS_ERR(trans))
4236 		btrfs_end_transaction(trans);
4237 	if (ret)
4238 		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4239 	else
4240 		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4241 	up(&fs_info->uuid_tree_rescan_sem);
4242 	return 0;
4243 }
4244 
4245 /*
4246  * Callback for btrfs_uuid_tree_iterate().
4247  * returns:
4248  * 0	check succeeded, the entry is not outdated.
4249  * < 0	if an error occurred.
4250  * > 0	if the check failed, which means the caller shall remove the entry.
4251  */
btrfs_check_uuid_tree_entry(struct btrfs_fs_info * fs_info,u8 * uuid,u8 type,u64 subid)4252 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
4253 				       u8 *uuid, u8 type, u64 subid)
4254 {
4255 	struct btrfs_key key;
4256 	int ret = 0;
4257 	struct btrfs_root *subvol_root;
4258 
4259 	if (type != BTRFS_UUID_KEY_SUBVOL &&
4260 	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
4261 		goto out;
4262 
4263 	key.objectid = subid;
4264 	key.type = BTRFS_ROOT_ITEM_KEY;
4265 	key.offset = (u64)-1;
4266 	subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
4267 	if (IS_ERR(subvol_root)) {
4268 		ret = PTR_ERR(subvol_root);
4269 		if (ret == -ENOENT)
4270 			ret = 1;
4271 		goto out;
4272 	}
4273 
4274 	switch (type) {
4275 	case BTRFS_UUID_KEY_SUBVOL:
4276 		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
4277 			ret = 1;
4278 		break;
4279 	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
4280 		if (memcmp(uuid, subvol_root->root_item.received_uuid,
4281 			   BTRFS_UUID_SIZE))
4282 			ret = 1;
4283 		break;
4284 	}
4285 
4286 out:
4287 	return ret;
4288 }
4289 
btrfs_uuid_rescan_kthread(void * data)4290 static int btrfs_uuid_rescan_kthread(void *data)
4291 {
4292 	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
4293 	int ret;
4294 
4295 	/*
4296 	 * 1st step is to iterate through the existing UUID tree and
4297 	 * to delete all entries that contain outdated data.
4298 	 * 2nd step is to add all missing entries to the UUID tree.
4299 	 */
4300 	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
4301 	if (ret < 0) {
4302 		btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
4303 		up(&fs_info->uuid_tree_rescan_sem);
4304 		return ret;
4305 	}
4306 	return btrfs_uuid_scan_kthread(data);
4307 }
4308 
btrfs_create_uuid_tree(struct btrfs_fs_info * fs_info)4309 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4310 {
4311 	struct btrfs_trans_handle *trans;
4312 	struct btrfs_root *tree_root = fs_info->tree_root;
4313 	struct btrfs_root *uuid_root;
4314 	struct task_struct *task;
4315 	int ret;
4316 
4317 	/*
4318 	 * 1 - root node
4319 	 * 1 - root item
4320 	 */
4321 	trans = btrfs_start_transaction(tree_root, 2);
4322 	if (IS_ERR(trans))
4323 		return PTR_ERR(trans);
4324 
4325 	uuid_root = btrfs_create_tree(trans, fs_info,
4326 				      BTRFS_UUID_TREE_OBJECTID);
4327 	if (IS_ERR(uuid_root)) {
4328 		ret = PTR_ERR(uuid_root);
4329 		btrfs_abort_transaction(trans, ret);
4330 		btrfs_end_transaction(trans);
4331 		return ret;
4332 	}
4333 
4334 	fs_info->uuid_root = uuid_root;
4335 
4336 	ret = btrfs_commit_transaction(trans);
4337 	if (ret)
4338 		return ret;
4339 
4340 	down(&fs_info->uuid_tree_rescan_sem);
4341 	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4342 	if (IS_ERR(task)) {
4343 		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
4344 		btrfs_warn(fs_info, "failed to start uuid_scan task");
4345 		up(&fs_info->uuid_tree_rescan_sem);
4346 		return PTR_ERR(task);
4347 	}
4348 
4349 	return 0;
4350 }
4351 
btrfs_check_uuid_tree(struct btrfs_fs_info * fs_info)4352 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
4353 {
4354 	struct task_struct *task;
4355 
4356 	down(&fs_info->uuid_tree_rescan_sem);
4357 	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
4358 	if (IS_ERR(task)) {
4359 		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
4360 		btrfs_warn(fs_info, "failed to start uuid_rescan task");
4361 		up(&fs_info->uuid_tree_rescan_sem);
4362 		return PTR_ERR(task);
4363 	}
4364 
4365 	return 0;
4366 }
4367 
4368 /*
4369  * shrinking a device means finding all of the device extents past
4370  * the new size, and then following the back refs to the chunks.
4371  * The chunk relocation code actually frees the device extent
4372  */
btrfs_shrink_device(struct btrfs_device * device,u64 new_size)4373 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4374 {
4375 	struct btrfs_fs_info *fs_info = device->fs_info;
4376 	struct btrfs_root *root = fs_info->dev_root;
4377 	struct btrfs_trans_handle *trans;
4378 	struct btrfs_dev_extent *dev_extent = NULL;
4379 	struct btrfs_path *path;
4380 	u64 length;
4381 	u64 chunk_offset;
4382 	int ret;
4383 	int slot;
4384 	int failed = 0;
4385 	bool retried = false;
4386 	bool checked_pending_chunks = false;
4387 	struct extent_buffer *l;
4388 	struct btrfs_key key;
4389 	struct btrfs_super_block *super_copy = fs_info->super_copy;
4390 	u64 old_total = btrfs_super_total_bytes(super_copy);
4391 	u64 old_size = btrfs_device_get_total_bytes(device);
4392 	u64 diff;
4393 
4394 	new_size = round_down(new_size, fs_info->sectorsize);
4395 	diff = round_down(old_size - new_size, fs_info->sectorsize);
4396 
4397 	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4398 		return -EINVAL;
4399 
4400 	path = btrfs_alloc_path();
4401 	if (!path)
4402 		return -ENOMEM;
4403 
4404 	path->reada = READA_BACK;
4405 
4406 	mutex_lock(&fs_info->chunk_mutex);
4407 
4408 	btrfs_device_set_total_bytes(device, new_size);
4409 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4410 		device->fs_devices->total_rw_bytes -= diff;
4411 		atomic64_sub(diff, &fs_info->free_chunk_space);
4412 	}
4413 	mutex_unlock(&fs_info->chunk_mutex);
4414 
4415 again:
4416 	key.objectid = device->devid;
4417 	key.offset = (u64)-1;
4418 	key.type = BTRFS_DEV_EXTENT_KEY;
4419 
4420 	do {
4421 		mutex_lock(&fs_info->delete_unused_bgs_mutex);
4422 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4423 		if (ret < 0) {
4424 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4425 			goto done;
4426 		}
4427 
4428 		ret = btrfs_previous_item(root, path, 0, key.type);
4429 		if (ret)
4430 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4431 		if (ret < 0)
4432 			goto done;
4433 		if (ret) {
4434 			ret = 0;
4435 			btrfs_release_path(path);
4436 			break;
4437 		}
4438 
4439 		l = path->nodes[0];
4440 		slot = path->slots[0];
4441 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4442 
4443 		if (key.objectid != device->devid) {
4444 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4445 			btrfs_release_path(path);
4446 			break;
4447 		}
4448 
4449 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4450 		length = btrfs_dev_extent_length(l, dev_extent);
4451 
4452 		if (key.offset + length <= new_size) {
4453 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4454 			btrfs_release_path(path);
4455 			break;
4456 		}
4457 
4458 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4459 		btrfs_release_path(path);
4460 
4461 		/*
4462 		 * We may be relocating the only data chunk we have,
4463 		 * which could potentially end up with losing data's
4464 		 * raid profile, so lets allocate an empty one in
4465 		 * advance.
4466 		 */
4467 		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4468 		if (ret < 0) {
4469 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4470 			goto done;
4471 		}
4472 
4473 		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4474 		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4475 		if (ret && ret != -ENOSPC)
4476 			goto done;
4477 		if (ret == -ENOSPC)
4478 			failed++;
4479 	} while (key.offset-- > 0);
4480 
4481 	if (failed && !retried) {
4482 		failed = 0;
4483 		retried = true;
4484 		goto again;
4485 	} else if (failed && retried) {
4486 		ret = -ENOSPC;
4487 		goto done;
4488 	}
4489 
4490 	/* Shrinking succeeded, else we would be at "done". */
4491 	trans = btrfs_start_transaction(root, 0);
4492 	if (IS_ERR(trans)) {
4493 		ret = PTR_ERR(trans);
4494 		goto done;
4495 	}
4496 
4497 	mutex_lock(&fs_info->chunk_mutex);
4498 
4499 	/*
4500 	 * We checked in the above loop all device extents that were already in
4501 	 * the device tree. However before we have updated the device's
4502 	 * total_bytes to the new size, we might have had chunk allocations that
4503 	 * have not complete yet (new block groups attached to transaction
4504 	 * handles), and therefore their device extents were not yet in the
4505 	 * device tree and we missed them in the loop above. So if we have any
4506 	 * pending chunk using a device extent that overlaps the device range
4507 	 * that we can not use anymore, commit the current transaction and
4508 	 * repeat the search on the device tree - this way we guarantee we will
4509 	 * not have chunks using device extents that end beyond 'new_size'.
4510 	 */
4511 	if (!checked_pending_chunks) {
4512 		u64 start = new_size;
4513 		u64 len = old_size - new_size;
4514 
4515 		if (contains_pending_extent(trans->transaction, device,
4516 					    &start, len)) {
4517 			mutex_unlock(&fs_info->chunk_mutex);
4518 			checked_pending_chunks = true;
4519 			failed = 0;
4520 			retried = false;
4521 			ret = btrfs_commit_transaction(trans);
4522 			if (ret)
4523 				goto done;
4524 			goto again;
4525 		}
4526 	}
4527 
4528 	btrfs_device_set_disk_total_bytes(device, new_size);
4529 	if (list_empty(&device->resized_list))
4530 		list_add_tail(&device->resized_list,
4531 			      &fs_info->fs_devices->resized_devices);
4532 
4533 	WARN_ON(diff > old_total);
4534 	btrfs_set_super_total_bytes(super_copy,
4535 			round_down(old_total - diff, fs_info->sectorsize));
4536 	mutex_unlock(&fs_info->chunk_mutex);
4537 
4538 	/* Now btrfs_update_device() will change the on-disk size. */
4539 	ret = btrfs_update_device(trans, device);
4540 	if (ret < 0) {
4541 		btrfs_abort_transaction(trans, ret);
4542 		btrfs_end_transaction(trans);
4543 	} else {
4544 		ret = btrfs_commit_transaction(trans);
4545 	}
4546 done:
4547 	btrfs_free_path(path);
4548 	if (ret) {
4549 		mutex_lock(&fs_info->chunk_mutex);
4550 		btrfs_device_set_total_bytes(device, old_size);
4551 		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
4552 			device->fs_devices->total_rw_bytes += diff;
4553 		atomic64_add(diff, &fs_info->free_chunk_space);
4554 		mutex_unlock(&fs_info->chunk_mutex);
4555 	}
4556 	return ret;
4557 }
4558 
btrfs_add_system_chunk(struct btrfs_fs_info * fs_info,struct btrfs_key * key,struct btrfs_chunk * chunk,int item_size)4559 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
4560 			   struct btrfs_key *key,
4561 			   struct btrfs_chunk *chunk, int item_size)
4562 {
4563 	struct btrfs_super_block *super_copy = fs_info->super_copy;
4564 	struct btrfs_disk_key disk_key;
4565 	u32 array_size;
4566 	u8 *ptr;
4567 
4568 	mutex_lock(&fs_info->chunk_mutex);
4569 	array_size = btrfs_super_sys_array_size(super_copy);
4570 	if (array_size + item_size + sizeof(disk_key)
4571 			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4572 		mutex_unlock(&fs_info->chunk_mutex);
4573 		return -EFBIG;
4574 	}
4575 
4576 	ptr = super_copy->sys_chunk_array + array_size;
4577 	btrfs_cpu_key_to_disk(&disk_key, key);
4578 	memcpy(ptr, &disk_key, sizeof(disk_key));
4579 	ptr += sizeof(disk_key);
4580 	memcpy(ptr, chunk, item_size);
4581 	item_size += sizeof(disk_key);
4582 	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4583 	mutex_unlock(&fs_info->chunk_mutex);
4584 
4585 	return 0;
4586 }
4587 
4588 /*
4589  * sort the devices in descending order by max_avail, total_avail
4590  */
btrfs_cmp_device_info(const void * a,const void * b)4591 static int btrfs_cmp_device_info(const void *a, const void *b)
4592 {
4593 	const struct btrfs_device_info *di_a = a;
4594 	const struct btrfs_device_info *di_b = b;
4595 
4596 	if (di_a->max_avail > di_b->max_avail)
4597 		return -1;
4598 	if (di_a->max_avail < di_b->max_avail)
4599 		return 1;
4600 	if (di_a->total_avail > di_b->total_avail)
4601 		return -1;
4602 	if (di_a->total_avail < di_b->total_avail)
4603 		return 1;
4604 	return 0;
4605 }
4606 
check_raid56_incompat_flag(struct btrfs_fs_info * info,u64 type)4607 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4608 {
4609 	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4610 		return;
4611 
4612 	btrfs_set_fs_incompat(info, RAID56);
4613 }
4614 
__btrfs_alloc_chunk(struct btrfs_trans_handle * trans,u64 start,u64 type)4615 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4616 			       u64 start, u64 type)
4617 {
4618 	struct btrfs_fs_info *info = trans->fs_info;
4619 	struct btrfs_fs_devices *fs_devices = info->fs_devices;
4620 	struct btrfs_device *device;
4621 	struct map_lookup *map = NULL;
4622 	struct extent_map_tree *em_tree;
4623 	struct extent_map *em;
4624 	struct btrfs_device_info *devices_info = NULL;
4625 	u64 total_avail;
4626 	int num_stripes;	/* total number of stripes to allocate */
4627 	int data_stripes;	/* number of stripes that count for
4628 				   block group size */
4629 	int sub_stripes;	/* sub_stripes info for map */
4630 	int dev_stripes;	/* stripes per dev */
4631 	int devs_max;		/* max devs to use */
4632 	int devs_min;		/* min devs needed */
4633 	int devs_increment;	/* ndevs has to be a multiple of this */
4634 	int ncopies;		/* how many copies to data has */
4635 	int ret;
4636 	u64 max_stripe_size;
4637 	u64 max_chunk_size;
4638 	u64 stripe_size;
4639 	u64 num_bytes;
4640 	int ndevs;
4641 	int i;
4642 	int j;
4643 	int index;
4644 
4645 	BUG_ON(!alloc_profile_is_valid(type, 0));
4646 
4647 	if (list_empty(&fs_devices->alloc_list)) {
4648 		if (btrfs_test_opt(info, ENOSPC_DEBUG))
4649 			btrfs_debug(info, "%s: no writable device", __func__);
4650 		return -ENOSPC;
4651 	}
4652 
4653 	index = btrfs_bg_flags_to_raid_index(type);
4654 
4655 	sub_stripes = btrfs_raid_array[index].sub_stripes;
4656 	dev_stripes = btrfs_raid_array[index].dev_stripes;
4657 	devs_max = btrfs_raid_array[index].devs_max;
4658 	devs_min = btrfs_raid_array[index].devs_min;
4659 	devs_increment = btrfs_raid_array[index].devs_increment;
4660 	ncopies = btrfs_raid_array[index].ncopies;
4661 
4662 	if (type & BTRFS_BLOCK_GROUP_DATA) {
4663 		max_stripe_size = SZ_1G;
4664 		max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
4665 		if (!devs_max)
4666 			devs_max = BTRFS_MAX_DEVS(info);
4667 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4668 		/* for larger filesystems, use larger metadata chunks */
4669 		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
4670 			max_stripe_size = SZ_1G;
4671 		else
4672 			max_stripe_size = SZ_256M;
4673 		max_chunk_size = max_stripe_size;
4674 		if (!devs_max)
4675 			devs_max = BTRFS_MAX_DEVS(info);
4676 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4677 		max_stripe_size = SZ_32M;
4678 		max_chunk_size = 2 * max_stripe_size;
4679 		if (!devs_max)
4680 			devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
4681 	} else {
4682 		btrfs_err(info, "invalid chunk type 0x%llx requested",
4683 		       type);
4684 		BUG_ON(1);
4685 	}
4686 
4687 	/* we don't want a chunk larger than 10% of writeable space */
4688 	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4689 			     max_chunk_size);
4690 
4691 	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
4692 			       GFP_NOFS);
4693 	if (!devices_info)
4694 		return -ENOMEM;
4695 
4696 	/*
4697 	 * in the first pass through the devices list, we gather information
4698 	 * about the available holes on each device.
4699 	 */
4700 	ndevs = 0;
4701 	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
4702 		u64 max_avail;
4703 		u64 dev_offset;
4704 
4705 		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4706 			WARN(1, KERN_ERR
4707 			       "BTRFS: read-only device in alloc_list\n");
4708 			continue;
4709 		}
4710 
4711 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
4712 					&device->dev_state) ||
4713 		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4714 			continue;
4715 
4716 		if (device->total_bytes > device->bytes_used)
4717 			total_avail = device->total_bytes - device->bytes_used;
4718 		else
4719 			total_avail = 0;
4720 
4721 		/* If there is no space on this device, skip it. */
4722 		if (total_avail == 0)
4723 			continue;
4724 
4725 		ret = find_free_dev_extent(trans, device,
4726 					   max_stripe_size * dev_stripes,
4727 					   &dev_offset, &max_avail);
4728 		if (ret && ret != -ENOSPC)
4729 			goto error;
4730 
4731 		if (ret == 0)
4732 			max_avail = max_stripe_size * dev_stripes;
4733 
4734 		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes) {
4735 			if (btrfs_test_opt(info, ENOSPC_DEBUG))
4736 				btrfs_debug(info,
4737 			"%s: devid %llu has no free space, have=%llu want=%u",
4738 					    __func__, device->devid, max_avail,
4739 					    BTRFS_STRIPE_LEN * dev_stripes);
4740 			continue;
4741 		}
4742 
4743 		if (ndevs == fs_devices->rw_devices) {
4744 			WARN(1, "%s: found more than %llu devices\n",
4745 			     __func__, fs_devices->rw_devices);
4746 			break;
4747 		}
4748 		devices_info[ndevs].dev_offset = dev_offset;
4749 		devices_info[ndevs].max_avail = max_avail;
4750 		devices_info[ndevs].total_avail = total_avail;
4751 		devices_info[ndevs].dev = device;
4752 		++ndevs;
4753 	}
4754 
4755 	/*
4756 	 * now sort the devices by hole size / available space
4757 	 */
4758 	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
4759 	     btrfs_cmp_device_info, NULL);
4760 
4761 	/* round down to number of usable stripes */
4762 	ndevs = round_down(ndevs, devs_increment);
4763 
4764 	if (ndevs < devs_min) {
4765 		ret = -ENOSPC;
4766 		if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
4767 			btrfs_debug(info,
4768 	"%s: not enough devices with free space: have=%d minimum required=%d",
4769 				    __func__, ndevs, devs_min);
4770 		}
4771 		goto error;
4772 	}
4773 
4774 	ndevs = min(ndevs, devs_max);
4775 
4776 	/*
4777 	 * The primary goal is to maximize the number of stripes, so use as
4778 	 * many devices as possible, even if the stripes are not maximum sized.
4779 	 *
4780 	 * The DUP profile stores more than one stripe per device, the
4781 	 * max_avail is the total size so we have to adjust.
4782 	 */
4783 	stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes);
4784 	num_stripes = ndevs * dev_stripes;
4785 
4786 	/*
4787 	 * this will have to be fixed for RAID1 and RAID10 over
4788 	 * more drives
4789 	 */
4790 	data_stripes = num_stripes / ncopies;
4791 
4792 	if (type & BTRFS_BLOCK_GROUP_RAID5)
4793 		data_stripes = num_stripes - 1;
4794 
4795 	if (type & BTRFS_BLOCK_GROUP_RAID6)
4796 		data_stripes = num_stripes - 2;
4797 
4798 	/*
4799 	 * Use the number of data stripes to figure out how big this chunk
4800 	 * is really going to be in terms of logical address space,
4801 	 * and compare that answer with the max chunk size. If it's higher,
4802 	 * we try to reduce stripe_size.
4803 	 */
4804 	if (stripe_size * data_stripes > max_chunk_size) {
4805 		/*
4806 		 * Reduce stripe_size, round it up to a 16MB boundary again and
4807 		 * then use it, unless it ends up being even bigger than the
4808 		 * previous value we had already.
4809 		 */
4810 		stripe_size = min(round_up(div_u64(max_chunk_size,
4811 						   data_stripes), SZ_16M),
4812 				  stripe_size);
4813 	}
4814 
4815 	/* align to BTRFS_STRIPE_LEN */
4816 	stripe_size = round_down(stripe_size, BTRFS_STRIPE_LEN);
4817 
4818 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4819 	if (!map) {
4820 		ret = -ENOMEM;
4821 		goto error;
4822 	}
4823 	map->num_stripes = num_stripes;
4824 
4825 	for (i = 0; i < ndevs; ++i) {
4826 		for (j = 0; j < dev_stripes; ++j) {
4827 			int s = i * dev_stripes + j;
4828 			map->stripes[s].dev = devices_info[i].dev;
4829 			map->stripes[s].physical = devices_info[i].dev_offset +
4830 						   j * stripe_size;
4831 		}
4832 	}
4833 	map->stripe_len = BTRFS_STRIPE_LEN;
4834 	map->io_align = BTRFS_STRIPE_LEN;
4835 	map->io_width = BTRFS_STRIPE_LEN;
4836 	map->type = type;
4837 	map->sub_stripes = sub_stripes;
4838 
4839 	num_bytes = stripe_size * data_stripes;
4840 
4841 	trace_btrfs_chunk_alloc(info, map, start, num_bytes);
4842 
4843 	em = alloc_extent_map();
4844 	if (!em) {
4845 		kfree(map);
4846 		ret = -ENOMEM;
4847 		goto error;
4848 	}
4849 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
4850 	em->map_lookup = map;
4851 	em->start = start;
4852 	em->len = num_bytes;
4853 	em->block_start = 0;
4854 	em->block_len = em->len;
4855 	em->orig_block_len = stripe_size;
4856 
4857 	em_tree = &info->mapping_tree.map_tree;
4858 	write_lock(&em_tree->lock);
4859 	ret = add_extent_mapping(em_tree, em, 0);
4860 	if (ret) {
4861 		write_unlock(&em_tree->lock);
4862 		free_extent_map(em);
4863 		goto error;
4864 	}
4865 
4866 	list_add_tail(&em->list, &trans->transaction->pending_chunks);
4867 	refcount_inc(&em->refs);
4868 	write_unlock(&em_tree->lock);
4869 
4870 	ret = btrfs_make_block_group(trans, 0, type, start, num_bytes);
4871 	if (ret)
4872 		goto error_del_extent;
4873 
4874 	for (i = 0; i < map->num_stripes; i++) {
4875 		num_bytes = map->stripes[i].dev->bytes_used + stripe_size;
4876 		btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
4877 		map->stripes[i].dev->has_pending_chunks = true;
4878 	}
4879 
4880 	atomic64_sub(stripe_size * map->num_stripes, &info->free_chunk_space);
4881 
4882 	free_extent_map(em);
4883 	check_raid56_incompat_flag(info, type);
4884 
4885 	kfree(devices_info);
4886 	return 0;
4887 
4888 error_del_extent:
4889 	write_lock(&em_tree->lock);
4890 	remove_extent_mapping(em_tree, em);
4891 	write_unlock(&em_tree->lock);
4892 
4893 	/* One for our allocation */
4894 	free_extent_map(em);
4895 	/* One for the tree reference */
4896 	free_extent_map(em);
4897 	/* One for the pending_chunks list reference */
4898 	free_extent_map(em);
4899 error:
4900 	kfree(devices_info);
4901 	return ret;
4902 }
4903 
btrfs_finish_chunk_alloc(struct btrfs_trans_handle * trans,u64 chunk_offset,u64 chunk_size)4904 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
4905 			     u64 chunk_offset, u64 chunk_size)
4906 {
4907 	struct btrfs_fs_info *fs_info = trans->fs_info;
4908 	struct btrfs_root *extent_root = fs_info->extent_root;
4909 	struct btrfs_root *chunk_root = fs_info->chunk_root;
4910 	struct btrfs_key key;
4911 	struct btrfs_device *device;
4912 	struct btrfs_chunk *chunk;
4913 	struct btrfs_stripe *stripe;
4914 	struct extent_map *em;
4915 	struct map_lookup *map;
4916 	size_t item_size;
4917 	u64 dev_offset;
4918 	u64 stripe_size;
4919 	int i = 0;
4920 	int ret = 0;
4921 
4922 	em = get_chunk_map(fs_info, chunk_offset, chunk_size);
4923 	if (IS_ERR(em))
4924 		return PTR_ERR(em);
4925 
4926 	map = em->map_lookup;
4927 	item_size = btrfs_chunk_item_size(map->num_stripes);
4928 	stripe_size = em->orig_block_len;
4929 
4930 	chunk = kzalloc(item_size, GFP_NOFS);
4931 	if (!chunk) {
4932 		ret = -ENOMEM;
4933 		goto out;
4934 	}
4935 
4936 	/*
4937 	 * Take the device list mutex to prevent races with the final phase of
4938 	 * a device replace operation that replaces the device object associated
4939 	 * with the map's stripes, because the device object's id can change
4940 	 * at any time during that final phase of the device replace operation
4941 	 * (dev-replace.c:btrfs_dev_replace_finishing()).
4942 	 */
4943 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
4944 	for (i = 0; i < map->num_stripes; i++) {
4945 		device = map->stripes[i].dev;
4946 		dev_offset = map->stripes[i].physical;
4947 
4948 		ret = btrfs_update_device(trans, device);
4949 		if (ret)
4950 			break;
4951 		ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
4952 					     dev_offset, stripe_size);
4953 		if (ret)
4954 			break;
4955 	}
4956 	if (ret) {
4957 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4958 		goto out;
4959 	}
4960 
4961 	stripe = &chunk->stripe;
4962 	for (i = 0; i < map->num_stripes; i++) {
4963 		device = map->stripes[i].dev;
4964 		dev_offset = map->stripes[i].physical;
4965 
4966 		btrfs_set_stack_stripe_devid(stripe, device->devid);
4967 		btrfs_set_stack_stripe_offset(stripe, dev_offset);
4968 		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4969 		stripe++;
4970 	}
4971 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4972 
4973 	btrfs_set_stack_chunk_length(chunk, chunk_size);
4974 	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4975 	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4976 	btrfs_set_stack_chunk_type(chunk, map->type);
4977 	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
4978 	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
4979 	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
4980 	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
4981 	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
4982 
4983 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4984 	key.type = BTRFS_CHUNK_ITEM_KEY;
4985 	key.offset = chunk_offset;
4986 
4987 	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
4988 	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4989 		/*
4990 		 * TODO: Cleanup of inserted chunk root in case of
4991 		 * failure.
4992 		 */
4993 		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
4994 	}
4995 
4996 out:
4997 	kfree(chunk);
4998 	free_extent_map(em);
4999 	return ret;
5000 }
5001 
5002 /*
5003  * Chunk allocation falls into two parts. The first part does works
5004  * that make the new allocated chunk useable, but not do any operation
5005  * that modifies the chunk tree. The second part does the works that
5006  * require modifying the chunk tree. This division is important for the
5007  * bootstrap process of adding storage to a seed btrfs.
5008  */
btrfs_alloc_chunk(struct btrfs_trans_handle * trans,u64 type)5009 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
5010 {
5011 	u64 chunk_offset;
5012 
5013 	lockdep_assert_held(&trans->fs_info->chunk_mutex);
5014 	chunk_offset = find_next_chunk(trans->fs_info);
5015 	return __btrfs_alloc_chunk(trans, chunk_offset, type);
5016 }
5017 
init_first_rw_device(struct btrfs_trans_handle * trans,struct btrfs_fs_info * fs_info)5018 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
5019 					 struct btrfs_fs_info *fs_info)
5020 {
5021 	u64 chunk_offset;
5022 	u64 sys_chunk_offset;
5023 	u64 alloc_profile;
5024 	int ret;
5025 
5026 	chunk_offset = find_next_chunk(fs_info);
5027 	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5028 	ret = __btrfs_alloc_chunk(trans, chunk_offset, alloc_profile);
5029 	if (ret)
5030 		return ret;
5031 
5032 	sys_chunk_offset = find_next_chunk(fs_info);
5033 	alloc_profile = btrfs_system_alloc_profile(fs_info);
5034 	ret = __btrfs_alloc_chunk(trans, sys_chunk_offset, alloc_profile);
5035 	return ret;
5036 }
5037 
btrfs_chunk_max_errors(struct map_lookup * map)5038 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5039 {
5040 	int max_errors;
5041 
5042 	if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
5043 			 BTRFS_BLOCK_GROUP_RAID10 |
5044 			 BTRFS_BLOCK_GROUP_RAID5)) {
5045 		max_errors = 1;
5046 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
5047 		max_errors = 2;
5048 	} else {
5049 		max_errors = 0;
5050 	}
5051 
5052 	return max_errors;
5053 }
5054 
btrfs_chunk_readonly(struct btrfs_fs_info * fs_info,u64 chunk_offset)5055 int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5056 {
5057 	struct extent_map *em;
5058 	struct map_lookup *map;
5059 	int readonly = 0;
5060 	int miss_ndevs = 0;
5061 	int i;
5062 
5063 	em = get_chunk_map(fs_info, chunk_offset, 1);
5064 	if (IS_ERR(em))
5065 		return 1;
5066 
5067 	map = em->map_lookup;
5068 	for (i = 0; i < map->num_stripes; i++) {
5069 		if (test_bit(BTRFS_DEV_STATE_MISSING,
5070 					&map->stripes[i].dev->dev_state)) {
5071 			miss_ndevs++;
5072 			continue;
5073 		}
5074 		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5075 					&map->stripes[i].dev->dev_state)) {
5076 			readonly = 1;
5077 			goto end;
5078 		}
5079 	}
5080 
5081 	/*
5082 	 * If the number of missing devices is larger than max errors,
5083 	 * we can not write the data into that chunk successfully, so
5084 	 * set it readonly.
5085 	 */
5086 	if (miss_ndevs > btrfs_chunk_max_errors(map))
5087 		readonly = 1;
5088 end:
5089 	free_extent_map(em);
5090 	return readonly;
5091 }
5092 
btrfs_mapping_init(struct btrfs_mapping_tree * tree)5093 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
5094 {
5095 	extent_map_tree_init(&tree->map_tree);
5096 }
5097 
btrfs_mapping_tree_free(struct btrfs_mapping_tree * tree)5098 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
5099 {
5100 	struct extent_map *em;
5101 
5102 	while (1) {
5103 		write_lock(&tree->map_tree.lock);
5104 		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
5105 		if (em)
5106 			remove_extent_mapping(&tree->map_tree, em);
5107 		write_unlock(&tree->map_tree.lock);
5108 		if (!em)
5109 			break;
5110 		/* once for us */
5111 		free_extent_map(em);
5112 		/* once for the tree */
5113 		free_extent_map(em);
5114 	}
5115 }
5116 
btrfs_num_copies(struct btrfs_fs_info * fs_info,u64 logical,u64 len)5117 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5118 {
5119 	struct extent_map *em;
5120 	struct map_lookup *map;
5121 	int ret;
5122 
5123 	em = get_chunk_map(fs_info, logical, len);
5124 	if (IS_ERR(em))
5125 		/*
5126 		 * We could return errors for these cases, but that could get
5127 		 * ugly and we'd probably do the same thing which is just not do
5128 		 * anything else and exit, so return 1 so the callers don't try
5129 		 * to use other copies.
5130 		 */
5131 		return 1;
5132 
5133 	map = em->map_lookup;
5134 	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
5135 		ret = map->num_stripes;
5136 	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5137 		ret = map->sub_stripes;
5138 	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5139 		ret = 2;
5140 	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5141 		/*
5142 		 * There could be two corrupted data stripes, we need
5143 		 * to loop retry in order to rebuild the correct data.
5144 		 *
5145 		 * Fail a stripe at a time on every retry except the
5146 		 * stripe under reconstruction.
5147 		 */
5148 		ret = map->num_stripes;
5149 	else
5150 		ret = 1;
5151 	free_extent_map(em);
5152 
5153 	btrfs_dev_replace_read_lock(&fs_info->dev_replace);
5154 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5155 	    fs_info->dev_replace.tgtdev)
5156 		ret++;
5157 	btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
5158 
5159 	return ret;
5160 }
5161 
btrfs_full_stripe_len(struct btrfs_fs_info * fs_info,u64 logical)5162 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5163 				    u64 logical)
5164 {
5165 	struct extent_map *em;
5166 	struct map_lookup *map;
5167 	unsigned long len = fs_info->sectorsize;
5168 
5169 	em = get_chunk_map(fs_info, logical, len);
5170 
5171 	if (!WARN_ON(IS_ERR(em))) {
5172 		map = em->map_lookup;
5173 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5174 			len = map->stripe_len * nr_data_stripes(map);
5175 		free_extent_map(em);
5176 	}
5177 	return len;
5178 }
5179 
btrfs_is_parity_mirror(struct btrfs_fs_info * fs_info,u64 logical,u64 len)5180 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5181 {
5182 	struct extent_map *em;
5183 	struct map_lookup *map;
5184 	int ret = 0;
5185 
5186 	em = get_chunk_map(fs_info, logical, len);
5187 
5188 	if(!WARN_ON(IS_ERR(em))) {
5189 		map = em->map_lookup;
5190 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5191 			ret = 1;
5192 		free_extent_map(em);
5193 	}
5194 	return ret;
5195 }
5196 
find_live_mirror(struct btrfs_fs_info * fs_info,struct map_lookup * map,int first,int dev_replace_is_ongoing)5197 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5198 			    struct map_lookup *map, int first,
5199 			    int dev_replace_is_ongoing)
5200 {
5201 	int i;
5202 	int num_stripes;
5203 	int preferred_mirror;
5204 	int tolerance;
5205 	struct btrfs_device *srcdev;
5206 
5207 	ASSERT((map->type &
5208 		 (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)));
5209 
5210 	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5211 		num_stripes = map->sub_stripes;
5212 	else
5213 		num_stripes = map->num_stripes;
5214 
5215 	preferred_mirror = first + current->pid % num_stripes;
5216 
5217 	if (dev_replace_is_ongoing &&
5218 	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5219 	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5220 		srcdev = fs_info->dev_replace.srcdev;
5221 	else
5222 		srcdev = NULL;
5223 
5224 	/*
5225 	 * try to avoid the drive that is the source drive for a
5226 	 * dev-replace procedure, only choose it if no other non-missing
5227 	 * mirror is available
5228 	 */
5229 	for (tolerance = 0; tolerance < 2; tolerance++) {
5230 		if (map->stripes[preferred_mirror].dev->bdev &&
5231 		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5232 			return preferred_mirror;
5233 		for (i = first; i < first + num_stripes; i++) {
5234 			if (map->stripes[i].dev->bdev &&
5235 			    (tolerance || map->stripes[i].dev != srcdev))
5236 				return i;
5237 		}
5238 	}
5239 
5240 	/* we couldn't find one that doesn't fail.  Just return something
5241 	 * and the io error handling code will clean up eventually
5242 	 */
5243 	return preferred_mirror;
5244 }
5245 
parity_smaller(u64 a,u64 b)5246 static inline int parity_smaller(u64 a, u64 b)
5247 {
5248 	return a > b;
5249 }
5250 
5251 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
sort_parity_stripes(struct btrfs_bio * bbio,int num_stripes)5252 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5253 {
5254 	struct btrfs_bio_stripe s;
5255 	int i;
5256 	u64 l;
5257 	int again = 1;
5258 
5259 	while (again) {
5260 		again = 0;
5261 		for (i = 0; i < num_stripes - 1; i++) {
5262 			if (parity_smaller(bbio->raid_map[i],
5263 					   bbio->raid_map[i+1])) {
5264 				s = bbio->stripes[i];
5265 				l = bbio->raid_map[i];
5266 				bbio->stripes[i] = bbio->stripes[i+1];
5267 				bbio->raid_map[i] = bbio->raid_map[i+1];
5268 				bbio->stripes[i+1] = s;
5269 				bbio->raid_map[i+1] = l;
5270 
5271 				again = 1;
5272 			}
5273 		}
5274 	}
5275 }
5276 
alloc_btrfs_bio(int total_stripes,int real_stripes)5277 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5278 {
5279 	struct btrfs_bio *bbio = kzalloc(
5280 		 /* the size of the btrfs_bio */
5281 		sizeof(struct btrfs_bio) +
5282 		/* plus the variable array for the stripes */
5283 		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5284 		/* plus the variable array for the tgt dev */
5285 		sizeof(int) * (real_stripes) +
5286 		/*
5287 		 * plus the raid_map, which includes both the tgt dev
5288 		 * and the stripes
5289 		 */
5290 		sizeof(u64) * (total_stripes),
5291 		GFP_NOFS|__GFP_NOFAIL);
5292 
5293 	atomic_set(&bbio->error, 0);
5294 	refcount_set(&bbio->refs, 1);
5295 
5296 	return bbio;
5297 }
5298 
btrfs_get_bbio(struct btrfs_bio * bbio)5299 void btrfs_get_bbio(struct btrfs_bio *bbio)
5300 {
5301 	WARN_ON(!refcount_read(&bbio->refs));
5302 	refcount_inc(&bbio->refs);
5303 }
5304 
btrfs_put_bbio(struct btrfs_bio * bbio)5305 void btrfs_put_bbio(struct btrfs_bio *bbio)
5306 {
5307 	if (!bbio)
5308 		return;
5309 	if (refcount_dec_and_test(&bbio->refs))
5310 		kfree(bbio);
5311 }
5312 
5313 /* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
5314 /*
5315  * Please note that, discard won't be sent to target device of device
5316  * replace.
5317  */
__btrfs_map_block_for_discard(struct btrfs_fs_info * fs_info,u64 logical,u64 length,struct btrfs_bio ** bbio_ret)5318 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
5319 					 u64 logical, u64 length,
5320 					 struct btrfs_bio **bbio_ret)
5321 {
5322 	struct extent_map *em;
5323 	struct map_lookup *map;
5324 	struct btrfs_bio *bbio;
5325 	u64 offset;
5326 	u64 stripe_nr;
5327 	u64 stripe_nr_end;
5328 	u64 stripe_end_offset;
5329 	u64 stripe_cnt;
5330 	u64 stripe_len;
5331 	u64 stripe_offset;
5332 	u64 num_stripes;
5333 	u32 stripe_index;
5334 	u32 factor = 0;
5335 	u32 sub_stripes = 0;
5336 	u64 stripes_per_dev = 0;
5337 	u32 remaining_stripes = 0;
5338 	u32 last_stripe = 0;
5339 	int ret = 0;
5340 	int i;
5341 
5342 	/* discard always return a bbio */
5343 	ASSERT(bbio_ret);
5344 
5345 	em = get_chunk_map(fs_info, logical, length);
5346 	if (IS_ERR(em))
5347 		return PTR_ERR(em);
5348 
5349 	map = em->map_lookup;
5350 	/* we don't discard raid56 yet */
5351 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5352 		ret = -EOPNOTSUPP;
5353 		goto out;
5354 	}
5355 
5356 	offset = logical - em->start;
5357 	length = min_t(u64, em->len - offset, length);
5358 
5359 	stripe_len = map->stripe_len;
5360 	/*
5361 	 * stripe_nr counts the total number of stripes we have to stride
5362 	 * to get to this block
5363 	 */
5364 	stripe_nr = div64_u64(offset, stripe_len);
5365 
5366 	/* stripe_offset is the offset of this block in its stripe */
5367 	stripe_offset = offset - stripe_nr * stripe_len;
5368 
5369 	stripe_nr_end = round_up(offset + length, map->stripe_len);
5370 	stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
5371 	stripe_cnt = stripe_nr_end - stripe_nr;
5372 	stripe_end_offset = stripe_nr_end * map->stripe_len -
5373 			    (offset + length);
5374 	/*
5375 	 * after this, stripe_nr is the number of stripes on this
5376 	 * device we have to walk to find the data, and stripe_index is
5377 	 * the number of our device in the stripe array
5378 	 */
5379 	num_stripes = 1;
5380 	stripe_index = 0;
5381 	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5382 			 BTRFS_BLOCK_GROUP_RAID10)) {
5383 		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5384 			sub_stripes = 1;
5385 		else
5386 			sub_stripes = map->sub_stripes;
5387 
5388 		factor = map->num_stripes / sub_stripes;
5389 		num_stripes = min_t(u64, map->num_stripes,
5390 				    sub_stripes * stripe_cnt);
5391 		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5392 		stripe_index *= sub_stripes;
5393 		stripes_per_dev = div_u64_rem(stripe_cnt, factor,
5394 					      &remaining_stripes);
5395 		div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5396 		last_stripe *= sub_stripes;
5397 	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
5398 				BTRFS_BLOCK_GROUP_DUP)) {
5399 		num_stripes = map->num_stripes;
5400 	} else {
5401 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5402 					&stripe_index);
5403 	}
5404 
5405 	bbio = alloc_btrfs_bio(num_stripes, 0);
5406 	if (!bbio) {
5407 		ret = -ENOMEM;
5408 		goto out;
5409 	}
5410 
5411 	for (i = 0; i < num_stripes; i++) {
5412 		bbio->stripes[i].physical =
5413 			map->stripes[stripe_index].physical +
5414 			stripe_offset + stripe_nr * map->stripe_len;
5415 		bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5416 
5417 		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5418 				 BTRFS_BLOCK_GROUP_RAID10)) {
5419 			bbio->stripes[i].length = stripes_per_dev *
5420 				map->stripe_len;
5421 
5422 			if (i / sub_stripes < remaining_stripes)
5423 				bbio->stripes[i].length +=
5424 					map->stripe_len;
5425 
5426 			/*
5427 			 * Special for the first stripe and
5428 			 * the last stripe:
5429 			 *
5430 			 * |-------|...|-------|
5431 			 *     |----------|
5432 			 *    off     end_off
5433 			 */
5434 			if (i < sub_stripes)
5435 				bbio->stripes[i].length -=
5436 					stripe_offset;
5437 
5438 			if (stripe_index >= last_stripe &&
5439 			    stripe_index <= (last_stripe +
5440 					     sub_stripes - 1))
5441 				bbio->stripes[i].length -=
5442 					stripe_end_offset;
5443 
5444 			if (i == sub_stripes - 1)
5445 				stripe_offset = 0;
5446 		} else {
5447 			bbio->stripes[i].length = length;
5448 		}
5449 
5450 		stripe_index++;
5451 		if (stripe_index == map->num_stripes) {
5452 			stripe_index = 0;
5453 			stripe_nr++;
5454 		}
5455 	}
5456 
5457 	*bbio_ret = bbio;
5458 	bbio->map_type = map->type;
5459 	bbio->num_stripes = num_stripes;
5460 out:
5461 	free_extent_map(em);
5462 	return ret;
5463 }
5464 
5465 /*
5466  * In dev-replace case, for repair case (that's the only case where the mirror
5467  * is selected explicitly when calling btrfs_map_block), blocks left of the
5468  * left cursor can also be read from the target drive.
5469  *
5470  * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
5471  * array of stripes.
5472  * For READ, it also needs to be supported using the same mirror number.
5473  *
5474  * If the requested block is not left of the left cursor, EIO is returned. This
5475  * can happen because btrfs_num_copies() returns one more in the dev-replace
5476  * case.
5477  */
get_extra_mirror_from_replace(struct btrfs_fs_info * fs_info,u64 logical,u64 length,u64 srcdev_devid,int * mirror_num,u64 * physical)5478 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
5479 					 u64 logical, u64 length,
5480 					 u64 srcdev_devid, int *mirror_num,
5481 					 u64 *physical)
5482 {
5483 	struct btrfs_bio *bbio = NULL;
5484 	int num_stripes;
5485 	int index_srcdev = 0;
5486 	int found = 0;
5487 	u64 physical_of_found = 0;
5488 	int i;
5489 	int ret = 0;
5490 
5491 	ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
5492 				logical, &length, &bbio, 0, 0);
5493 	if (ret) {
5494 		ASSERT(bbio == NULL);
5495 		return ret;
5496 	}
5497 
5498 	num_stripes = bbio->num_stripes;
5499 	if (*mirror_num > num_stripes) {
5500 		/*
5501 		 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
5502 		 * that means that the requested area is not left of the left
5503 		 * cursor
5504 		 */
5505 		btrfs_put_bbio(bbio);
5506 		return -EIO;
5507 	}
5508 
5509 	/*
5510 	 * process the rest of the function using the mirror_num of the source
5511 	 * drive. Therefore look it up first.  At the end, patch the device
5512 	 * pointer to the one of the target drive.
5513 	 */
5514 	for (i = 0; i < num_stripes; i++) {
5515 		if (bbio->stripes[i].dev->devid != srcdev_devid)
5516 			continue;
5517 
5518 		/*
5519 		 * In case of DUP, in order to keep it simple, only add the
5520 		 * mirror with the lowest physical address
5521 		 */
5522 		if (found &&
5523 		    physical_of_found <= bbio->stripes[i].physical)
5524 			continue;
5525 
5526 		index_srcdev = i;
5527 		found = 1;
5528 		physical_of_found = bbio->stripes[i].physical;
5529 	}
5530 
5531 	btrfs_put_bbio(bbio);
5532 
5533 	ASSERT(found);
5534 	if (!found)
5535 		return -EIO;
5536 
5537 	*mirror_num = index_srcdev + 1;
5538 	*physical = physical_of_found;
5539 	return ret;
5540 }
5541 
handle_ops_on_dev_replace(enum btrfs_map_op op,struct btrfs_bio ** bbio_ret,struct btrfs_dev_replace * dev_replace,int * num_stripes_ret,int * max_errors_ret)5542 static void handle_ops_on_dev_replace(enum btrfs_map_op op,
5543 				      struct btrfs_bio **bbio_ret,
5544 				      struct btrfs_dev_replace *dev_replace,
5545 				      int *num_stripes_ret, int *max_errors_ret)
5546 {
5547 	struct btrfs_bio *bbio = *bbio_ret;
5548 	u64 srcdev_devid = dev_replace->srcdev->devid;
5549 	int tgtdev_indexes = 0;
5550 	int num_stripes = *num_stripes_ret;
5551 	int max_errors = *max_errors_ret;
5552 	int i;
5553 
5554 	if (op == BTRFS_MAP_WRITE) {
5555 		int index_where_to_add;
5556 
5557 		/*
5558 		 * duplicate the write operations while the dev replace
5559 		 * procedure is running. Since the copying of the old disk to
5560 		 * the new disk takes place at run time while the filesystem is
5561 		 * mounted writable, the regular write operations to the old
5562 		 * disk have to be duplicated to go to the new disk as well.
5563 		 *
5564 		 * Note that device->missing is handled by the caller, and that
5565 		 * the write to the old disk is already set up in the stripes
5566 		 * array.
5567 		 */
5568 		index_where_to_add = num_stripes;
5569 		for (i = 0; i < num_stripes; i++) {
5570 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5571 				/* write to new disk, too */
5572 				struct btrfs_bio_stripe *new =
5573 					bbio->stripes + index_where_to_add;
5574 				struct btrfs_bio_stripe *old =
5575 					bbio->stripes + i;
5576 
5577 				new->physical = old->physical;
5578 				new->length = old->length;
5579 				new->dev = dev_replace->tgtdev;
5580 				bbio->tgtdev_map[i] = index_where_to_add;
5581 				index_where_to_add++;
5582 				max_errors++;
5583 				tgtdev_indexes++;
5584 			}
5585 		}
5586 		num_stripes = index_where_to_add;
5587 	} else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
5588 		int index_srcdev = 0;
5589 		int found = 0;
5590 		u64 physical_of_found = 0;
5591 
5592 		/*
5593 		 * During the dev-replace procedure, the target drive can also
5594 		 * be used to read data in case it is needed to repair a corrupt
5595 		 * block elsewhere. This is possible if the requested area is
5596 		 * left of the left cursor. In this area, the target drive is a
5597 		 * full copy of the source drive.
5598 		 */
5599 		for (i = 0; i < num_stripes; i++) {
5600 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5601 				/*
5602 				 * In case of DUP, in order to keep it simple,
5603 				 * only add the mirror with the lowest physical
5604 				 * address
5605 				 */
5606 				if (found &&
5607 				    physical_of_found <=
5608 				     bbio->stripes[i].physical)
5609 					continue;
5610 				index_srcdev = i;
5611 				found = 1;
5612 				physical_of_found = bbio->stripes[i].physical;
5613 			}
5614 		}
5615 		if (found) {
5616 			struct btrfs_bio_stripe *tgtdev_stripe =
5617 				bbio->stripes + num_stripes;
5618 
5619 			tgtdev_stripe->physical = physical_of_found;
5620 			tgtdev_stripe->length =
5621 				bbio->stripes[index_srcdev].length;
5622 			tgtdev_stripe->dev = dev_replace->tgtdev;
5623 			bbio->tgtdev_map[index_srcdev] = num_stripes;
5624 
5625 			tgtdev_indexes++;
5626 			num_stripes++;
5627 		}
5628 	}
5629 
5630 	*num_stripes_ret = num_stripes;
5631 	*max_errors_ret = max_errors;
5632 	bbio->num_tgtdevs = tgtdev_indexes;
5633 	*bbio_ret = bbio;
5634 }
5635 
need_full_stripe(enum btrfs_map_op op)5636 static bool need_full_stripe(enum btrfs_map_op op)
5637 {
5638 	return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
5639 }
5640 
__btrfs_map_block(struct btrfs_fs_info * fs_info,enum btrfs_map_op op,u64 logical,u64 * length,struct btrfs_bio ** bbio_ret,int mirror_num,int need_raid_map)5641 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
5642 			     enum btrfs_map_op op,
5643 			     u64 logical, u64 *length,
5644 			     struct btrfs_bio **bbio_ret,
5645 			     int mirror_num, int need_raid_map)
5646 {
5647 	struct extent_map *em;
5648 	struct map_lookup *map;
5649 	u64 offset;
5650 	u64 stripe_offset;
5651 	u64 stripe_nr;
5652 	u64 stripe_len;
5653 	u32 stripe_index;
5654 	int i;
5655 	int ret = 0;
5656 	int num_stripes;
5657 	int max_errors = 0;
5658 	int tgtdev_indexes = 0;
5659 	struct btrfs_bio *bbio = NULL;
5660 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
5661 	int dev_replace_is_ongoing = 0;
5662 	int num_alloc_stripes;
5663 	int patch_the_first_stripe_for_dev_replace = 0;
5664 	u64 physical_to_patch_in_first_stripe = 0;
5665 	u64 raid56_full_stripe_start = (u64)-1;
5666 
5667 	if (op == BTRFS_MAP_DISCARD)
5668 		return __btrfs_map_block_for_discard(fs_info, logical,
5669 						     *length, bbio_ret);
5670 
5671 	em = get_chunk_map(fs_info, logical, *length);
5672 	if (IS_ERR(em))
5673 		return PTR_ERR(em);
5674 
5675 	map = em->map_lookup;
5676 	offset = logical - em->start;
5677 
5678 	stripe_len = map->stripe_len;
5679 	stripe_nr = offset;
5680 	/*
5681 	 * stripe_nr counts the total number of stripes we have to stride
5682 	 * to get to this block
5683 	 */
5684 	stripe_nr = div64_u64(stripe_nr, stripe_len);
5685 
5686 	stripe_offset = stripe_nr * stripe_len;
5687 	if (offset < stripe_offset) {
5688 		btrfs_crit(fs_info,
5689 			   "stripe math has gone wrong, stripe_offset=%llu, offset=%llu, start=%llu, logical=%llu, stripe_len=%llu",
5690 			   stripe_offset, offset, em->start, logical,
5691 			   stripe_len);
5692 		free_extent_map(em);
5693 		return -EINVAL;
5694 	}
5695 
5696 	/* stripe_offset is the offset of this block in its stripe*/
5697 	stripe_offset = offset - stripe_offset;
5698 
5699 	/* if we're here for raid56, we need to know the stripe aligned start */
5700 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5701 		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
5702 		raid56_full_stripe_start = offset;
5703 
5704 		/* allow a write of a full stripe, but make sure we don't
5705 		 * allow straddling of stripes
5706 		 */
5707 		raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
5708 				full_stripe_len);
5709 		raid56_full_stripe_start *= full_stripe_len;
5710 	}
5711 
5712 	if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
5713 		u64 max_len;
5714 		/* For writes to RAID[56], allow a full stripeset across all disks.
5715 		   For other RAID types and for RAID[56] reads, just allow a single
5716 		   stripe (on a single disk). */
5717 		if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
5718 		    (op == BTRFS_MAP_WRITE)) {
5719 			max_len = stripe_len * nr_data_stripes(map) -
5720 				(offset - raid56_full_stripe_start);
5721 		} else {
5722 			/* we limit the length of each bio to what fits in a stripe */
5723 			max_len = stripe_len - stripe_offset;
5724 		}
5725 		*length = min_t(u64, em->len - offset, max_len);
5726 	} else {
5727 		*length = em->len - offset;
5728 	}
5729 
5730 	/* This is for when we're called from btrfs_merge_bio_hook() and all
5731 	   it cares about is the length */
5732 	if (!bbio_ret)
5733 		goto out;
5734 
5735 	btrfs_dev_replace_read_lock(dev_replace);
5736 	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
5737 	if (!dev_replace_is_ongoing)
5738 		btrfs_dev_replace_read_unlock(dev_replace);
5739 	else
5740 		btrfs_dev_replace_set_lock_blocking(dev_replace);
5741 
5742 	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
5743 	    !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
5744 		ret = get_extra_mirror_from_replace(fs_info, logical, *length,
5745 						    dev_replace->srcdev->devid,
5746 						    &mirror_num,
5747 					    &physical_to_patch_in_first_stripe);
5748 		if (ret)
5749 			goto out;
5750 		else
5751 			patch_the_first_stripe_for_dev_replace = 1;
5752 	} else if (mirror_num > map->num_stripes) {
5753 		mirror_num = 0;
5754 	}
5755 
5756 	num_stripes = 1;
5757 	stripe_index = 0;
5758 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5759 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5760 				&stripe_index);
5761 		if (!need_full_stripe(op))
5762 			mirror_num = 1;
5763 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
5764 		if (need_full_stripe(op))
5765 			num_stripes = map->num_stripes;
5766 		else if (mirror_num)
5767 			stripe_index = mirror_num - 1;
5768 		else {
5769 			stripe_index = find_live_mirror(fs_info, map, 0,
5770 					    dev_replace_is_ongoing);
5771 			mirror_num = stripe_index + 1;
5772 		}
5773 
5774 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
5775 		if (need_full_stripe(op)) {
5776 			num_stripes = map->num_stripes;
5777 		} else if (mirror_num) {
5778 			stripe_index = mirror_num - 1;
5779 		} else {
5780 			mirror_num = 1;
5781 		}
5782 
5783 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5784 		u32 factor = map->num_stripes / map->sub_stripes;
5785 
5786 		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5787 		stripe_index *= map->sub_stripes;
5788 
5789 		if (need_full_stripe(op))
5790 			num_stripes = map->sub_stripes;
5791 		else if (mirror_num)
5792 			stripe_index += mirror_num - 1;
5793 		else {
5794 			int old_stripe_index = stripe_index;
5795 			stripe_index = find_live_mirror(fs_info, map,
5796 					      stripe_index,
5797 					      dev_replace_is_ongoing);
5798 			mirror_num = stripe_index - old_stripe_index + 1;
5799 		}
5800 
5801 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5802 		if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
5803 			/* push stripe_nr back to the start of the full stripe */
5804 			stripe_nr = div64_u64(raid56_full_stripe_start,
5805 					stripe_len * nr_data_stripes(map));
5806 
5807 			/* RAID[56] write or recovery. Return all stripes */
5808 			num_stripes = map->num_stripes;
5809 			max_errors = nr_parity_stripes(map);
5810 
5811 			*length = map->stripe_len;
5812 			stripe_index = 0;
5813 			stripe_offset = 0;
5814 		} else {
5815 			/*
5816 			 * Mirror #0 or #1 means the original data block.
5817 			 * Mirror #2 is RAID5 parity block.
5818 			 * Mirror #3 is RAID6 Q block.
5819 			 */
5820 			stripe_nr = div_u64_rem(stripe_nr,
5821 					nr_data_stripes(map), &stripe_index);
5822 			if (mirror_num > 1)
5823 				stripe_index = nr_data_stripes(map) +
5824 						mirror_num - 2;
5825 
5826 			/* We distribute the parity blocks across stripes */
5827 			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
5828 					&stripe_index);
5829 			if (!need_full_stripe(op) && mirror_num <= 1)
5830 				mirror_num = 1;
5831 		}
5832 	} else {
5833 		/*
5834 		 * after this, stripe_nr is the number of stripes on this
5835 		 * device we have to walk to find the data, and stripe_index is
5836 		 * the number of our device in the stripe array
5837 		 */
5838 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5839 				&stripe_index);
5840 		mirror_num = stripe_index + 1;
5841 	}
5842 	if (stripe_index >= map->num_stripes) {
5843 		btrfs_crit(fs_info,
5844 			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
5845 			   stripe_index, map->num_stripes);
5846 		ret = -EINVAL;
5847 		goto out;
5848 	}
5849 
5850 	num_alloc_stripes = num_stripes;
5851 	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
5852 		if (op == BTRFS_MAP_WRITE)
5853 			num_alloc_stripes <<= 1;
5854 		if (op == BTRFS_MAP_GET_READ_MIRRORS)
5855 			num_alloc_stripes++;
5856 		tgtdev_indexes = num_stripes;
5857 	}
5858 
5859 	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
5860 	if (!bbio) {
5861 		ret = -ENOMEM;
5862 		goto out;
5863 	}
5864 	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
5865 		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
5866 
5867 	/* build raid_map */
5868 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
5869 	    (need_full_stripe(op) || mirror_num > 1)) {
5870 		u64 tmp;
5871 		unsigned rot;
5872 
5873 		bbio->raid_map = (u64 *)((void *)bbio->stripes +
5874 				 sizeof(struct btrfs_bio_stripe) *
5875 				 num_alloc_stripes +
5876 				 sizeof(int) * tgtdev_indexes);
5877 
5878 		/* Work out the disk rotation on this stripe-set */
5879 		div_u64_rem(stripe_nr, num_stripes, &rot);
5880 
5881 		/* Fill in the logical address of each stripe */
5882 		tmp = stripe_nr * nr_data_stripes(map);
5883 		for (i = 0; i < nr_data_stripes(map); i++)
5884 			bbio->raid_map[(i+rot) % num_stripes] =
5885 				em->start + (tmp + i) * map->stripe_len;
5886 
5887 		bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
5888 		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5889 			bbio->raid_map[(i+rot+1) % num_stripes] =
5890 				RAID6_Q_STRIPE;
5891 	}
5892 
5893 
5894 	for (i = 0; i < num_stripes; i++) {
5895 		bbio->stripes[i].physical =
5896 			map->stripes[stripe_index].physical +
5897 			stripe_offset +
5898 			stripe_nr * map->stripe_len;
5899 		bbio->stripes[i].dev =
5900 			map->stripes[stripe_index].dev;
5901 		stripe_index++;
5902 	}
5903 
5904 	if (need_full_stripe(op))
5905 		max_errors = btrfs_chunk_max_errors(map);
5906 
5907 	if (bbio->raid_map)
5908 		sort_parity_stripes(bbio, num_stripes);
5909 
5910 	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
5911 	    need_full_stripe(op)) {
5912 		handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
5913 					  &max_errors);
5914 	}
5915 
5916 	*bbio_ret = bbio;
5917 	bbio->map_type = map->type;
5918 	bbio->num_stripes = num_stripes;
5919 	bbio->max_errors = max_errors;
5920 	bbio->mirror_num = mirror_num;
5921 
5922 	/*
5923 	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
5924 	 * mirror_num == num_stripes + 1 && dev_replace target drive is
5925 	 * available as a mirror
5926 	 */
5927 	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
5928 		WARN_ON(num_stripes > 1);
5929 		bbio->stripes[0].dev = dev_replace->tgtdev;
5930 		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
5931 		bbio->mirror_num = map->num_stripes + 1;
5932 	}
5933 out:
5934 	if (dev_replace_is_ongoing) {
5935 		btrfs_dev_replace_clear_lock_blocking(dev_replace);
5936 		btrfs_dev_replace_read_unlock(dev_replace);
5937 	}
5938 	free_extent_map(em);
5939 	return ret;
5940 }
5941 
btrfs_map_block(struct btrfs_fs_info * fs_info,enum btrfs_map_op op,u64 logical,u64 * length,struct btrfs_bio ** bbio_ret,int mirror_num)5942 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
5943 		      u64 logical, u64 *length,
5944 		      struct btrfs_bio **bbio_ret, int mirror_num)
5945 {
5946 	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
5947 				 mirror_num, 0);
5948 }
5949 
5950 /* For Scrub/replace */
btrfs_map_sblock(struct btrfs_fs_info * fs_info,enum btrfs_map_op op,u64 logical,u64 * length,struct btrfs_bio ** bbio_ret)5951 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
5952 		     u64 logical, u64 *length,
5953 		     struct btrfs_bio **bbio_ret)
5954 {
5955 	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
5956 }
5957 
btrfs_rmap_block(struct btrfs_fs_info * fs_info,u64 chunk_start,u64 physical,u64 ** logical,int * naddrs,int * stripe_len)5958 int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
5959 		     u64 physical, u64 **logical, int *naddrs, int *stripe_len)
5960 {
5961 	struct extent_map *em;
5962 	struct map_lookup *map;
5963 	u64 *buf;
5964 	u64 bytenr;
5965 	u64 length;
5966 	u64 stripe_nr;
5967 	u64 rmap_len;
5968 	int i, j, nr = 0;
5969 
5970 	em = get_chunk_map(fs_info, chunk_start, 1);
5971 	if (IS_ERR(em))
5972 		return -EIO;
5973 
5974 	map = em->map_lookup;
5975 	length = em->len;
5976 	rmap_len = map->stripe_len;
5977 
5978 	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5979 		length = div_u64(length, map->num_stripes / map->sub_stripes);
5980 	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5981 		length = div_u64(length, map->num_stripes);
5982 	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5983 		length = div_u64(length, nr_data_stripes(map));
5984 		rmap_len = map->stripe_len * nr_data_stripes(map);
5985 	}
5986 
5987 	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
5988 	BUG_ON(!buf); /* -ENOMEM */
5989 
5990 	for (i = 0; i < map->num_stripes; i++) {
5991 		if (map->stripes[i].physical > physical ||
5992 		    map->stripes[i].physical + length <= physical)
5993 			continue;
5994 
5995 		stripe_nr = physical - map->stripes[i].physical;
5996 		stripe_nr = div64_u64(stripe_nr, map->stripe_len);
5997 
5998 		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5999 			stripe_nr = stripe_nr * map->num_stripes + i;
6000 			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
6001 		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6002 			stripe_nr = stripe_nr * map->num_stripes + i;
6003 		} /* else if RAID[56], multiply by nr_data_stripes().
6004 		   * Alternatively, just use rmap_len below instead of
6005 		   * map->stripe_len */
6006 
6007 		bytenr = chunk_start + stripe_nr * rmap_len;
6008 		WARN_ON(nr >= map->num_stripes);
6009 		for (j = 0; j < nr; j++) {
6010 			if (buf[j] == bytenr)
6011 				break;
6012 		}
6013 		if (j == nr) {
6014 			WARN_ON(nr >= map->num_stripes);
6015 			buf[nr++] = bytenr;
6016 		}
6017 	}
6018 
6019 	*logical = buf;
6020 	*naddrs = nr;
6021 	*stripe_len = rmap_len;
6022 
6023 	free_extent_map(em);
6024 	return 0;
6025 }
6026 
btrfs_end_bbio(struct btrfs_bio * bbio,struct bio * bio)6027 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
6028 {
6029 	bio->bi_private = bbio->private;
6030 	bio->bi_end_io = bbio->end_io;
6031 	bio_endio(bio);
6032 
6033 	btrfs_put_bbio(bbio);
6034 }
6035 
btrfs_end_bio(struct bio * bio)6036 static void btrfs_end_bio(struct bio *bio)
6037 {
6038 	struct btrfs_bio *bbio = bio->bi_private;
6039 	int is_orig_bio = 0;
6040 
6041 	if (bio->bi_status) {
6042 		atomic_inc(&bbio->error);
6043 		if (bio->bi_status == BLK_STS_IOERR ||
6044 		    bio->bi_status == BLK_STS_TARGET) {
6045 			unsigned int stripe_index =
6046 				btrfs_io_bio(bio)->stripe_index;
6047 			struct btrfs_device *dev;
6048 
6049 			BUG_ON(stripe_index >= bbio->num_stripes);
6050 			dev = bbio->stripes[stripe_index].dev;
6051 			if (dev->bdev) {
6052 				if (bio_op(bio) == REQ_OP_WRITE)
6053 					btrfs_dev_stat_inc_and_print(dev,
6054 						BTRFS_DEV_STAT_WRITE_ERRS);
6055 				else if (!(bio->bi_opf & REQ_RAHEAD))
6056 					btrfs_dev_stat_inc_and_print(dev,
6057 						BTRFS_DEV_STAT_READ_ERRS);
6058 				if (bio->bi_opf & REQ_PREFLUSH)
6059 					btrfs_dev_stat_inc_and_print(dev,
6060 						BTRFS_DEV_STAT_FLUSH_ERRS);
6061 			}
6062 		}
6063 	}
6064 
6065 	if (bio == bbio->orig_bio)
6066 		is_orig_bio = 1;
6067 
6068 	btrfs_bio_counter_dec(bbio->fs_info);
6069 
6070 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
6071 		if (!is_orig_bio) {
6072 			bio_put(bio);
6073 			bio = bbio->orig_bio;
6074 		}
6075 
6076 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6077 		/* only send an error to the higher layers if it is
6078 		 * beyond the tolerance of the btrfs bio
6079 		 */
6080 		if (atomic_read(&bbio->error) > bbio->max_errors) {
6081 			bio->bi_status = BLK_STS_IOERR;
6082 		} else {
6083 			/*
6084 			 * this bio is actually up to date, we didn't
6085 			 * go over the max number of errors
6086 			 */
6087 			bio->bi_status = BLK_STS_OK;
6088 		}
6089 
6090 		btrfs_end_bbio(bbio, bio);
6091 	} else if (!is_orig_bio) {
6092 		bio_put(bio);
6093 	}
6094 }
6095 
6096 /*
6097  * see run_scheduled_bios for a description of why bios are collected for
6098  * async submit.
6099  *
6100  * This will add one bio to the pending list for a device and make sure
6101  * the work struct is scheduled.
6102  */
btrfs_schedule_bio(struct btrfs_device * device,struct bio * bio)6103 static noinline void btrfs_schedule_bio(struct btrfs_device *device,
6104 					struct bio *bio)
6105 {
6106 	struct btrfs_fs_info *fs_info = device->fs_info;
6107 	int should_queue = 1;
6108 	struct btrfs_pending_bios *pending_bios;
6109 
6110 	/* don't bother with additional async steps for reads, right now */
6111 	if (bio_op(bio) == REQ_OP_READ) {
6112 		btrfsic_submit_bio(bio);
6113 		return;
6114 	}
6115 
6116 	WARN_ON(bio->bi_next);
6117 	bio->bi_next = NULL;
6118 
6119 	spin_lock(&device->io_lock);
6120 	if (op_is_sync(bio->bi_opf))
6121 		pending_bios = &device->pending_sync_bios;
6122 	else
6123 		pending_bios = &device->pending_bios;
6124 
6125 	if (pending_bios->tail)
6126 		pending_bios->tail->bi_next = bio;
6127 
6128 	pending_bios->tail = bio;
6129 	if (!pending_bios->head)
6130 		pending_bios->head = bio;
6131 	if (device->running_pending)
6132 		should_queue = 0;
6133 
6134 	spin_unlock(&device->io_lock);
6135 
6136 	if (should_queue)
6137 		btrfs_queue_work(fs_info->submit_workers, &device->work);
6138 }
6139 
submit_stripe_bio(struct btrfs_bio * bbio,struct bio * bio,u64 physical,int dev_nr,int async)6140 static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
6141 			      u64 physical, int dev_nr, int async)
6142 {
6143 	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
6144 	struct btrfs_fs_info *fs_info = bbio->fs_info;
6145 
6146 	bio->bi_private = bbio;
6147 	btrfs_io_bio(bio)->stripe_index = dev_nr;
6148 	bio->bi_end_io = btrfs_end_bio;
6149 	bio->bi_iter.bi_sector = physical >> 9;
6150 	btrfs_debug_in_rcu(fs_info,
6151 	"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6152 		bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
6153 		(u_long)dev->bdev->bd_dev, rcu_str_deref(dev->name), dev->devid,
6154 		bio->bi_iter.bi_size);
6155 	bio_set_dev(bio, dev->bdev);
6156 
6157 	btrfs_bio_counter_inc_noblocked(fs_info);
6158 
6159 	if (async)
6160 		btrfs_schedule_bio(dev, bio);
6161 	else
6162 		btrfsic_submit_bio(bio);
6163 }
6164 
bbio_error(struct btrfs_bio * bbio,struct bio * bio,u64 logical)6165 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6166 {
6167 	atomic_inc(&bbio->error);
6168 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
6169 		/* Should be the original bio. */
6170 		WARN_ON(bio != bbio->orig_bio);
6171 
6172 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6173 		bio->bi_iter.bi_sector = logical >> 9;
6174 		if (atomic_read(&bbio->error) > bbio->max_errors)
6175 			bio->bi_status = BLK_STS_IOERR;
6176 		else
6177 			bio->bi_status = BLK_STS_OK;
6178 		btrfs_end_bbio(bbio, bio);
6179 	}
6180 }
6181 
btrfs_map_bio(struct btrfs_fs_info * fs_info,struct bio * bio,int mirror_num,int async_submit)6182 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6183 			   int mirror_num, int async_submit)
6184 {
6185 	struct btrfs_device *dev;
6186 	struct bio *first_bio = bio;
6187 	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
6188 	u64 length = 0;
6189 	u64 map_length;
6190 	int ret;
6191 	int dev_nr;
6192 	int total_devs;
6193 	struct btrfs_bio *bbio = NULL;
6194 
6195 	length = bio->bi_iter.bi_size;
6196 	map_length = length;
6197 
6198 	btrfs_bio_counter_inc_blocked(fs_info);
6199 	ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
6200 				&map_length, &bbio, mirror_num, 1);
6201 	if (ret) {
6202 		btrfs_bio_counter_dec(fs_info);
6203 		return errno_to_blk_status(ret);
6204 	}
6205 
6206 	total_devs = bbio->num_stripes;
6207 	bbio->orig_bio = first_bio;
6208 	bbio->private = first_bio->bi_private;
6209 	bbio->end_io = first_bio->bi_end_io;
6210 	bbio->fs_info = fs_info;
6211 	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6212 
6213 	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6214 	    ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
6215 		/* In this case, map_length has been set to the length of
6216 		   a single stripe; not the whole write */
6217 		if (bio_op(bio) == REQ_OP_WRITE) {
6218 			ret = raid56_parity_write(fs_info, bio, bbio,
6219 						  map_length);
6220 		} else {
6221 			ret = raid56_parity_recover(fs_info, bio, bbio,
6222 						    map_length, mirror_num, 1);
6223 		}
6224 
6225 		btrfs_bio_counter_dec(fs_info);
6226 		return errno_to_blk_status(ret);
6227 	}
6228 
6229 	if (map_length < length) {
6230 		btrfs_crit(fs_info,
6231 			   "mapping failed logical %llu bio len %llu len %llu",
6232 			   logical, length, map_length);
6233 		BUG();
6234 	}
6235 
6236 	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6237 		dev = bbio->stripes[dev_nr].dev;
6238 		if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
6239 						   &dev->dev_state) ||
6240 		    (bio_op(first_bio) == REQ_OP_WRITE &&
6241 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
6242 			bbio_error(bbio, first_bio, logical);
6243 			continue;
6244 		}
6245 
6246 		if (dev_nr < total_devs - 1)
6247 			bio = btrfs_bio_clone(first_bio);
6248 		else
6249 			bio = first_bio;
6250 
6251 		submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical,
6252 				  dev_nr, async_submit);
6253 	}
6254 	btrfs_bio_counter_dec(fs_info);
6255 	return BLK_STS_OK;
6256 }
6257 
6258 /*
6259  * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6260  * return NULL.
6261  *
6262  * If devid and uuid are both specified, the match must be exact, otherwise
6263  * only devid is used.
6264  *
6265  * If @seed is true, traverse through the seed devices.
6266  */
btrfs_find_device(struct btrfs_fs_devices * fs_devices,u64 devid,u8 * uuid,u8 * fsid,bool seed)6267 struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
6268 					u64 devid, u8 *uuid, u8 *fsid,
6269 					bool seed)
6270 {
6271 	struct btrfs_device *device;
6272 
6273 	while (fs_devices) {
6274 		if (!fsid ||
6275 		    !memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) {
6276 			list_for_each_entry(device, &fs_devices->devices,
6277 					    dev_list) {
6278 				if (device->devid == devid &&
6279 				    (!uuid || memcmp(device->uuid, uuid,
6280 						     BTRFS_UUID_SIZE) == 0))
6281 					return device;
6282 			}
6283 		}
6284 		if (seed)
6285 			fs_devices = fs_devices->seed;
6286 		else
6287 			return NULL;
6288 	}
6289 	return NULL;
6290 }
6291 
add_missing_dev(struct btrfs_fs_devices * fs_devices,u64 devid,u8 * dev_uuid)6292 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6293 					    u64 devid, u8 *dev_uuid)
6294 {
6295 	struct btrfs_device *device;
6296 	unsigned int nofs_flag;
6297 
6298 	/*
6299 	 * We call this under the chunk_mutex, so we want to use NOFS for this
6300 	 * allocation, however we don't want to change btrfs_alloc_device() to
6301 	 * always do NOFS because we use it in a lot of other GFP_KERNEL safe
6302 	 * places.
6303 	 */
6304 	nofs_flag = memalloc_nofs_save();
6305 	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6306 	memalloc_nofs_restore(nofs_flag);
6307 	if (IS_ERR(device))
6308 		return device;
6309 
6310 	list_add(&device->dev_list, &fs_devices->devices);
6311 	device->fs_devices = fs_devices;
6312 	fs_devices->num_devices++;
6313 
6314 	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6315 	fs_devices->missing_devices++;
6316 
6317 	return device;
6318 }
6319 
6320 /**
6321  * btrfs_alloc_device - allocate struct btrfs_device
6322  * @fs_info:	used only for generating a new devid, can be NULL if
6323  *		devid is provided (i.e. @devid != NULL).
6324  * @devid:	a pointer to devid for this device.  If NULL a new devid
6325  *		is generated.
6326  * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
6327  *		is generated.
6328  *
6329  * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6330  * on error.  Returned struct is not linked onto any lists and must be
6331  * destroyed with btrfs_free_device.
6332  */
btrfs_alloc_device(struct btrfs_fs_info * fs_info,const u64 * devid,const u8 * uuid)6333 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6334 					const u64 *devid,
6335 					const u8 *uuid)
6336 {
6337 	struct btrfs_device *dev;
6338 	u64 tmp;
6339 
6340 	if (WARN_ON(!devid && !fs_info))
6341 		return ERR_PTR(-EINVAL);
6342 
6343 	dev = __alloc_device();
6344 	if (IS_ERR(dev))
6345 		return dev;
6346 
6347 	if (devid)
6348 		tmp = *devid;
6349 	else {
6350 		int ret;
6351 
6352 		ret = find_next_devid(fs_info, &tmp);
6353 		if (ret) {
6354 			btrfs_free_device(dev);
6355 			return ERR_PTR(ret);
6356 		}
6357 	}
6358 	dev->devid = tmp;
6359 
6360 	if (uuid)
6361 		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6362 	else
6363 		generate_random_uuid(dev->uuid);
6364 
6365 	btrfs_init_work(&dev->work, btrfs_submit_helper,
6366 			pending_bios_fn, NULL, NULL);
6367 
6368 	return dev;
6369 }
6370 
btrfs_report_missing_device(struct btrfs_fs_info * fs_info,u64 devid,u8 * uuid,bool error)6371 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6372 					u64 devid, u8 *uuid, bool error)
6373 {
6374 	if (error)
6375 		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6376 			      devid, uuid);
6377 	else
6378 		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6379 			      devid, uuid);
6380 }
6381 
read_one_chunk(struct btrfs_fs_info * fs_info,struct btrfs_key * key,struct extent_buffer * leaf,struct btrfs_chunk * chunk)6382 static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
6383 			  struct extent_buffer *leaf,
6384 			  struct btrfs_chunk *chunk)
6385 {
6386 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
6387 	struct map_lookup *map;
6388 	struct extent_map *em;
6389 	u64 logical;
6390 	u64 length;
6391 	u64 devid;
6392 	u8 uuid[BTRFS_UUID_SIZE];
6393 	int num_stripes;
6394 	int ret;
6395 	int i;
6396 
6397 	logical = key->offset;
6398 	length = btrfs_chunk_length(leaf, chunk);
6399 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6400 
6401 	/*
6402 	 * Only need to verify chunk item if we're reading from sys chunk array,
6403 	 * as chunk item in tree block is already verified by tree-checker.
6404 	 */
6405 	if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
6406 		ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, logical);
6407 		if (ret)
6408 			return ret;
6409 	}
6410 
6411 	read_lock(&map_tree->map_tree.lock);
6412 	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
6413 	read_unlock(&map_tree->map_tree.lock);
6414 
6415 	/* already mapped? */
6416 	if (em && em->start <= logical && em->start + em->len > logical) {
6417 		free_extent_map(em);
6418 		return 0;
6419 	} else if (em) {
6420 		free_extent_map(em);
6421 	}
6422 
6423 	em = alloc_extent_map();
6424 	if (!em)
6425 		return -ENOMEM;
6426 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6427 	if (!map) {
6428 		free_extent_map(em);
6429 		return -ENOMEM;
6430 	}
6431 
6432 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6433 	em->map_lookup = map;
6434 	em->start = logical;
6435 	em->len = length;
6436 	em->orig_start = 0;
6437 	em->block_start = 0;
6438 	em->block_len = em->len;
6439 
6440 	map->num_stripes = num_stripes;
6441 	map->io_width = btrfs_chunk_io_width(leaf, chunk);
6442 	map->io_align = btrfs_chunk_io_align(leaf, chunk);
6443 	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6444 	map->type = btrfs_chunk_type(leaf, chunk);
6445 	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6446 	map->verified_stripes = 0;
6447 	for (i = 0; i < num_stripes; i++) {
6448 		map->stripes[i].physical =
6449 			btrfs_stripe_offset_nr(leaf, chunk, i);
6450 		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6451 		read_extent_buffer(leaf, uuid, (unsigned long)
6452 				   btrfs_stripe_dev_uuid_nr(chunk, i),
6453 				   BTRFS_UUID_SIZE);
6454 		map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
6455 						devid, uuid, NULL, true);
6456 		if (!map->stripes[i].dev &&
6457 		    !btrfs_test_opt(fs_info, DEGRADED)) {
6458 			free_extent_map(em);
6459 			btrfs_report_missing_device(fs_info, devid, uuid, true);
6460 			return -ENOENT;
6461 		}
6462 		if (!map->stripes[i].dev) {
6463 			map->stripes[i].dev =
6464 				add_missing_dev(fs_info->fs_devices, devid,
6465 						uuid);
6466 			if (IS_ERR(map->stripes[i].dev)) {
6467 				free_extent_map(em);
6468 				btrfs_err(fs_info,
6469 					"failed to init missing dev %llu: %ld",
6470 					devid, PTR_ERR(map->stripes[i].dev));
6471 				return PTR_ERR(map->stripes[i].dev);
6472 			}
6473 			btrfs_report_missing_device(fs_info, devid, uuid, false);
6474 		}
6475 		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
6476 				&(map->stripes[i].dev->dev_state));
6477 
6478 	}
6479 
6480 	write_lock(&map_tree->map_tree.lock);
6481 	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
6482 	write_unlock(&map_tree->map_tree.lock);
6483 	if (ret < 0) {
6484 		btrfs_err(fs_info,
6485 			  "failed to add chunk map, start=%llu len=%llu: %d",
6486 			  em->start, em->len, ret);
6487 	}
6488 	free_extent_map(em);
6489 
6490 	return ret;
6491 }
6492 
fill_device_from_item(struct extent_buffer * leaf,struct btrfs_dev_item * dev_item,struct btrfs_device * device)6493 static void fill_device_from_item(struct extent_buffer *leaf,
6494 				 struct btrfs_dev_item *dev_item,
6495 				 struct btrfs_device *device)
6496 {
6497 	unsigned long ptr;
6498 
6499 	device->devid = btrfs_device_id(leaf, dev_item);
6500 	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
6501 	device->total_bytes = device->disk_total_bytes;
6502 	device->commit_total_bytes = device->disk_total_bytes;
6503 	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6504 	device->commit_bytes_used = device->bytes_used;
6505 	device->type = btrfs_device_type(leaf, dev_item);
6506 	device->io_align = btrfs_device_io_align(leaf, dev_item);
6507 	device->io_width = btrfs_device_io_width(leaf, dev_item);
6508 	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6509 	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6510 	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
6511 
6512 	ptr = btrfs_device_uuid(dev_item);
6513 	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6514 }
6515 
open_seed_devices(struct btrfs_fs_info * fs_info,u8 * fsid)6516 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
6517 						  u8 *fsid)
6518 {
6519 	struct btrfs_fs_devices *fs_devices;
6520 	int ret;
6521 
6522 	lockdep_assert_held(&uuid_mutex);
6523 	ASSERT(fsid);
6524 
6525 	fs_devices = fs_info->fs_devices->seed;
6526 	while (fs_devices) {
6527 		if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
6528 			return fs_devices;
6529 
6530 		fs_devices = fs_devices->seed;
6531 	}
6532 
6533 	fs_devices = find_fsid(fsid);
6534 	if (!fs_devices) {
6535 		if (!btrfs_test_opt(fs_info, DEGRADED))
6536 			return ERR_PTR(-ENOENT);
6537 
6538 		fs_devices = alloc_fs_devices(fsid);
6539 		if (IS_ERR(fs_devices))
6540 			return fs_devices;
6541 
6542 		fs_devices->seeding = 1;
6543 		fs_devices->opened = 1;
6544 		return fs_devices;
6545 	}
6546 
6547 	fs_devices = clone_fs_devices(fs_devices);
6548 	if (IS_ERR(fs_devices))
6549 		return fs_devices;
6550 
6551 	ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
6552 	if (ret) {
6553 		free_fs_devices(fs_devices);
6554 		fs_devices = ERR_PTR(ret);
6555 		goto out;
6556 	}
6557 
6558 	if (!fs_devices->seeding) {
6559 		close_fs_devices(fs_devices);
6560 		free_fs_devices(fs_devices);
6561 		fs_devices = ERR_PTR(-EINVAL);
6562 		goto out;
6563 	}
6564 
6565 	fs_devices->seed = fs_info->fs_devices->seed;
6566 	fs_info->fs_devices->seed = fs_devices;
6567 out:
6568 	return fs_devices;
6569 }
6570 
read_one_dev(struct btrfs_fs_info * fs_info,struct extent_buffer * leaf,struct btrfs_dev_item * dev_item)6571 static int read_one_dev(struct btrfs_fs_info *fs_info,
6572 			struct extent_buffer *leaf,
6573 			struct btrfs_dev_item *dev_item)
6574 {
6575 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6576 	struct btrfs_device *device;
6577 	u64 devid;
6578 	int ret;
6579 	u8 fs_uuid[BTRFS_FSID_SIZE];
6580 	u8 dev_uuid[BTRFS_UUID_SIZE];
6581 
6582 	devid = btrfs_device_id(leaf, dev_item);
6583 	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
6584 			   BTRFS_UUID_SIZE);
6585 	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
6586 			   BTRFS_FSID_SIZE);
6587 
6588 	if (memcmp(fs_uuid, fs_info->fsid, BTRFS_FSID_SIZE)) {
6589 		fs_devices = open_seed_devices(fs_info, fs_uuid);
6590 		if (IS_ERR(fs_devices))
6591 			return PTR_ERR(fs_devices);
6592 	}
6593 
6594 	device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
6595 				   fs_uuid, true);
6596 	if (!device) {
6597 		if (!btrfs_test_opt(fs_info, DEGRADED)) {
6598 			btrfs_report_missing_device(fs_info, devid,
6599 							dev_uuid, true);
6600 			return -ENOENT;
6601 		}
6602 
6603 		device = add_missing_dev(fs_devices, devid, dev_uuid);
6604 		if (IS_ERR(device)) {
6605 			btrfs_err(fs_info,
6606 				"failed to add missing dev %llu: %ld",
6607 				devid, PTR_ERR(device));
6608 			return PTR_ERR(device);
6609 		}
6610 		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
6611 	} else {
6612 		if (!device->bdev) {
6613 			if (!btrfs_test_opt(fs_info, DEGRADED)) {
6614 				btrfs_report_missing_device(fs_info,
6615 						devid, dev_uuid, true);
6616 				return -ENOENT;
6617 			}
6618 			btrfs_report_missing_device(fs_info, devid,
6619 							dev_uuid, false);
6620 		}
6621 
6622 		if (!device->bdev &&
6623 		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
6624 			/*
6625 			 * this happens when a device that was properly setup
6626 			 * in the device info lists suddenly goes bad.
6627 			 * device->bdev is NULL, and so we have to set
6628 			 * device->missing to one here
6629 			 */
6630 			device->fs_devices->missing_devices++;
6631 			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6632 		}
6633 
6634 		/* Move the device to its own fs_devices */
6635 		if (device->fs_devices != fs_devices) {
6636 			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
6637 							&device->dev_state));
6638 
6639 			list_move(&device->dev_list, &fs_devices->devices);
6640 			device->fs_devices->num_devices--;
6641 			fs_devices->num_devices++;
6642 
6643 			device->fs_devices->missing_devices--;
6644 			fs_devices->missing_devices++;
6645 
6646 			device->fs_devices = fs_devices;
6647 		}
6648 	}
6649 
6650 	if (device->fs_devices != fs_info->fs_devices) {
6651 		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
6652 		if (device->generation !=
6653 		    btrfs_device_generation(leaf, dev_item))
6654 			return -EINVAL;
6655 	}
6656 
6657 	fill_device_from_item(leaf, dev_item, device);
6658 	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
6659 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
6660 	   !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
6661 		device->fs_devices->total_rw_bytes += device->total_bytes;
6662 		atomic64_add(device->total_bytes - device->bytes_used,
6663 				&fs_info->free_chunk_space);
6664 	}
6665 	ret = 0;
6666 	return ret;
6667 }
6668 
btrfs_read_sys_array(struct btrfs_fs_info * fs_info)6669 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
6670 {
6671 	struct btrfs_root *root = fs_info->tree_root;
6672 	struct btrfs_super_block *super_copy = fs_info->super_copy;
6673 	struct extent_buffer *sb;
6674 	struct btrfs_disk_key *disk_key;
6675 	struct btrfs_chunk *chunk;
6676 	u8 *array_ptr;
6677 	unsigned long sb_array_offset;
6678 	int ret = 0;
6679 	u32 num_stripes;
6680 	u32 array_size;
6681 	u32 len = 0;
6682 	u32 cur_offset;
6683 	u64 type;
6684 	struct btrfs_key key;
6685 
6686 	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
6687 	/*
6688 	 * This will create extent buffer of nodesize, superblock size is
6689 	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
6690 	 * overallocate but we can keep it as-is, only the first page is used.
6691 	 */
6692 	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
6693 	if (IS_ERR(sb))
6694 		return PTR_ERR(sb);
6695 	set_extent_buffer_uptodate(sb);
6696 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6697 	/*
6698 	 * The sb extent buffer is artificial and just used to read the system array.
6699 	 * set_extent_buffer_uptodate() call does not properly mark all it's
6700 	 * pages up-to-date when the page is larger: extent does not cover the
6701 	 * whole page and consequently check_page_uptodate does not find all
6702 	 * the page's extents up-to-date (the hole beyond sb),
6703 	 * write_extent_buffer then triggers a WARN_ON.
6704 	 *
6705 	 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
6706 	 * but sb spans only this function. Add an explicit SetPageUptodate call
6707 	 * to silence the warning eg. on PowerPC 64.
6708 	 */
6709 	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
6710 		SetPageUptodate(sb->pages[0]);
6711 
6712 	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
6713 	array_size = btrfs_super_sys_array_size(super_copy);
6714 
6715 	array_ptr = super_copy->sys_chunk_array;
6716 	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
6717 	cur_offset = 0;
6718 
6719 	while (cur_offset < array_size) {
6720 		disk_key = (struct btrfs_disk_key *)array_ptr;
6721 		len = sizeof(*disk_key);
6722 		if (cur_offset + len > array_size)
6723 			goto out_short_read;
6724 
6725 		btrfs_disk_key_to_cpu(&key, disk_key);
6726 
6727 		array_ptr += len;
6728 		sb_array_offset += len;
6729 		cur_offset += len;
6730 
6731 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
6732 			chunk = (struct btrfs_chunk *)sb_array_offset;
6733 			/*
6734 			 * At least one btrfs_chunk with one stripe must be
6735 			 * present, exact stripe count check comes afterwards
6736 			 */
6737 			len = btrfs_chunk_item_size(1);
6738 			if (cur_offset + len > array_size)
6739 				goto out_short_read;
6740 
6741 			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
6742 			if (!num_stripes) {
6743 				btrfs_err(fs_info,
6744 					"invalid number of stripes %u in sys_array at offset %u",
6745 					num_stripes, cur_offset);
6746 				ret = -EIO;
6747 				break;
6748 			}
6749 
6750 			type = btrfs_chunk_type(sb, chunk);
6751 			if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
6752 				btrfs_err(fs_info,
6753 			    "invalid chunk type %llu in sys_array at offset %u",
6754 					type, cur_offset);
6755 				ret = -EIO;
6756 				break;
6757 			}
6758 
6759 			len = btrfs_chunk_item_size(num_stripes);
6760 			if (cur_offset + len > array_size)
6761 				goto out_short_read;
6762 
6763 			ret = read_one_chunk(fs_info, &key, sb, chunk);
6764 			if (ret)
6765 				break;
6766 		} else {
6767 			btrfs_err(fs_info,
6768 			    "unexpected item type %u in sys_array at offset %u",
6769 				  (u32)key.type, cur_offset);
6770 			ret = -EIO;
6771 			break;
6772 		}
6773 		array_ptr += len;
6774 		sb_array_offset += len;
6775 		cur_offset += len;
6776 	}
6777 	clear_extent_buffer_uptodate(sb);
6778 	free_extent_buffer_stale(sb);
6779 	return ret;
6780 
6781 out_short_read:
6782 	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
6783 			len, cur_offset);
6784 	clear_extent_buffer_uptodate(sb);
6785 	free_extent_buffer_stale(sb);
6786 	return -EIO;
6787 }
6788 
6789 /*
6790  * Check if all chunks in the fs are OK for read-write degraded mount
6791  *
6792  * If the @failing_dev is specified, it's accounted as missing.
6793  *
6794  * Return true if all chunks meet the minimal RW mount requirements.
6795  * Return false if any chunk doesn't meet the minimal RW mount requirements.
6796  */
btrfs_check_rw_degradable(struct btrfs_fs_info * fs_info,struct btrfs_device * failing_dev)6797 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
6798 					struct btrfs_device *failing_dev)
6799 {
6800 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
6801 	struct extent_map *em;
6802 	u64 next_start = 0;
6803 	bool ret = true;
6804 
6805 	read_lock(&map_tree->map_tree.lock);
6806 	em = lookup_extent_mapping(&map_tree->map_tree, 0, (u64)-1);
6807 	read_unlock(&map_tree->map_tree.lock);
6808 	/* No chunk at all? Return false anyway */
6809 	if (!em) {
6810 		ret = false;
6811 		goto out;
6812 	}
6813 	while (em) {
6814 		struct map_lookup *map;
6815 		int missing = 0;
6816 		int max_tolerated;
6817 		int i;
6818 
6819 		map = em->map_lookup;
6820 		max_tolerated =
6821 			btrfs_get_num_tolerated_disk_barrier_failures(
6822 					map->type);
6823 		for (i = 0; i < map->num_stripes; i++) {
6824 			struct btrfs_device *dev = map->stripes[i].dev;
6825 
6826 			if (!dev || !dev->bdev ||
6827 			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
6828 			    dev->last_flush_error)
6829 				missing++;
6830 			else if (failing_dev && failing_dev == dev)
6831 				missing++;
6832 		}
6833 		if (missing > max_tolerated) {
6834 			if (!failing_dev)
6835 				btrfs_warn(fs_info,
6836 	"chunk %llu missing %d devices, max tolerance is %d for writeable mount",
6837 				   em->start, missing, max_tolerated);
6838 			free_extent_map(em);
6839 			ret = false;
6840 			goto out;
6841 		}
6842 		next_start = extent_map_end(em);
6843 		free_extent_map(em);
6844 
6845 		read_lock(&map_tree->map_tree.lock);
6846 		em = lookup_extent_mapping(&map_tree->map_tree, next_start,
6847 					   (u64)(-1) - next_start);
6848 		read_unlock(&map_tree->map_tree.lock);
6849 	}
6850 out:
6851 	return ret;
6852 }
6853 
btrfs_read_chunk_tree(struct btrfs_fs_info * fs_info)6854 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
6855 {
6856 	struct btrfs_root *root = fs_info->chunk_root;
6857 	struct btrfs_path *path;
6858 	struct extent_buffer *leaf;
6859 	struct btrfs_key key;
6860 	struct btrfs_key found_key;
6861 	int ret;
6862 	int slot;
6863 	u64 total_dev = 0;
6864 
6865 	path = btrfs_alloc_path();
6866 	if (!path)
6867 		return -ENOMEM;
6868 
6869 	/*
6870 	 * uuid_mutex is needed only if we are mounting a sprout FS
6871 	 * otherwise we don't need it.
6872 	 */
6873 	mutex_lock(&uuid_mutex);
6874 	mutex_lock(&fs_info->chunk_mutex);
6875 
6876 	/*
6877 	 * It is possible for mount and umount to race in such a way that
6878 	 * we execute this code path, but open_fs_devices failed to clear
6879 	 * total_rw_bytes. We certainly want it cleared before reading the
6880 	 * device items, so clear it here.
6881 	 */
6882 	fs_info->fs_devices->total_rw_bytes = 0;
6883 
6884 	/*
6885 	 * Read all device items, and then all the chunk items. All
6886 	 * device items are found before any chunk item (their object id
6887 	 * is smaller than the lowest possible object id for a chunk
6888 	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
6889 	 */
6890 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
6891 	key.offset = 0;
6892 	key.type = 0;
6893 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6894 	if (ret < 0)
6895 		goto error;
6896 	while (1) {
6897 		leaf = path->nodes[0];
6898 		slot = path->slots[0];
6899 		if (slot >= btrfs_header_nritems(leaf)) {
6900 			ret = btrfs_next_leaf(root, path);
6901 			if (ret == 0)
6902 				continue;
6903 			if (ret < 0)
6904 				goto error;
6905 			break;
6906 		}
6907 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
6908 		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
6909 			struct btrfs_dev_item *dev_item;
6910 			dev_item = btrfs_item_ptr(leaf, slot,
6911 						  struct btrfs_dev_item);
6912 			ret = read_one_dev(fs_info, leaf, dev_item);
6913 			if (ret)
6914 				goto error;
6915 			total_dev++;
6916 		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
6917 			struct btrfs_chunk *chunk;
6918 			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
6919 			ret = read_one_chunk(fs_info, &found_key, leaf, chunk);
6920 			if (ret)
6921 				goto error;
6922 		}
6923 		path->slots[0]++;
6924 	}
6925 
6926 	/*
6927 	 * After loading chunk tree, we've got all device information,
6928 	 * do another round of validation checks.
6929 	 */
6930 	if (total_dev != fs_info->fs_devices->total_devices) {
6931 		btrfs_warn(fs_info,
6932 "super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit",
6933 			  btrfs_super_num_devices(fs_info->super_copy),
6934 			  total_dev);
6935 		fs_info->fs_devices->total_devices = total_dev;
6936 		btrfs_set_super_num_devices(fs_info->super_copy, total_dev);
6937 	}
6938 	if (btrfs_super_total_bytes(fs_info->super_copy) <
6939 	    fs_info->fs_devices->total_rw_bytes) {
6940 		btrfs_err(fs_info,
6941 	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
6942 			  btrfs_super_total_bytes(fs_info->super_copy),
6943 			  fs_info->fs_devices->total_rw_bytes);
6944 		ret = -EINVAL;
6945 		goto error;
6946 	}
6947 	ret = 0;
6948 error:
6949 	mutex_unlock(&fs_info->chunk_mutex);
6950 	mutex_unlock(&uuid_mutex);
6951 
6952 	btrfs_free_path(path);
6953 	return ret;
6954 }
6955 
btrfs_init_devices_late(struct btrfs_fs_info * fs_info)6956 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
6957 {
6958 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6959 	struct btrfs_device *device;
6960 
6961 	while (fs_devices) {
6962 		mutex_lock(&fs_devices->device_list_mutex);
6963 		list_for_each_entry(device, &fs_devices->devices, dev_list)
6964 			device->fs_info = fs_info;
6965 		mutex_unlock(&fs_devices->device_list_mutex);
6966 
6967 		fs_devices = fs_devices->seed;
6968 	}
6969 }
6970 
__btrfs_reset_dev_stats(struct btrfs_device * dev)6971 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
6972 {
6973 	int i;
6974 
6975 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6976 		btrfs_dev_stat_reset(dev, i);
6977 }
6978 
btrfs_init_dev_stats(struct btrfs_fs_info * fs_info)6979 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
6980 {
6981 	struct btrfs_key key;
6982 	struct btrfs_key found_key;
6983 	struct btrfs_root *dev_root = fs_info->dev_root;
6984 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6985 	struct extent_buffer *eb;
6986 	int slot;
6987 	int ret = 0;
6988 	struct btrfs_device *device;
6989 	struct btrfs_path *path = NULL;
6990 	int i;
6991 
6992 	path = btrfs_alloc_path();
6993 	if (!path) {
6994 		ret = -ENOMEM;
6995 		goto out;
6996 	}
6997 
6998 	mutex_lock(&fs_devices->device_list_mutex);
6999 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
7000 		int item_size;
7001 		struct btrfs_dev_stats_item *ptr;
7002 
7003 		key.objectid = BTRFS_DEV_STATS_OBJECTID;
7004 		key.type = BTRFS_PERSISTENT_ITEM_KEY;
7005 		key.offset = device->devid;
7006 		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
7007 		if (ret) {
7008 			__btrfs_reset_dev_stats(device);
7009 			device->dev_stats_valid = 1;
7010 			btrfs_release_path(path);
7011 			continue;
7012 		}
7013 		slot = path->slots[0];
7014 		eb = path->nodes[0];
7015 		btrfs_item_key_to_cpu(eb, &found_key, slot);
7016 		item_size = btrfs_item_size_nr(eb, slot);
7017 
7018 		ptr = btrfs_item_ptr(eb, slot,
7019 				     struct btrfs_dev_stats_item);
7020 
7021 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7022 			if (item_size >= (1 + i) * sizeof(__le64))
7023 				btrfs_dev_stat_set(device, i,
7024 					btrfs_dev_stats_value(eb, ptr, i));
7025 			else
7026 				btrfs_dev_stat_reset(device, i);
7027 		}
7028 
7029 		device->dev_stats_valid = 1;
7030 		btrfs_dev_stat_print_on_load(device);
7031 		btrfs_release_path(path);
7032 	}
7033 	mutex_unlock(&fs_devices->device_list_mutex);
7034 
7035 out:
7036 	btrfs_free_path(path);
7037 	return ret < 0 ? ret : 0;
7038 }
7039 
update_dev_stat_item(struct btrfs_trans_handle * trans,struct btrfs_device * device)7040 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7041 				struct btrfs_device *device)
7042 {
7043 	struct btrfs_fs_info *fs_info = trans->fs_info;
7044 	struct btrfs_root *dev_root = fs_info->dev_root;
7045 	struct btrfs_path *path;
7046 	struct btrfs_key key;
7047 	struct extent_buffer *eb;
7048 	struct btrfs_dev_stats_item *ptr;
7049 	int ret;
7050 	int i;
7051 
7052 	key.objectid = BTRFS_DEV_STATS_OBJECTID;
7053 	key.type = BTRFS_PERSISTENT_ITEM_KEY;
7054 	key.offset = device->devid;
7055 
7056 	path = btrfs_alloc_path();
7057 	if (!path)
7058 		return -ENOMEM;
7059 	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7060 	if (ret < 0) {
7061 		btrfs_warn_in_rcu(fs_info,
7062 			"error %d while searching for dev_stats item for device %s",
7063 			      ret, rcu_str_deref(device->name));
7064 		goto out;
7065 	}
7066 
7067 	if (ret == 0 &&
7068 	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7069 		/* need to delete old one and insert a new one */
7070 		ret = btrfs_del_item(trans, dev_root, path);
7071 		if (ret != 0) {
7072 			btrfs_warn_in_rcu(fs_info,
7073 				"delete too small dev_stats item for device %s failed %d",
7074 				      rcu_str_deref(device->name), ret);
7075 			goto out;
7076 		}
7077 		ret = 1;
7078 	}
7079 
7080 	if (ret == 1) {
7081 		/* need to insert a new item */
7082 		btrfs_release_path(path);
7083 		ret = btrfs_insert_empty_item(trans, dev_root, path,
7084 					      &key, sizeof(*ptr));
7085 		if (ret < 0) {
7086 			btrfs_warn_in_rcu(fs_info,
7087 				"insert dev_stats item for device %s failed %d",
7088 				rcu_str_deref(device->name), ret);
7089 			goto out;
7090 		}
7091 	}
7092 
7093 	eb = path->nodes[0];
7094 	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7095 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7096 		btrfs_set_dev_stats_value(eb, ptr, i,
7097 					  btrfs_dev_stat_read(device, i));
7098 	btrfs_mark_buffer_dirty(eb);
7099 
7100 out:
7101 	btrfs_free_path(path);
7102 	return ret;
7103 }
7104 
7105 /*
7106  * called from commit_transaction. Writes all changed device stats to disk.
7107  */
btrfs_run_dev_stats(struct btrfs_trans_handle * trans,struct btrfs_fs_info * fs_info)7108 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
7109 			struct btrfs_fs_info *fs_info)
7110 {
7111 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7112 	struct btrfs_device *device;
7113 	int stats_cnt;
7114 	int ret = 0;
7115 
7116 	mutex_lock(&fs_devices->device_list_mutex);
7117 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
7118 		stats_cnt = atomic_read(&device->dev_stats_ccnt);
7119 		if (!device->dev_stats_valid || stats_cnt == 0)
7120 			continue;
7121 
7122 
7123 		/*
7124 		 * There is a LOAD-LOAD control dependency between the value of
7125 		 * dev_stats_ccnt and updating the on-disk values which requires
7126 		 * reading the in-memory counters. Such control dependencies
7127 		 * require explicit read memory barriers.
7128 		 *
7129 		 * This memory barriers pairs with smp_mb__before_atomic in
7130 		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
7131 		 * barrier implied by atomic_xchg in
7132 		 * btrfs_dev_stats_read_and_reset
7133 		 */
7134 		smp_rmb();
7135 
7136 		ret = update_dev_stat_item(trans, device);
7137 		if (!ret)
7138 			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7139 	}
7140 	mutex_unlock(&fs_devices->device_list_mutex);
7141 
7142 	return ret;
7143 }
7144 
btrfs_dev_stat_inc_and_print(struct btrfs_device * dev,int index)7145 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7146 {
7147 	btrfs_dev_stat_inc(dev, index);
7148 	btrfs_dev_stat_print_on_error(dev);
7149 }
7150 
btrfs_dev_stat_print_on_error(struct btrfs_device * dev)7151 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
7152 {
7153 	if (!dev->dev_stats_valid)
7154 		return;
7155 	btrfs_err_rl_in_rcu(dev->fs_info,
7156 		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7157 			   rcu_str_deref(dev->name),
7158 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7159 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7160 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7161 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7162 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7163 }
7164 
btrfs_dev_stat_print_on_load(struct btrfs_device * dev)7165 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7166 {
7167 	int i;
7168 
7169 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7170 		if (btrfs_dev_stat_read(dev, i) != 0)
7171 			break;
7172 	if (i == BTRFS_DEV_STAT_VALUES_MAX)
7173 		return; /* all values == 0, suppress message */
7174 
7175 	btrfs_info_in_rcu(dev->fs_info,
7176 		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7177 	       rcu_str_deref(dev->name),
7178 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7179 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7180 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7181 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7182 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7183 }
7184 
btrfs_get_dev_stats(struct btrfs_fs_info * fs_info,struct btrfs_ioctl_get_dev_stats * stats)7185 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7186 			struct btrfs_ioctl_get_dev_stats *stats)
7187 {
7188 	struct btrfs_device *dev;
7189 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7190 	int i;
7191 
7192 	mutex_lock(&fs_devices->device_list_mutex);
7193 	dev = btrfs_find_device(fs_info->fs_devices, stats->devid,
7194 				NULL, NULL, true);
7195 	mutex_unlock(&fs_devices->device_list_mutex);
7196 
7197 	if (!dev) {
7198 		btrfs_warn(fs_info, "get dev_stats failed, device not found");
7199 		return -ENODEV;
7200 	} else if (!dev->dev_stats_valid) {
7201 		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
7202 		return -ENODEV;
7203 	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7204 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7205 			if (stats->nr_items > i)
7206 				stats->values[i] =
7207 					btrfs_dev_stat_read_and_reset(dev, i);
7208 			else
7209 				btrfs_dev_stat_reset(dev, i);
7210 		}
7211 		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
7212 			   current->comm, task_pid_nr(current));
7213 	} else {
7214 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7215 			if (stats->nr_items > i)
7216 				stats->values[i] = btrfs_dev_stat_read(dev, i);
7217 	}
7218 	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
7219 		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
7220 	return 0;
7221 }
7222 
btrfs_scratch_superblocks(struct block_device * bdev,const char * device_path)7223 void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path)
7224 {
7225 	struct buffer_head *bh;
7226 	struct btrfs_super_block *disk_super;
7227 	int copy_num;
7228 
7229 	if (!bdev)
7230 		return;
7231 
7232 	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX;
7233 		copy_num++) {
7234 
7235 		if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
7236 			continue;
7237 
7238 		disk_super = (struct btrfs_super_block *)bh->b_data;
7239 
7240 		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
7241 		set_buffer_dirty(bh);
7242 		sync_dirty_buffer(bh);
7243 		brelse(bh);
7244 	}
7245 
7246 	/* Notify udev that device has changed */
7247 	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
7248 
7249 	/* Update ctime/mtime for device path for libblkid */
7250 	update_dev_time(device_path);
7251 }
7252 
7253 /*
7254  * Update the size of all devices, which is used for writing out the
7255  * super blocks.
7256  */
btrfs_update_commit_device_size(struct btrfs_fs_info * fs_info)7257 void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
7258 {
7259 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7260 	struct btrfs_device *curr, *next;
7261 
7262 	if (list_empty(&fs_devices->resized_devices))
7263 		return;
7264 
7265 	mutex_lock(&fs_devices->device_list_mutex);
7266 	mutex_lock(&fs_info->chunk_mutex);
7267 	list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
7268 				 resized_list) {
7269 		list_del_init(&curr->resized_list);
7270 		curr->commit_total_bytes = curr->disk_total_bytes;
7271 	}
7272 	mutex_unlock(&fs_info->chunk_mutex);
7273 	mutex_unlock(&fs_devices->device_list_mutex);
7274 }
7275 
7276 /* Must be invoked during the transaction commit */
btrfs_update_commit_device_bytes_used(struct btrfs_transaction * trans)7277 void btrfs_update_commit_device_bytes_used(struct btrfs_transaction *trans)
7278 {
7279 	struct btrfs_fs_info *fs_info = trans->fs_info;
7280 	struct extent_map *em;
7281 	struct map_lookup *map;
7282 	struct btrfs_device *dev;
7283 	int i;
7284 
7285 	if (list_empty(&trans->pending_chunks))
7286 		return;
7287 
7288 	/* In order to kick the device replace finish process */
7289 	mutex_lock(&fs_info->chunk_mutex);
7290 	list_for_each_entry(em, &trans->pending_chunks, list) {
7291 		map = em->map_lookup;
7292 
7293 		for (i = 0; i < map->num_stripes; i++) {
7294 			dev = map->stripes[i].dev;
7295 			dev->commit_bytes_used = dev->bytes_used;
7296 			dev->has_pending_chunks = false;
7297 		}
7298 	}
7299 	mutex_unlock(&fs_info->chunk_mutex);
7300 }
7301 
btrfs_set_fs_info_ptr(struct btrfs_fs_info * fs_info)7302 void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
7303 {
7304 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7305 	while (fs_devices) {
7306 		fs_devices->fs_info = fs_info;
7307 		fs_devices = fs_devices->seed;
7308 	}
7309 }
7310 
btrfs_reset_fs_info_ptr(struct btrfs_fs_info * fs_info)7311 void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
7312 {
7313 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7314 	while (fs_devices) {
7315 		fs_devices->fs_info = NULL;
7316 		fs_devices = fs_devices->seed;
7317 	}
7318 }
7319 
7320 /*
7321  * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
7322  */
btrfs_bg_type_to_factor(u64 flags)7323 int btrfs_bg_type_to_factor(u64 flags)
7324 {
7325 	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
7326 		     BTRFS_BLOCK_GROUP_RAID10))
7327 		return 2;
7328 	return 1;
7329 }
7330 
7331 
calc_stripe_length(u64 type,u64 chunk_len,int num_stripes)7332 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
7333 {
7334 	int index = btrfs_bg_flags_to_raid_index(type);
7335 	int ncopies = btrfs_raid_array[index].ncopies;
7336 	int data_stripes;
7337 
7338 	switch (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
7339 	case BTRFS_BLOCK_GROUP_RAID5:
7340 		data_stripes = num_stripes - 1;
7341 		break;
7342 	case BTRFS_BLOCK_GROUP_RAID6:
7343 		data_stripes = num_stripes - 2;
7344 		break;
7345 	default:
7346 		data_stripes = num_stripes / ncopies;
7347 		break;
7348 	}
7349 	return div_u64(chunk_len, data_stripes);
7350 }
7351 
verify_one_dev_extent(struct btrfs_fs_info * fs_info,u64 chunk_offset,u64 devid,u64 physical_offset,u64 physical_len)7352 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
7353 				 u64 chunk_offset, u64 devid,
7354 				 u64 physical_offset, u64 physical_len)
7355 {
7356 	struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
7357 	struct extent_map *em;
7358 	struct map_lookup *map;
7359 	struct btrfs_device *dev;
7360 	u64 stripe_len;
7361 	bool found = false;
7362 	int ret = 0;
7363 	int i;
7364 
7365 	read_lock(&em_tree->lock);
7366 	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
7367 	read_unlock(&em_tree->lock);
7368 
7369 	if (!em) {
7370 		btrfs_err(fs_info,
7371 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
7372 			  physical_offset, devid);
7373 		ret = -EUCLEAN;
7374 		goto out;
7375 	}
7376 
7377 	map = em->map_lookup;
7378 	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
7379 	if (physical_len != stripe_len) {
7380 		btrfs_err(fs_info,
7381 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
7382 			  physical_offset, devid, em->start, physical_len,
7383 			  stripe_len);
7384 		ret = -EUCLEAN;
7385 		goto out;
7386 	}
7387 
7388 	for (i = 0; i < map->num_stripes; i++) {
7389 		if (map->stripes[i].dev->devid == devid &&
7390 		    map->stripes[i].physical == physical_offset) {
7391 			found = true;
7392 			if (map->verified_stripes >= map->num_stripes) {
7393 				btrfs_err(fs_info,
7394 				"too many dev extents for chunk %llu found",
7395 					  em->start);
7396 				ret = -EUCLEAN;
7397 				goto out;
7398 			}
7399 			map->verified_stripes++;
7400 			break;
7401 		}
7402 	}
7403 	if (!found) {
7404 		btrfs_err(fs_info,
7405 	"dev extent physical offset %llu devid %llu has no corresponding chunk",
7406 			physical_offset, devid);
7407 		ret = -EUCLEAN;
7408 	}
7409 
7410 	/* Make sure no dev extent is beyond device bondary */
7411 	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
7412 	if (!dev) {
7413 		btrfs_err(fs_info, "failed to find devid %llu", devid);
7414 		ret = -EUCLEAN;
7415 		goto out;
7416 	}
7417 
7418 	/* It's possible this device is a dummy for seed device */
7419 	if (dev->disk_total_bytes == 0) {
7420 		dev = btrfs_find_device(fs_info->fs_devices->seed, devid,
7421 					NULL, NULL, false);
7422 		if (!dev) {
7423 			btrfs_err(fs_info, "failed to find seed devid %llu",
7424 				  devid);
7425 			ret = -EUCLEAN;
7426 			goto out;
7427 		}
7428 	}
7429 
7430 	if (physical_offset + physical_len > dev->disk_total_bytes) {
7431 		btrfs_err(fs_info,
7432 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
7433 			  devid, physical_offset, physical_len,
7434 			  dev->disk_total_bytes);
7435 		ret = -EUCLEAN;
7436 		goto out;
7437 	}
7438 out:
7439 	free_extent_map(em);
7440 	return ret;
7441 }
7442 
verify_chunk_dev_extent_mapping(struct btrfs_fs_info * fs_info)7443 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
7444 {
7445 	struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
7446 	struct extent_map *em;
7447 	struct rb_node *node;
7448 	int ret = 0;
7449 
7450 	read_lock(&em_tree->lock);
7451 	for (node = rb_first(&em_tree->map); node; node = rb_next(node)) {
7452 		em = rb_entry(node, struct extent_map, rb_node);
7453 		if (em->map_lookup->num_stripes !=
7454 		    em->map_lookup->verified_stripes) {
7455 			btrfs_err(fs_info,
7456 			"chunk %llu has missing dev extent, have %d expect %d",
7457 				  em->start, em->map_lookup->verified_stripes,
7458 				  em->map_lookup->num_stripes);
7459 			ret = -EUCLEAN;
7460 			goto out;
7461 		}
7462 	}
7463 out:
7464 	read_unlock(&em_tree->lock);
7465 	return ret;
7466 }
7467 
7468 /*
7469  * Ensure that all dev extents are mapped to correct chunk, otherwise
7470  * later chunk allocation/free would cause unexpected behavior.
7471  *
7472  * NOTE: This will iterate through the whole device tree, which should be of
7473  * the same size level as the chunk tree.  This slightly increases mount time.
7474  */
btrfs_verify_dev_extents(struct btrfs_fs_info * fs_info)7475 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
7476 {
7477 	struct btrfs_path *path;
7478 	struct btrfs_root *root = fs_info->dev_root;
7479 	struct btrfs_key key;
7480 	u64 prev_devid = 0;
7481 	u64 prev_dev_ext_end = 0;
7482 	int ret = 0;
7483 
7484 	key.objectid = 1;
7485 	key.type = BTRFS_DEV_EXTENT_KEY;
7486 	key.offset = 0;
7487 
7488 	path = btrfs_alloc_path();
7489 	if (!path)
7490 		return -ENOMEM;
7491 
7492 	path->reada = READA_FORWARD;
7493 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7494 	if (ret < 0)
7495 		goto out;
7496 
7497 	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
7498 		ret = btrfs_next_item(root, path);
7499 		if (ret < 0)
7500 			goto out;
7501 		/* No dev extents at all? Not good */
7502 		if (ret > 0) {
7503 			ret = -EUCLEAN;
7504 			goto out;
7505 		}
7506 	}
7507 	while (1) {
7508 		struct extent_buffer *leaf = path->nodes[0];
7509 		struct btrfs_dev_extent *dext;
7510 		int slot = path->slots[0];
7511 		u64 chunk_offset;
7512 		u64 physical_offset;
7513 		u64 physical_len;
7514 		u64 devid;
7515 
7516 		btrfs_item_key_to_cpu(leaf, &key, slot);
7517 		if (key.type != BTRFS_DEV_EXTENT_KEY)
7518 			break;
7519 		devid = key.objectid;
7520 		physical_offset = key.offset;
7521 
7522 		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
7523 		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
7524 		physical_len = btrfs_dev_extent_length(leaf, dext);
7525 
7526 		/* Check if this dev extent overlaps with the previous one */
7527 		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
7528 			btrfs_err(fs_info,
7529 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
7530 				  devid, physical_offset, prev_dev_ext_end);
7531 			ret = -EUCLEAN;
7532 			goto out;
7533 		}
7534 
7535 		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
7536 					    physical_offset, physical_len);
7537 		if (ret < 0)
7538 			goto out;
7539 		prev_devid = devid;
7540 		prev_dev_ext_end = physical_offset + physical_len;
7541 
7542 		ret = btrfs_next_item(root, path);
7543 		if (ret < 0)
7544 			goto out;
7545 		if (ret > 0) {
7546 			ret = 0;
7547 			break;
7548 		}
7549 	}
7550 
7551 	/* Ensure all chunks have corresponding dev extents */
7552 	ret = verify_chunk_dev_extent_mapping(fs_info);
7553 out:
7554 	btrfs_free_path(path);
7555 	return ret;
7556 }
7557