/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

/*
 * Written by Alexander Zarochentcev.
 *
 * The kernel part of the (on-line) reiserfs resizer.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "reiserfs.h"
#include <linux/buffer_head.h>

int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
{
        int err = 0;
        struct reiserfs_super_block *sb;
        struct reiserfs_bitmap_info *bitmap;
        struct reiserfs_bitmap_info *info;
        struct reiserfs_bitmap_info *old_bitmap = SB_AP_BITMAP(s);
        struct buffer_head *bh;
        struct reiserfs_transaction_handle th;
        unsigned int bmap_nr_new, bmap_nr;
        unsigned int block_r_new, block_r;

        struct reiserfs_list_bitmap *jb;
        struct reiserfs_list_bitmap jbitmap[JOURNAL_NUM_BITMAPS];

        unsigned long int block_count, free_blocks;
        int i;
        int copy_size;
        int depth;

        sb = SB_DISK_SUPER_BLOCK(s);

        if (SB_BLOCK_COUNT(s) >= block_count_new) {
                printk("can\'t shrink filesystem on-line\n");
                return -EINVAL;
        }

        /*
         * check the device size: the last block of the resized filesystem
         * must be readable
         */
        depth = reiserfs_write_unlock_nested(s);
        bh = sb_bread(s, block_count_new - 1);
        reiserfs_write_lock_nested(s, depth);
        if (!bh) {
                printk("reiserfs_resize: can\'t read last block\n");
                return -EINVAL;
        }
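        /* the read was only a size check; discard the buffer */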
        bforget(bh);

        /*
         * old disk layout detection; those partitions can be mounted, but
         * cannot be resized
         */
        if (SB_BUFFER_WITH_SB(s)->b_blocknr * SB_BUFFER_WITH_SB(s)->b_size
            != REISERFS_DISK_OFFSET_IN_BYTES) {
                printk("reiserfs_resize: unable to resize a reiserfs without distributed bitmap (fs version < 3.5.12)\n");
                return -ENOTSUPP;
        }

        /* count the bits in use in the last bitmap block of the current fs */
        block_r = SB_BLOCK_COUNT(s) -
            (reiserfs_bmap_count(s) - 1) * s->s_blocksize * 8;

        /*
         * count bitmap blocks in the new fs, rounding up; block_r_new is
         * the number of blocks covered by the new last bitmap block
         */
        bmap_nr_new = block_count_new / (s->s_blocksize * 8);
        block_r_new = block_count_new - bmap_nr_new * s->s_blocksize * 8;
        if (block_r_new)
                bmap_nr_new++;
        else
                block_r_new = s->s_blocksize * 8;

        /* save old values */
        block_count = SB_BLOCK_COUNT(s);
        bmap_nr = reiserfs_bmap_count(s);

        /* resizing of reiserfs bitmaps (journal and real), if needed */
        if (bmap_nr_new > bmap_nr) {
                /* reallocate journal bitmaps */
                if (reiserfs_allocate_list_bitmaps(s, jbitmap, bmap_nr_new) < 0) {
                        printk("reiserfs_resize: unable to allocate memory for journal bitmaps\n");
                        return -ENOMEM;
                }
                /*
                 * the new journal bitmaps are zero filled, now we copy in
                 * the bitmap node pointers from the old journal bitmap
                 * structs, and then transfer the new data structures
                 * into the journal struct.
                 *
                 * using the copy_size var below allows this code to work for
                 * both shrinking and expanding the FS.
                 */
                copy_size = bmap_nr_new < bmap_nr ? bmap_nr_new : bmap_nr;
                copy_size *= sizeof(struct reiserfs_list_bitmap_node *);
                for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
                        struct reiserfs_bitmap_node **node_tmp;
                        jb = SB_JOURNAL(s)->j_list_bitmap + i;
                        memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size);

                        /*
                         * just in case vfree schedules on us, copy the new
                         * pointer into the journal struct before freeing the
                         * old one
                         */
                        node_tmp = jb->bitmaps;
                        jb->bitmaps = jbitmap[i].bitmaps;
                        vfree(node_tmp);
                }

                /*
                 * allocate additional bitmap blocks, reallocate
                 * array of bitmap block pointers
                 */
                bitmap = vzalloc(array_size(bmap_nr_new,
                                            sizeof(struct reiserfs_bitmap_info)));
                if (!bitmap) {
                        /*
                         * the journal bitmaps are now oversized, but the
                         * memory isn't leaked, so this is acceptable
                         */
                        printk("reiserfs_resize: unable to allocate memory.\n");
                        return -ENOMEM;
                }
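                /* carry the cached info for the existing bitmap blocks over to the new array */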
                for (i = 0; i < bmap_nr; i++)
                        bitmap[i] = old_bitmap[i];

                /*
                 * This doesn't go through the journal, but it doesn't have to.
                 * The changes are still atomic: We're synced up when the
                 * journal transaction begins, and the new bitmaps don't
                 * matter if the transaction fails.
                 */
                for (i = bmap_nr; i < bmap_nr_new; i++) {
                        int depth;
                        /*
                         * don't use read_bitmap_block since it will cache
                         * the uninitialized bitmap
                         */
                        depth = reiserfs_write_unlock_nested(s);
                        bh = sb_bread(s, i * s->s_blocksize * 8);
                        reiserfs_write_lock_nested(s, depth);
                        if (!bh) {
                                vfree(bitmap);
                                return -EIO;
                        }
                        memset(bh->b_data, 0, sb_blocksize(sb));
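                        /* bit 0 describes the bitmap block itself, which is always in use */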
                        reiserfs_set_le_bit(0, bh->b_data);
                        reiserfs_cache_bitmap_metadata(s, bh, bitmap + i);

                        set_buffer_uptodate(bh);
                        mark_buffer_dirty(bh);
                        depth = reiserfs_write_unlock_nested(s);
                        sync_dirty_buffer(bh);
                        reiserfs_write_lock_nested(s, depth);
                        /* update the in-memory bitmap_info for the new bitmap block */
                        bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
                        brelse(bh);
                }
                /* install the new bitmap array and free the old one */
                SB_AP_BITMAP(s) = bitmap;
                vfree(old_bitmap);
        }

        /*
         * Begin the transaction. If this fails, that's fine: we have
         * incorrect bitmaps in memory now, but none of it will ever
         * reach the disk anyway.
         */
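        /* 10 blocks of journal space should be plenty for the few buffers logged below */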
        err = journal_begin(&th, s, 10);
        if (err)
                return err;

        /* Extend the old last bitmap block: new blocks have been made available */
        info = SB_AP_BITMAP(s) + bmap_nr - 1;
        bh = reiserfs_read_bitmap_block(s, bmap_nr - 1);
        if (!bh) {
                int jerr = journal_end(&th);
                if (jerr)
                        return jerr;
                return -EIO;
        }

        reiserfs_prepare_for_journal(s, bh, 1);
        for (i = block_r; i < s->s_blocksize * 8; i++)
                reiserfs_clear_le_bit(i, bh->b_data);
        info->free_count += s->s_blocksize * 8 - block_r;

        journal_mark_dirty(&th, bh);
        brelse(bh);

        /* Correct the new last bitmap block: it may not be completely full */
        info = SB_AP_BITMAP(s) + bmap_nr_new - 1;
        bh = reiserfs_read_bitmap_block(s, bmap_nr_new - 1);
        if (!bh) {
                int jerr = journal_end(&th);
                if (jerr)
                        return jerr;
                return -EIO;
        }

        reiserfs_prepare_for_journal(s, bh, 1);
        for (i = block_r_new; i < s->s_blocksize * 8; i++)
                reiserfs_set_le_bit(i, bh->b_data);
        journal_mark_dirty(&th, bh);
        brelse(bh);

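        /*
         * the bits just set describe blocks beyond the new end of the
         * filesystem, so they must not be counted as free
         */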
        info->free_count -= s->s_blocksize * 8 - block_r_new;

        /* update the super block: new free count, block count and bitmap count */
        reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
        free_blocks = SB_FREE_BLOCKS(s);
        PUT_SB_FREE_BLOCKS(s,
                           free_blocks + (block_count_new - block_count -
                                          (bmap_nr_new - bmap_nr)));
        PUT_SB_BLOCK_COUNT(s, block_count_new);
        PUT_SB_BMAP_NR(s, bmap_would_wrap(bmap_nr_new) ? : bmap_nr_new);

        journal_mark_dirty(&th, SB_BUFFER_WITH_SB(s));

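        /* make journal_end() commit this transaction rather than leave it pending */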
        SB_JOURNAL(s)->j_must_wait = 1;
        return journal_end(&th);
}