1 /*
2 * balloc.c
3 *
4 * PURPOSE
5 * Block allocation handling routines for the OSTA-UDF(tm) filesystem.
6 *
7 * COPYRIGHT
8 * This file is distributed under the terms of the GNU General Public
9 * License (GPL). Copies of the GPL can be obtained from:
10 * ftp://prep.ai.mit.edu/pub/gnu/GPL
11 * Each contributing author retains all rights to their own work.
12 *
13 * (C) 1999-2001 Ben Fennema
14 * (C) 1999 Stelias Computing Inc
15 *
16 * HISTORY
17 *
18 * 02/24/99 blf Created.
19 *
20 */
21
22 #include "udfdecl.h"
23
24 #include <linux/bitops.h>
25
26 #include "udf_i.h"
27 #include "udf_sb.h"
28
29 #define udf_clear_bit __test_and_clear_bit_le
30 #define udf_set_bit __test_and_set_bit_le
31 #define udf_test_bit test_bit_le
32 #define udf_find_next_one_bit find_next_bit_le
33
/*
 * read_block_bitmap - load one block of the space bitmap and verify it
 * @sb:		super block
 * @bitmap:	in-core description of the partition's space bitmap
 * @block:	block offset within the bitmap extent to read
 * @bitmap_nr:	slot in bitmap->s_block_bitmap[] to cache the buffer in
 *
 * Returns 0 on success, -EIO if the block cannot be read, or
 * -EFSCORRUPTED if the bitmap contradicts itself (see below).
 */
static int read_block_bitmap(struct super_block *sb,
			     struct udf_bitmap *bitmap, unsigned int block,
			     unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int i;
	int max_bits, off, count;
	struct kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

	/* Cache the buffer head (even if NULL) before checking for failure,
	 * so the slot reflects the read attempt. */
	bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
	bitmap->s_block_bitmap[bitmap_nr] = bh;
	if (!bh)
		return -EIO;

	/*
	 * Check consistency of Space Bitmap buffer.  In this bitmap a set
	 * bit means "free" (freeing sets bits, allocating clears them), so
	 * the range checked below must be all-zero; a set bit there is
	 * treated as on-disk corruption.
	 * NOTE(review): the checked range is derived from s_nr_groups and
	 * the spaceBitmapDesc header size — presumably it covers the blocks
	 * occupied by the bitmap itself; confirm against the UDF layout.
	 */
	max_bits = sb->s_blocksize * 8;
	if (!bitmap_nr) {
		/* First bitmap block begins with the spaceBitmapDesc tag;
		 * the bitmap proper starts after it. */
		off = sizeof(struct spaceBitmapDesc) << 3;
		count = min(max_bits - off, bitmap->s_nr_groups);
	} else {
		/*
		 * Rough check if bitmap number is too big to have any bitmap
		 * blocks reserved.
		 */
		if (bitmap_nr >
		    (bitmap->s_nr_groups >> (sb->s_blocksize_bits + 3)) + 2)
			return 0;
		off = 0;
		count = bitmap->s_nr_groups - bitmap_nr * max_bits +
			(sizeof(struct spaceBitmapDesc) << 3);
		count = min(count, max_bits);
	}

	for (i = 0; i < count; i++)
		if (udf_test_bit(i + off, bh->b_data))
			return -EFSCORRUPTED;
	return 0;
}
75
__load_block_bitmap(struct super_block * sb,struct udf_bitmap * bitmap,unsigned int block_group)76 static int __load_block_bitmap(struct super_block *sb,
77 struct udf_bitmap *bitmap,
78 unsigned int block_group)
79 {
80 int retval = 0;
81 int nr_groups = bitmap->s_nr_groups;
82
83 if (block_group >= nr_groups) {
84 udf_debug("block_group (%u) > nr_groups (%d)\n",
85 block_group, nr_groups);
86 }
87
88 if (bitmap->s_block_bitmap[block_group])
89 return block_group;
90
91 retval = read_block_bitmap(sb, bitmap, block_group, block_group);
92 if (retval < 0)
93 return retval;
94
95 return block_group;
96 }
97
load_block_bitmap(struct super_block * sb,struct udf_bitmap * bitmap,unsigned int block_group)98 static inline int load_block_bitmap(struct super_block *sb,
99 struct udf_bitmap *bitmap,
100 unsigned int block_group)
101 {
102 int slot;
103
104 slot = __load_block_bitmap(sb, bitmap, block_group);
105
106 if (slot < 0)
107 return slot;
108
109 if (!bitmap->s_block_bitmap[slot])
110 return -EIO;
111
112 return slot;
113 }
114
/*
 * udf_add_free_space - adjust the free-space count in the LVID
 * @sb:		super block
 * @partition:	partition whose freeSpaceTable entry to adjust
 * @cnt:	signed-in-effect delta (callers pass -n to debit)
 *
 * No-op when the logical volume integrity descriptor is not loaded.
 */
static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
{
	struct buffer_head *lvid_bh = UDF_SB(sb)->s_lvid_bh;
	struct logicalVolIntegrityDesc *lvid;

	if (!lvid_bh)
		return;

	lvid = (struct logicalVolIntegrityDesc *)lvid_bh->b_data;
	le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
	udf_updated_lvid(sb);
}
127
/*
 * udf_bitmap_free_blocks - free a run of blocks in a bitmap-tracked partition
 * @sb:		super block
 * @bitmap:	space bitmap covering the partition
 * @bloc:	partition + logical block of the start of the run
 * @offset:	additional block offset from @bloc->logicalBlockNum
 * @count:	number of blocks to free
 *
 * Sets the corresponding bits (set == free) in the space bitmap, splitting
 * the run at bitmap-block boundaries, and credits the freed space to the
 * logical volume integrity descriptor.
 */
static void udf_bitmap_free_blocks(struct super_block *sb,
				   struct udf_bitmap *bitmap,
				   struct kernel_lb_addr *bloc,
				   uint32_t offset,
				   uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	struct udf_part_map *partmap;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
	/* Reject runs that wrap around 32 bits or extend past the partition. */
	if (bloc->logicalBlockNum + count < count ||
	    (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
		udf_debug("%u < %d || %u + %u > %u\n",
			  bloc->logicalBlockNum, 0,
			  bloc->logicalBlockNum, count,
			  partmap->s_partition_len);
		goto error_return;
	}

	/* Bias by the header bits occupied by the spaceBitmapDesc. */
	block = bloc->logicalBlockNum + offset +
		(sizeof(struct spaceBitmapDesc) << 3);

	do {
		overflow = 0;
		block_group = block >> (sb->s_blocksize_bits + 3);
		bit = block % (sb->s_blocksize << 3);

		/*
		 * Check to see if we are freeing blocks across a group boundary.
		 * If so, handle only the part inside this group now and carry
		 * the remainder into the next loop iteration.
		 */
		if (bit + count > (sb->s_blocksize << 3)) {
			overflow = bit + count - (sb->s_blocksize << 3);
			count -= overflow;
		}
		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;

		bh = bitmap->s_block_bitmap[bitmap_nr];
		for (i = 0; i < count; i++) {
			/* A bit already set indicates a double free; log and
			 * keep going rather than abort mid-run. */
			if (udf_set_bit(bit + i, bh->b_data)) {
				udf_debug("bit %lu already set\n", bit + i);
				udf_debug("byte=%2x\n",
					  ((__u8 *)bh->b_data)[(bit + i) >> 3]);
			}
		}
		/*
		 * NOTE(review): space is credited to sbi->s_partition, not to
		 * bloc->partitionReferenceNum — confirm these always match
		 * for callers of this path.
		 */
		udf_add_free_space(sb, sbi->s_partition, count);
		mark_buffer_dirty(bh);
		/* Continue with the part that spilled into the next group. */
		if (overflow) {
			block += count;
			count = overflow;
		}
	} while (overflow);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
}
193
/*
 * udf_bitmap_prealloc_blocks - allocate a contiguous run starting at a
 * fixed block from a bitmap-tracked partition.
 * @sb:		super block
 * @bitmap:	space bitmap covering the partition
 * @partition:	partition number the blocks are allocated from
 * @first_block: first block of the desired run
 * @block_count: maximum number of blocks to allocate
 *
 * Claims blocks starting exactly at @first_block for as long as they are
 * free (set bit == free; udf_clear_bit claims one), stopping at the first
 * busy block.  Returns the number of blocks actually allocated, possibly 0.
 *
 * Note: the previous version computed udf_compute_nr_groups() and a
 * group_start offset on every iteration but never used either value;
 * that dead code has been removed.
 */
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
				      struct udf_bitmap *bitmap,
				      uint16_t partition, uint32_t first_block,
				      uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group;
	int bitmap_nr;
	struct buffer_head *bh;
	__u32 part_len;

	mutex_lock(&sbi->s_alloc_mutex);
	part_len = sbi->s_partmaps[partition].s_partition_len;
	if (first_block >= part_len)
		goto out;

	/* Clamp the request to the end of the partition. */
	if (first_block + block_count > part_len)
		block_count = part_len - first_block;

	do {
		/* Bias by the header bits occupied by the spaceBitmapDesc. */
		block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
		block_group = block >> (sb->s_blocksize_bits + 3);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto out;
		bh = bitmap->s_block_bitmap[bitmap_nr];

		bit = block % (sb->s_blocksize << 3);

		/* Claim free bits until the request is filled, a busy block
		 * is hit, or we reach the end of this bitmap block. */
		while (bit < (sb->s_blocksize << 3) && block_count > 0) {
			if (!udf_clear_bit(bit, bh->b_data))
				goto out;
			block_count--;
			alloc_count++;
			bit++;
			block++;
		}
		mark_buffer_dirty(bh);
		/*
		 * NOTE(review): first_block is not advanced here, so a run
		 * crossing a group boundary re-enters with the same starting
		 * bit; that bit is now clear, so the udf_clear_bit test above
		 * terminates the allocation at the boundary.  Preserved
		 * as-is — confirm this truncation is intended.
		 */
	} while (block_count > 0);

out:
	/* Negative delta debits the allocated blocks from free space. */
	udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
243
/*
 * udf_bitmap_new_block - allocate a single block near @goal
 * @sb:		super block
 * @bitmap:	space bitmap covering the partition
 * @partition:	partition to allocate from
 * @goal:	preferred block number (hint only)
 * @err:	set to 0 on success, -ENOSPC or -EIO on failure
 *
 * Search order: the goal bit itself, then the rest of the goal's bitmap
 * block (word-wise, then byte-wise, then bit-wise), then every other
 * bitmap block.  Once a free bit is found, walk back over up to 7
 * preceding free bits so consecutive allocations pack together.
 * Returns the allocated partition-relative block number, or 0 on failure.
 */
static udf_pblk_t udf_bitmap_new_block(struct super_block *sb,
				       struct udf_bitmap *bitmap, uint16_t partition,
				       uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0;
	udf_pblk_t block;
	int block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	udf_pblk_t newblock = 0;

	*err = -ENOSPC;
	mutex_lock(&sbi->s_alloc_mutex);

repeat:
	/* An out-of-range goal simply falls back to the partition start. */
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	/* Bias by the header bits occupied by the spaceBitmapDesc. */
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	/* Group 0 begins with the descriptor header; skip it when scanning. */
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	/* Quick byte-wise probe: any byte != 0xFF... wait, memscan finds the
	 * first byte that IS 0xFF, i.e. a byte with all bits free. */
	ptr = memscan((char *)bh->b_data + group_start, 0xFF,
		      sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
		bit = block % (sb->s_blocksize << 3);
		/* Best case: the goal block itself is free. */
		if (udf_test_bit(bit, bh->b_data))
			goto got_block;

		/* Otherwise search within the goal's 64-bit word. */
		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;

		/* Then scan the rest of this bitmap block byte-wise for a
		 * fully-free byte... */
		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
			      sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto search_back;
		}

		/* ...and finally bit-wise for any free bit. */
		newbit = udf_find_next_one_bit(bh->b_data,
					       sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto got_block;
		}
	}

	/*
	 * Nothing near the goal.  First pass (i < nr_groups) does a fast
	 * byte-wise scan of every other group; the second pass re-scans
	 * bit-wise in case free bits exist only in partially-used bytes.
	 */
	for (i = 0; i < (nr_groups * 2); i++) {
		block_group++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups) {
			ptr = memscan((char *)bh->b_data + group_start, 0xFF,
				      sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		} else {
			bit = udf_find_next_one_bit(bh->b_data,
						    sb->s_blocksize << 3,
						    group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups * 2)) {
		/* No free block anywhere: newblock is still 0, *err -ENOSPC. */
		mutex_unlock(&sbi->s_alloc_mutex);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
					    group_start << 3);
	if (bit >= sb->s_blocksize << 3) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

search_back:
	/* Back up over at most 7 preceding free bits to pack allocations. */
	i = 0;
	while (i < 7 && bit > (group_start << 3) &&
	       udf_test_bit(bit - 1, bh->b_data)) {
		++i;
		--bit;
	}

got_block:
	/* Translate (group, bit) back to a partition-relative block number,
	 * undoing the spaceBitmapDesc header bias. */
	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	/* Claim the bit; if it was already clear (should not happen while
	 * holding s_alloc_mutex), restart the whole search. */
	if (!udf_clear_bit(bit, bh->b_data)) {
		udf_debug("bit already cleared for block %d\n", bit);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	/* Debit one block from the LVID free-space count. */
	udf_add_free_space(sb, partition, -1);
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;

error_return:
	*err = -EIO;
	mutex_unlock(&sbi->s_alloc_mutex);
	return 0;
}
370
/*
 * udf_table_free_blocks - free a run of blocks in a table-tracked partition
 * @sb:		super block
 * @table:	inode holding the unallocated-space table (extent list)
 * @bloc:	partition + logical block of the start of the run
 * @offset:	additional block offset from @bloc->logicalBlockNum
 * @count:	number of blocks to free
 *
 * Walks the free-space extent list trying to merge the freed run onto an
 * existing extent — either extending an extent that ends right before the
 * run, or pulling back one that starts right after it.  Whatever cannot
 * be merged is appended as a new extent at the current end position.
 */
static void udf_table_free_blocks(struct super_block *sb,
				  struct inode *table,
				  struct kernel_lb_addr *bloc,
				  uint32_t offset,
				  uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *partmap;
	uint32_t start, end;
	uint32_t elen;
	struct kernel_lb_addr eloc;
	struct extent_position oepos, epos;
	int8_t etype;
	struct udf_inode_info *iinfo;

	mutex_lock(&sbi->s_alloc_mutex);
	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
	/* Reject runs that wrap around 32 bits or extend past the partition. */
	if (bloc->logicalBlockNum + count < count ||
	    (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
		udf_debug("%u < %d || %u + %u > %u\n",
			  bloc->logicalBlockNum, 0,
			  bloc->logicalBlockNum, count,
			  partmap->s_partition_len);
		goto error_return;
	}

	iinfo = UDF_I(table);
	/*
	 * NOTE(review): space is credited to sbi->s_partition, not to
	 * bloc->partitionReferenceNum — confirm these always match here.
	 */
	udf_add_free_space(sb, sbi->s_partition, count);

	/* Inclusive range [start, end] of blocks being freed. */
	start = bloc->logicalBlockNum + offset;
	end = bloc->logicalBlockNum + offset + count - 1;

	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	epos.block = oepos.block = iinfo->i_location;
	epos.bh = oepos.bh = NULL;

	/*
	 * Scan the extent list; oepos trails epos by one descriptor so the
	 * extent just examined can be rewritten in place.
	 */
	while (count &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (((eloc.logicalBlockNum +
			(elen >> sb->s_blocksize_bits)) == start)) {
			/* Extent ends right before the freed run: extend it.
			 * Extent lengths are capped at 0x3FFFFFFF bytes, so a
			 * large run may only partially merge. */
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				start += tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		} else if (eloc.logicalBlockNum == (end + 1)) {
			/* Extent starts right after the freed run: grow it
			 * downwards (again honouring the length cap). */
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				end -= tmp;
				eloc.logicalBlockNum -= tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		}

		/* Advance the trailing position, swapping buffer references
		 * when the walk crossed into a new extent block. */
		if (epos.bh != oepos.bh) {
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = 0;
		} else {
			oepos.offset = epos.offset;
		}
	}

	if (count) {
		/*
		 * NOTE: we CANNOT use udf_add_aext here, as it can try to
		 * allocate a new block, and since we hold the super block
		 * lock already very bad things would happen :)
		 *
		 * We copy the behavior of udf_add_aext, but instead of
		 * trying to allocate a new block close to the existing one,
		 * we just steal a block from the extent we are trying to add.
		 *
		 * It would be nice if the blocks were close together, but it
		 * isn't required.
		 */

		int adsize;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
			adsize = sizeof(struct short_ad);
		else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
			adsize = sizeof(struct long_ad);
		else {
			/* Unknown allocation descriptor format: bail out. */
			brelse(oepos.bh);
			brelse(epos.bh);
			goto error_return;
		}

		/* No room for both the new extent and the continuation
		 * pointer in the current block? */
		if (epos.offset + (2 * adsize) > sb->s_blocksize) {
			/* Steal a block from the extent being free'd */
			udf_setup_indirect_aext(table, eloc.logicalBlockNum,
						&epos);

			eloc.logicalBlockNum++;
			elen -= sb->s_blocksize;
		}

		/* It's possible that stealing the block emptied the extent */
		if (elen)
			__udf_add_aext(table, &epos, &eloc, elen, 1);
	}

	brelse(epos.bh);
	brelse(oepos.bh);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}
511
/*
 * udf_table_prealloc_blocks - allocate a run starting exactly at
 * @first_block from a table-tracked partition.
 * @sb:		super block
 * @table:	inode holding the unallocated-space table
 * @partition:	partition number the blocks are allocated from
 * @first_block: required first block of the run
 * @block_count: maximum number of blocks to allocate
 *
 * Succeeds only if some free extent begins precisely at @first_block; up
 * to @block_count blocks are then carved off its front.  Returns the
 * number of blocks allocated (0 if no extent starts there).
 */
static int udf_table_prealloc_blocks(struct super_block *sb,
				     struct inode *table, uint16_t partition,
				     uint32_t first_block, uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t elen, adsize;
	struct kernel_lb_addr eloc;
	struct extent_position epos;
	int8_t etype = -1;
	struct udf_inode_info *iinfo;

	if (first_block >= sbi->s_partmaps[partition].s_partition_len)
		return 0;

	iinfo = UDF_I(table);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		/* Unknown allocation descriptor format. */
		return 0;

	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = NULL;
	/* Sentinel so the loop condition is false only on a real match. */
	eloc.logicalBlockNum = 0xFFFFFFFF;

	/* Walk extents until one starts at first_block or the list ends. */
	while (first_block != eloc.logicalBlockNum &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		udf_debug("eloc=%u, elen=%u, first_block=%u\n",
			  eloc.logicalBlockNum, elen, first_block);
		; /* empty loop body */
	}

	if (first_block == eloc.logicalBlockNum) {
		/* Step back to the descriptor just read so it can be
		 * rewritten or deleted in place. */
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (alloc_count > block_count) {
			/* Extent larger than needed: shrink it from the front. */
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, &eloc,
				       (etype << 30) | elen, 1);
		} else
			/* Whole extent consumed: remove it from the list. */
			udf_delete_aext(table, epos);
	} else {
		alloc_count = 0;
	}

	brelse(epos.bh);

	/* Negative delta debits the allocated blocks from free space. */
	if (alloc_count)
		udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
571
/*
 * udf_table_new_block - allocate one block from a table-tracked partition
 * @sb:		super block
 * @table:	inode holding the unallocated-space table
 * @partition:	partition to allocate from
 * @goal:	preferred block number (hint only)
 * @err:	set to 0 on success, -ENOSPC on failure
 *
 * Finds the free extent whose start is closest to @goal (containment of
 * the goal counts as distance 0), takes that extent's first block, and
 * shrinks or deletes the extent.  Returns the block, or 0 with *err set.
 */
static udf_pblk_t udf_table_new_block(struct super_block *sb,
				      struct inode *table, uint16_t partition,
				      uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	udf_pblk_t newblock = 0;
	uint32_t adsize;
	uint32_t elen, goal_elen = 0;
	struct kernel_lb_addr eloc, goal_eloc;
	struct extent_position epos, goal_epos;
	int8_t etype;
	struct udf_inode_info *iinfo = UDF_I(table);

	*err = -ENOSPC;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		/* Unknown allocation descriptor format. */
		return newblock;

	mutex_lock(&sbi->s_alloc_mutex);
	/* An out-of-range goal simply falls back to the partition start. */
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	/* We search for the closest matching block to goal. If we find
	   a exact hit, we stop. Otherwise we keep going till we run out
	   of extents. We store the buffer_head, bloc, and extoffset
	   of the current closest match and use that when we are done.
	 */
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = goal_epos.bh = NULL;

	/* spread == 0 means an exact hit, so the loop can stop early. */
	while (spread &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (goal >= eloc.logicalBlockNum) {
			/* Distance is 0 if the goal lies inside this extent. */
			if (goal < eloc.logicalBlockNum +
					(elen >> sb->s_blocksize_bits))
				nspread = 0;
			else
				nspread = goal - eloc.logicalBlockNum -
					(elen >> sb->s_blocksize_bits);
		} else {
			nspread = eloc.logicalBlockNum - goal;
		}

		if (nspread < spread) {
			/* New best match: remember where its descriptor
			 * lives so it can be rewritten later. */
			spread = nspread;
			if (goal_epos.bh != epos.bh) {
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);
			}
			goal_epos.block = epos.block;
			goal_epos.offset = epos.offset - adsize;
			goal_eloc = eloc;
			goal_elen = (etype << 30) | elen;
		}
	}

	brelse(epos.bh);

	/* spread still 0xFFFFFFFF means no free extent was found at all. */
	if (spread == 0xFFFFFFFF) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting */
	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;

	if (goal_elen)
		udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
	else
		/* Extent is now empty: remove its descriptor. */
		udf_delete_aext(table, goal_epos);
	brelse(goal_epos.bh);

	/* Debit one block from the LVID free-space count. */
	udf_add_free_space(sb, partition, -1);

	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;
}
664
/*
 * udf_free_blocks - return a run of blocks to the partition's free space
 * @sb:		super block
 * @inode:	inode to debit the freed bytes from (may be NULL)
 * @bloc:	partition + logical block of the start of the run
 * @offset:	additional block offset from @bloc->logicalBlockNum
 * @count:	number of blocks to free
 *
 * Dispatches to the bitmap- or table-based free routine depending on the
 * partition's free-space mechanism; "unallocated" space is checked before
 * "freed" space.
 */
void udf_free_blocks(struct super_block *sb, struct inode *inode,
		     struct kernel_lb_addr *bloc, uint32_t offset,
		     uint32_t count)
{
	uint16_t partition = bloc->partitionReferenceNum;
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
	uint32_t flags = map->s_partition_flags;

	if (flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap,
				       bloc, offset, count);
	else if (flags & UDF_PART_FLAG_UNALLOC_TABLE)
		udf_table_free_blocks(sb, map->s_uspace.s_table,
				      bloc, offset, count);
	else if (flags & UDF_PART_FLAG_FREED_BITMAP)
		udf_bitmap_free_blocks(sb, map->s_fspace.s_bitmap,
				       bloc, offset, count);
	else if (flags & UDF_PART_FLAG_FREED_TABLE)
		udf_table_free_blocks(sb, map->s_fspace.s_table,
				      bloc, offset, count);

	/* Keep the inode's byte accounting in sync with the freed blocks. */
	if (inode)
		inode_sub_bytes(inode,
				((sector_t)count) << sb->s_blocksize_bits);
}
691
/*
 * udf_prealloc_blocks - allocate up to @block_count blocks starting at
 * @first_block on behalf of @inode.
 *
 * Dispatches to the bitmap- or table-based preallocator depending on the
 * partition's free-space mechanism ("unallocated" space is preferred over
 * "freed" space) and charges the result to @inode.  Returns the number of
 * blocks obtained, or 0 when the partition has no known mechanism.
 */
inline int udf_prealloc_blocks(struct super_block *sb,
			       struct inode *inode,
			       uint16_t partition, uint32_t first_block,
			       uint32_t block_count)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
	uint32_t flags = map->s_partition_flags;
	int allocated;

	if (flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		allocated = udf_bitmap_prealloc_blocks(sb,
						       map->s_uspace.s_bitmap,
						       partition, first_block,
						       block_count);
	else if (flags & UDF_PART_FLAG_UNALLOC_TABLE)
		allocated = udf_table_prealloc_blocks(sb,
						      map->s_uspace.s_table,
						      partition, first_block,
						      block_count);
	else if (flags & UDF_PART_FLAG_FREED_BITMAP)
		allocated = udf_bitmap_prealloc_blocks(sb,
						       map->s_fspace.s_bitmap,
						       partition, first_block,
						       block_count);
	else if (flags & UDF_PART_FLAG_FREED_TABLE)
		allocated = udf_table_prealloc_blocks(sb,
						      map->s_fspace.s_table,
						      partition, first_block,
						      block_count);
	else
		return 0;

	/* Keep the inode's byte accounting in sync with the allocation. */
	if (inode && allocated > 0)
		inode_add_bytes(inode, allocated << sb->s_blocksize_bits);
	return allocated;
}
727
udf_new_block(struct super_block * sb,struct inode * inode,uint16_t partition,uint32_t goal,int * err)728 inline udf_pblk_t udf_new_block(struct super_block *sb,
729 struct inode *inode,
730 uint16_t partition, uint32_t goal, int *err)
731 {
732 struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
733 udf_pblk_t block;
734
735 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
736 block = udf_bitmap_new_block(sb,
737 map->s_uspace.s_bitmap,
738 partition, goal, err);
739 else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
740 block = udf_table_new_block(sb,
741 map->s_uspace.s_table,
742 partition, goal, err);
743 else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
744 block = udf_bitmap_new_block(sb,
745 map->s_fspace.s_bitmap,
746 partition, goal, err);
747 else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
748 block = udf_table_new_block(sb,
749 map->s_fspace.s_table,
750 partition, goal, err);
751 else {
752 *err = -EIO;
753 return 0;
754 }
755 if (inode && block)
756 inode_add_bytes(inode, sb->s_blocksize);
757 return block;
758 }
759