// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"

/*
 * xfs_da_btree.c
 *
 * Routines to implement directories as Btrees of hashed names.
 */

/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/

/*
 * Routines used for growing the Btree.
 */
STATIC int	xfs_da3_root_split(xfs_da_state_t *state,
				   xfs_da_state_blk_t *existing_root,
				   xfs_da_state_blk_t *new_child);
STATIC int	xfs_da3_node_split(xfs_da_state_t *state,
				   xfs_da_state_blk_t *existing_blk,
				   xfs_da_state_blk_t *split_blk,
				   xfs_da_state_blk_t *blk_to_add,
				   int treelevel,
				   int *result);
STATIC void	xfs_da3_node_rebalance(xfs_da_state_t *state,
				       xfs_da_state_blk_t *node_blk_1,
				       xfs_da_state_blk_t *node_blk_2);
STATIC void	xfs_da3_node_add(xfs_da_state_t *state,
				 xfs_da_state_blk_t *old_node_blk,
				 xfs_da_state_blk_t *new_node_blk);

/*
 * Routines used for shrinking the Btree.
 */
STATIC int	xfs_da3_root_join(xfs_da_state_t *state,
				  xfs_da_state_blk_t *root_blk);
STATIC int	xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void	xfs_da3_node_remove(xfs_da_state_t *state,
				    xfs_da_state_blk_t *drop_blk);
STATIC void	xfs_da3_node_unbalance(xfs_da_state_t *state,
				       xfs_da_state_blk_t *src_node_blk,
				       xfs_da_state_blk_t *dst_node_blk);

/*
 * Utility routines.
 */
STATIC int	xfs_da3_blk_unlink(xfs_da_state_t *state,
				   xfs_da_state_blk_t *drop_blk,
				   xfs_da_state_blk_t *save_blk);


kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */

/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}

/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	i;

	for (i = 0; i < state->altpath.active; i++)
		state->altpath.blk[i].bp = NULL;
	state->altpath.active = 0;
}

/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
	xfs_da_state_kill_altpath(state);
#ifdef DEBUG
	memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
	kmem_zone_free(xfs_da_state_zone, state);
}

static xfs_failaddr_t
xfs_da3_node_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_da_intnode	*hdr = bp->b_addr;
	struct xfs_da3_icnode_hdr ichdr;
	const struct xfs_dir_ops *ops;

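	/*
	 * A NULL inode returns the mount's default dir ops; the verifier
	 * has no inode context, and the node header layout does not
	 * depend on it.
	 */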
	ops = xfs_dir_get_ops(mp, NULL);

	ops->node_hdr_from_disk(&ichdr, hdr);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		if (ichdr.magic != XFS_DA3_NODE_MAGIC)
			return __this_address;

		if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
		if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
			return __this_address;
		if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->info.lsn)))
			return __this_address;
	} else {
		if (ichdr.magic != XFS_DA_NODE_MAGIC)
			return __this_address;
	}
	if (ichdr.level == 0)
		return __this_address;
	if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
		return __this_address;
	if (ichdr.count == 0)
		return __this_address;

	/*
	 * We don't know if the node is for an attribute or directory tree,
	 * so only fail if the count is outside both bounds.
	 */
	if (ichdr.count > mp->m_dir_geo->node_ents &&
	    ichdr.count > mp->m_attr_geo->node_ents)
		return __this_address;

	/* XXX: hash order check? */

	return NULL;
}

static void
xfs_da3_node_write_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
	xfs_failaddr_t		fa;

	fa = xfs_da3_node_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

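	/*
	 * Stamp the LSN of the last modification into the header before
	 * computing the CRC, so log recovery can tell whether the block
	 * on disk is already newer than the change being replayed.
	 */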
	if (bip)
		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
}

/*
 * Leaf/node format detection on trees is sketchy, so a node read can be done
 * on leaf level blocks when detection identifies the tree as a node format
 * tree incorrectly. In this case, we need to swap the verifier to match the
 * correct format of the block being read.
 */
static void
xfs_da3_node_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;
	xfs_failaddr_t		fa;

	switch (be16_to_cpu(info->magic)) {
	case XFS_DA3_NODE_MAGIC:
		if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
			xfs_verifier_error(bp, -EFSBADCRC,
					__this_address);
			break;
		}
		/* fall through */
	case XFS_DA_NODE_MAGIC:
		fa = xfs_da3_node_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	case XFS_ATTR_LEAF_MAGIC:
	case XFS_ATTR3_LEAF_MAGIC:
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	case XFS_DIR2_LEAFN_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	default:
		xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
		break;
	}
}

/* Verify the structure of a da3 block. */
static xfs_failaddr_t
xfs_da3_node_verify_struct(
	struct xfs_buf		*bp)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;

	switch (be16_to_cpu(info->magic)) {
	case XFS_DA3_NODE_MAGIC:
	case XFS_DA_NODE_MAGIC:
		return xfs_da3_node_verify(bp);
	case XFS_ATTR_LEAF_MAGIC:
	case XFS_ATTR3_LEAF_MAGIC:
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		return bp->b_ops->verify_struct(bp);
	case XFS_DIR2_LEAFN_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		return bp->b_ops->verify_struct(bp);
	default:
		return __this_address;
	}
}

const struct xfs_buf_ops xfs_da3_node_buf_ops = {
	.name = "xfs_da3_node",
	.verify_read = xfs_da3_node_read_verify,
	.verify_write = xfs_da3_node_write_verify,
	.verify_struct = xfs_da3_node_verify_struct,
};

int
xfs_da3_node_read(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			which_fork)
{
	int			err;

	err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
				which_fork, &xfs_da3_node_buf_ops);
	if (!err && tp && *bpp) {
		struct xfs_da_blkinfo	*info = (*bpp)->b_addr;
		int			type;

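		/*
		 * Set the transaction buffer type from the magic number we
		 * found so that log recovery applies the right verifier to
		 * this buffer.
		 */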
		switch (be16_to_cpu(info->magic)) {
		case XFS_DA_NODE_MAGIC:
		case XFS_DA3_NODE_MAGIC:
			type = XFS_BLFT_DA_NODE_BUF;
			break;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			type = XFS_BLFT_ATTR_LEAF_BUF;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			type = XFS_BLFT_DIR_LEAFN_BUF;
			break;
		default:
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
					tp->t_mountp, info, sizeof(*info));
			xfs_trans_brelse(tp, *bpp);
			*bpp = NULL;
			return -EFSCORRUPTED;
		}
		xfs_trans_buf_set_type(tp, *bpp, type);
	}
	return err;
}

/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/

/*
 * Create the initial contents of an intermediate node.
 */
int
xfs_da3_node_create(
	struct xfs_da_args	*args,
	xfs_dablk_t		blkno,
	int			level,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_da_intnode	*node;
	struct xfs_trans	*tp = args->trans;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_da3_icnode_hdr ichdr = {0};
	struct xfs_buf		*bp;
	int			error;
	struct xfs_inode	*dp = args->dp;

	trace_xfs_da_node_create(args);
	ASSERT(level <= XFS_DA_NODE_MAXDEPTH);

	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
	if (error)
		return error;
	bp->b_ops = &xfs_da3_node_buf_ops;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	node = bp->b_addr;

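	/*
	 * On v5 (CRC-enabled) filesystems, stamp the block with its own
	 * location, its owner's inode number and the filesystem UUID so
	 * that the verifiers can detect misplaced or foreign blocks.
	 */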
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr));
		ichdr.magic = XFS_DA3_NODE_MAGIC;
		hdr3->info.blkno = cpu_to_be64(bp->b_bn);
		hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
		uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid);
	} else {
		ichdr.magic = XFS_DA_NODE_MAGIC;
	}
	ichdr.level = level;

	dp->d_ops->node_hdr_to_disk(node, &ichdr);
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	*bpp = bp;
	return 0;
}

/*
 * Split a leaf node, rebalance, then possibly split
 * intermediate nodes, rebalance, etc.
 */
int							/* error */
xfs_da3_split(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*oldblk;
	struct xfs_da_state_blk	*newblk;
	struct xfs_da_state_blk	*addblk;
	struct xfs_da_intnode	*node;
	int			max;
	int			action = 0;
	int			error;
	int			i;

	trace_xfs_da_split(state->args);

	/*
	 * Walk back up the tree splitting/inserting/adjusting as necessary.
	 * If we need to insert and there isn't room, split the node, then
	 * decide which fragment to insert the new block from below into.
	 * Note that we may split the root this way, but we need more fixup.
	 */
	max = state->path.active - 1;
	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	addblk = &state->path.blk[max];		/* initial dummy value */
	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
		oldblk = &state->path.blk[i];
		newblk = &state->altpath.blk[i];

		/*
		 * If a leaf node then
		 *     Allocate a new leaf node, then rebalance across them.
		 * else if an intermediate node then
		 *     We split on the last layer, must we split the node?
		 */
		switch (oldblk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_split(state, oldblk, newblk);
			if ((error != 0) && (error != -ENOSPC)) {
				return error;	/* GROT: attr is inconsistent */
			}
			if (!error) {
				addblk = newblk;
				break;
			}
			/*
			 * Entry wouldn't fit, split the leaf again. The new
			 * extrablk will be consumed by xfs_da3_node_split if
			 * the node is split.
			 */
			state->extravalid = 1;
			if (state->inleaf) {
				state->extraafter = 0;	/* before newblk */
				trace_xfs_attr_leaf_split_before(state->args);
				error = xfs_attr3_leaf_split(state, oldblk,
							    &state->extrablk);
			} else {
				state->extraafter = 1;	/* after newblk */
				trace_xfs_attr_leaf_split_after(state->args);
				error = xfs_attr3_leaf_split(state, newblk,
							    &state->extrablk);
			}
			if (error)
				return error;	/* GROT: attr inconsistent */
			addblk = newblk;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_split(state, oldblk, newblk);
			if (error)
				return error;
			addblk = newblk;
			break;
		case XFS_DA_NODE_MAGIC:
			error = xfs_da3_node_split(state, oldblk, newblk,
						   addblk, max - i, &action);
			addblk->bp = NULL;
			if (error)
				return error;	/* GROT: dir is inconsistent */
			/*
			 * Record the newly split block for the next time thru?
			 */
			if (action)
				addblk = newblk;
			else
				addblk = NULL;
			break;
		}

		/*
		 * Update the btree to show the new hashval for this child.
		 */
		xfs_da3_fixhashpath(state, &state->path);
	}
	if (!addblk)
		return 0;

	/*
	 * xfs_da3_node_split() should have consumed any extra blocks we added
	 * during a double leaf split in the attr fork. This is guaranteed as
	 * we can't be here if the attr fork only has a single leaf block.
	 */
	ASSERT(state->extravalid == 0 ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Split the root node.
	 */
	ASSERT(state->path.active == 0);
	oldblk = &state->path.blk[0];
	error = xfs_da3_root_split(state, oldblk, addblk);
	if (error) {
		addblk->bp = NULL;
		return error;	/* GROT: dir is inconsistent */
	}

	/*
	 * Update pointers to the node which used to be block 0 and just got
	 * bumped because of the addition of a new root node. Note that the
	 * original block 0 could be at any position in the list of blocks in
	 * the tree.
	 *
	 * Note: the magic numbers and sibling pointers are in the same
	 * physical place for both v2 and v3 headers (by design). Hence it
	 * doesn't matter which version of the xfs_da_intnode structure we use
	 * here as the result will be the same using either structure.
	 */
	node = oldblk->bp->b_addr;
	if (node->hdr.info.forw) {
		ASSERT(be32_to_cpu(node->hdr.info.forw) == addblk->blkno);
		node = addblk->bp->b_addr;
		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, addblk->bp,
				  XFS_DA_LOGRANGE(node, &node->hdr.info,
				  sizeof(node->hdr.info)));
	}
	node = oldblk->bp->b_addr;
	if (node->hdr.info.back) {
		ASSERT(be32_to_cpu(node->hdr.info.back) == addblk->blkno);
		node = addblk->bp->b_addr;
		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, addblk->bp,
				  XFS_DA_LOGRANGE(node, &node->hdr.info,
				  sizeof(node->hdr.info)));
	}
	addblk->bp = NULL;
	return 0;
}

/*
 * Split the root. We have to create a new root and point to the two
 * parts (the split old root) that we just created. Copy block zero to
 * the EOF, extending the inode in the process.
 */
STATIC int						/* error */
xfs_da3_root_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_dir2_leaf	*leaf;
	xfs_dablk_t		blkno;
	int			level;
	int			error;
	int			size;

	trace_xfs_da_root_split(state->args);

	/*
	 * Copy the existing (incorrect) block from the root node position
	 * to a free space somewhere.
	 */
	args = state->args;
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return error;

	dp = args->dp;
	tp = args->trans;
	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
	if (error)
		return error;
	node = bp->b_addr;
	oldroot = blk1->bp->b_addr;
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
		struct xfs_da3_icnode_hdr icnodehdr;

		dp->d_ops->node_hdr_from_disk(&icnodehdr, oldroot);
		btree = dp->d_ops->node_tree_p(oldroot);
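		/* copy only the live data: the header plus in-use entries */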
		size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
		level = icnodehdr.level;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	} else {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		leaf = (xfs_dir2_leaf_t *)oldroot;
		dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
		ents = dp->d_ops->leaf_ents_p(leaf);

		ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
		       leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
		size = (int)((char *)&ents[leafhdr.count] - (char *)leaf);
		level = 0;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
	}

	/*
	 * we can copy most of the information in the node from one block to
	 * another, but for CRC enabled headers we have to make sure that the
	 * block specific identifiers are kept intact. We update the buffer
	 * directly for this.
	 */
	memcpy(node, oldroot, size);
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;

		node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
	}
	xfs_trans_log_buf(tp, bp, 0, size - 1);

	bp->b_ops = blk1->bp->b_ops;
	xfs_trans_buf_copy_type(bp, blk1->bp);
	blk1->bp = bp;
	blk1->blkno = blkno;

	/*
	 * Set up the new root node.
	 */
	error = xfs_da3_node_create(args,
		(args->whichfork == XFS_DATA_FORK) ? args->geo->leafblk : 0,
		level + 1, &bp, args->whichfork);
	if (error)
		return error;

	node = bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	btree = dp->d_ops->node_tree_p(node);
	btree[0].hashval = cpu_to_be32(blk1->hashval);
	btree[0].before = cpu_to_be32(blk1->blkno);
	btree[1].hashval = cpu_to_be32(blk2->hashval);
	btree[1].before = cpu_to_be32(blk2->blkno);
	nodehdr.count = 2;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);

#ifdef DEBUG
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		ASSERT(blk1->blkno >= args->geo->leafblk &&
		       blk1->blkno < args->geo->freeblk);
		ASSERT(blk2->blkno >= args->geo->leafblk &&
		       blk2->blkno < args->geo->freeblk);
	}
#endif

	/* Header is already logged by xfs_da_node_create */
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));

	return 0;
}

/*
 * Split the node, rebalance, then add the new entry.
 */
STATIC int						/* error */
xfs_da3_node_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk,
	struct xfs_da_state_blk	*addblk,
	int			treelevel,
	int			*result)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	xfs_dablk_t		blkno;
	int			newcount;
	int			error;
	int			useextra;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_split(state->args);

	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);

	/*
	 * With V2 dirs the extra block is data or freespace.
	 */
	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
	newcount = 1 + useextra;
	/*
	 * Do we have to split the node?
	 */
	if (nodehdr.count + newcount > state->args->geo->node_ents) {
		/*
		 * Allocate a new node, add to the doubly linked chain of
		 * nodes, then move some of our excess entries into it.
		 */
		error = xfs_da_grow_inode(state->args, &blkno);
		if (error)
			return error;	/* GROT: dir is inconsistent */

		error = xfs_da3_node_create(state->args, blkno, treelevel,
					    &newblk->bp, state->args->whichfork);
		if (error)
			return error;	/* GROT: dir is inconsistent */
		newblk->blkno = blkno;
		newblk->magic = XFS_DA_NODE_MAGIC;
		xfs_da3_node_rebalance(state, oldblk, newblk);
		error = xfs_da3_blk_link(state, oldblk, newblk);
		if (error)
			return error;
		*result = 1;
	} else {
		*result = 0;
	}

	/*
	 * Insert the new entry(s) into the correct block
	 * (updating last hashval in the process).
	 *
	 * xfs_da3_node_add() inserts BEFORE the given index,
	 * and as a result of using node_lookup_int() we always
	 * point to a valid entry (not after one), but a split
	 * operation always results in a new block whose hashvals
	 * FOLLOW the current block.
	 *
	 * If we had a double-split op below us, then add the extra block too.
	 */
	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (oldblk->index <= nodehdr.count) {
		oldblk->index++;
		xfs_da3_node_add(state, oldblk, addblk);
		if (useextra) {
			if (state->extraafter)
				oldblk->index++;
			xfs_da3_node_add(state, oldblk, &state->extrablk);
			state->extravalid = 0;
		}
	} else {
		newblk->index++;
		xfs_da3_node_add(state, newblk, addblk);
		if (useextra) {
			if (state->extraafter)
				newblk->index++;
			xfs_da3_node_add(state, newblk, &state->extrablk);
			state->extravalid = 0;
		}
	}

	return 0;
}

/*
 * Balance the btree elements between two intermediate nodes,
 * usually one full and one empty.
 *
 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 */
STATIC void
xfs_da3_node_rebalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node1;
	struct xfs_da_intnode	*node2;
	struct xfs_da_intnode	*tmpnode;
	struct xfs_da_node_entry *btree1;
	struct xfs_da_node_entry *btree2;
	struct xfs_da_node_entry *btree_s;
	struct xfs_da_node_entry *btree_d;
	struct xfs_da3_icnode_hdr nodehdr1;
	struct xfs_da3_icnode_hdr nodehdr2;
	struct xfs_trans	*tp;
	int			count;
	int			tmp;
	int			swap = 0;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_rebalance(state->args);

	node1 = blk1->bp->b_addr;
	node2 = blk2->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
	dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
	btree1 = dp->d_ops->node_tree_p(node1);
	btree2 = dp->d_ops->node_tree_p(node2);

	/*
	 * Figure out how many entries need to move, and in which direction.
	 * Swap the nodes around if that makes it simpler.
	 */
	if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
	     (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
			be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
		tmpnode = node1;
		node1 = node2;
		node2 = tmpnode;
		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
		btree1 = dp->d_ops->node_tree_p(node1);
		btree2 = dp->d_ops->node_tree_p(node2);
		swap = 1;
	}

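	/*
	 * A positive count moves entries from node1 down into node2; a
	 * negative count moves them from node2 up into node1. Either way
	 * we aim for an even split.
	 */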
	count = (nodehdr1.count - nodehdr2.count) / 2;
	if (count == 0)
		return;
	tp = state->args->trans;
	/*
	 * Two cases: high-to-low and low-to-high.
	 */
	if (count > 0) {
		/*
		 * Move elements in node2 up to make a hole.
		 */
		tmp = nodehdr2.count;
		if (tmp > 0) {
			tmp *= (uint)sizeof(xfs_da_node_entry_t);
			btree_s = &btree2[0];
			btree_d = &btree2[count];
			memmove(btree_d, btree_s, tmp);
		}

		/*
		 * Move the req'd B-tree elements from high in node1 to
		 * low in node2.
		 */
		nodehdr2.count += count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree1[nodehdr1.count - count];
		btree_d = &btree2[0];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count -= count;
	} else {
		/*
		 * Move the req'd B-tree elements from low in node2 to
		 * high in node1.
		 */
		count = -count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[0];
		btree_d = &btree1[nodehdr1.count];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count += count;

		xfs_trans_log_buf(tp, blk1->bp,
			XFS_DA_LOGRANGE(node1, btree_d, tmp));

		/*
		 * Move elements in node2 down to fill the hole.
		 */
		tmp = nodehdr2.count - count;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[count];
		btree_d = &btree2[0];
		memmove(btree_d, btree_s, tmp);
		nodehdr2.count -= count;
	}

	/*
	 * Log header of node 1 and all current bits of node 2.
	 */
	dp->d_ops->node_hdr_to_disk(node1, &nodehdr1);
	xfs_trans_log_buf(tp, blk1->bp,
		XFS_DA_LOGRANGE(node1, &node1->hdr, dp->d_ops->node_hdr_size));

	dp->d_ops->node_hdr_to_disk(node2, &nodehdr2);
	xfs_trans_log_buf(tp, blk2->bp,
		XFS_DA_LOGRANGE(node2, &node2->hdr,
				dp->d_ops->node_hdr_size +
				(sizeof(btree2[0]) * nodehdr2.count)));

	/*
	 * Record the last hashval from each block for upward propagation.
	 * (note: don't use the swapped node pointers)
	 */
	if (swap) {
		node1 = blk1->bp->b_addr;
		node2 = blk2->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
		btree1 = dp->d_ops->node_tree_p(node1);
		btree2 = dp->d_ops->node_tree_p(node2);
	}
	blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
	blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 */
	if (blk1->index >= nodehdr1.count) {
		blk2->index = blk1->index - nodehdr1.count;
		blk1->index = nodehdr1.count + 1;	/* make it invalid */
	}
}

/*
 * Add a new entry to an intermediate node.
 */
STATIC void
xfs_da3_node_add(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_add(state->args);

	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	btree = dp->d_ops->node_tree_p(node);

	ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
	ASSERT(newblk->blkno != 0);
	if (state->args->whichfork == XFS_DATA_FORK)
		ASSERT(newblk->blkno >= state->args->geo->leafblk &&
		       newblk->blkno < state->args->geo->freeblk);

	/*
	 * We may need to make some room before we insert the new node.
	 */
	tmp = 0;
	if (oldblk->index < nodehdr.count) {
		tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
		memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
	}
	btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
	btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &btree[oldblk->index],
				tmp + sizeof(*btree)));

	nodehdr.count += 1;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	/*
	 * Copy the last hash value from the oldblk to propagate upwards.
	 */
	oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
}

/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/

/*
 * Deallocate an empty leaf node, remove it from its parent,
 * possibly deallocating that block, etc...
 */
int
xfs_da3_join(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*drop_blk;
	struct xfs_da_state_blk	*save_blk;
	int			action = 0;
	int			error;

	trace_xfs_da_join(state->args);

	drop_blk = &state->path.blk[state->path.active - 1];
	save_blk = &state->altpath.blk[state->path.active - 1];
	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Walk back up the tree joining/deallocating as necessary.
	 * When we stop dropping blocks, break out.
	 */
	for ( ; state->path.active >= 2; drop_blk--, save_blk--,
		 state->path.active--) {
		/*
		 * See if we can combine the block with a neighbor.
		 *   (action == 0) => no options, just leave
		 *   (action == 1) => coalesce, then unlink
		 *   (action == 2) => block empty, unlink it
		 */
		switch (drop_blk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DA_NODE_MAGIC:
			/*
			 * Remove the offending node, fixup hashvals,
			 * check for a toosmall neighbor.
			 */
			xfs_da3_node_remove(state, drop_blk);
			xfs_da3_fixhashpath(state, &state->path);
			error = xfs_da3_node_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_da3_node_unbalance(state, drop_blk, save_blk);
			break;
		}
		xfs_da3_fixhashpath(state, &state->altpath);
		error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
		xfs_da_state_kill_altpath(state);
		if (error)
			return error;
		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
					    drop_blk->bp);
		drop_blk->bp = NULL;
		if (error)
			return error;
	}
	/*
	 * We joined all the way to the top. If it turns out that
	 * we only have one entry in the root, make the child block
	 * the new root.
	 */
	xfs_da3_node_remove(state, drop_blk);
	xfs_da3_fixhashpath(state, &state->path);
	error = xfs_da3_root_join(state, &state->path.blk[0]);
	return error;
}

#ifdef DEBUG
static void
xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
{
	__be16	magic = blkinfo->magic;

	if (level == 1) {
		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
	} else {
		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
	}
	ASSERT(!blkinfo->forw);
	ASSERT(!blkinfo->back);
}
#else	/* !DEBUG */
#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
#endif	/* !DEBUG */

/*
 * We have only one entry in the root. Copy the only remaining child of
 * the old root to block 0 as the new root node.
 */
STATIC int
xfs_da3_root_join(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*root_blk)
{
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_args	*args;
	xfs_dablk_t		child;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr oldroothdr;
	struct xfs_da_node_entry *btree;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_root_join(state->args);

	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);

	args = state->args;
	oldroot = root_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&oldroothdr, oldroot);
	ASSERT(oldroothdr.forw == 0);
	ASSERT(oldroothdr.back == 0);

	/*
	 * If the root has more than one child, then don't do anything.
	 */
	if (oldroothdr.count > 1)
		return 0;

	/*
	 * Read in the (only) child block, then copy those bytes into
	 * the root block's buffer and free the original child block.
	 */
	btree = dp->d_ops->node_tree_p(oldroot);
	child = be32_to_cpu(btree[0].before);
	ASSERT(child != 0);
	error = xfs_da3_node_read(args->trans, dp, child, -1, &bp,
				  args->whichfork);
	if (error)
		return error;
	xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);

	/*
	 * This could be copying a leaf back into the root block in the case of
	 * there only being a single leaf block left in the tree. Hence we have
	 * to update the b_ops pointer as well to match the buffer type change
	 * that could occur. For dir3 blocks we also need to update the block
	 * number in the buffer header.
	 */
	memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize);
	root_blk->bp->b_ops = bp->b_ops;
	xfs_trans_buf_copy_type(root_blk->bp, bp);
	if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
		struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;

		da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
	}
	xfs_trans_log_buf(args->trans, root_blk->bp, 0,
			  args->geo->blksize - 1);
	error = xfs_da_shrink_inode(args, child, bp);
	return error;
}

/*
 * Check a node block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor. Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 */
STATIC int
xfs_da3_node_toosmall(
	struct xfs_da_state	*state,
	int			*action)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*info;
	xfs_dablk_t		blkno;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr nodehdr;
	int			count;
	int			forward;
	int			error;
	int			retval;
	int			i;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_toosmall(state->args);

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[state->path.active - 1];
	info = blk->bp->b_addr;
	node = (xfs_da_intnode_t *)info;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (nodehdr.count > (state->args->geo->node_ents >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return 0;	/* blk over 50%, don't try to join */
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block. We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (nodehdr.count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (info->forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da3_path_shift(state, &state->altpath, forward,
					   0, &retval);
		if (error)
			return error;
		if (retval) {
			*action = 0;
		} else {
			*action = 2;
		}
		return 0;
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare. We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink a directory over time.
	 */
	count = state->args->geo->node_ents;
	count -= state->args->geo->node_ents >> 2;
	count -= nodehdr.count;
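	/*
	 * count now holds the largest sibling entry count that still leaves
	 * the merged block with at least 25% free space.
	 */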

	/* start with smaller blk num */
	forward = nodehdr.forw < nodehdr.back;
	for (i = 0; i < 2; forward = !forward, i++) {
		struct xfs_da3_icnode_hdr thdr;

		if (forward)
			blkno = nodehdr.forw;
		else
			blkno = nodehdr.back;
		if (blkno == 0)
			continue;
		error = xfs_da3_node_read(state->args->trans, dp,
					blkno, -1, &bp, state->args->whichfork);
		if (error)
			return error;

		node = bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&thdr, node);
		xfs_trans_brelse(state->args->trans, bp);

		if (count - thdr.count >= 0)
			break;	/* fits with at least 25% to spare */
	}
	if (i >= 2) {
		*action = 0;
		return 0;
	}

	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da3_path_shift(state, &state->altpath, forward,
					   0, &retval);
	} else {
		error = xfs_da3_path_shift(state, &state->path, forward,
					   0, &retval);
	}
	if (error)
		return error;
	if (retval) {
		*action = 0;
		return 0;
	}
	*action = 1;
	return 0;
}

/*
 * Pick up the last hashvalue from an intermediate node.
 */
STATIC uint
xfs_da3_node_lasthash(
	struct xfs_inode	*dp,
	struct xfs_buf		*bp,
	int			*count)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;

	node = bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (count)
		*count = nodehdr.count;
	if (!nodehdr.count)
		return 0;
	btree = dp->d_ops->node_tree_p(node);
	return be32_to_cpu(btree[nodehdr.count - 1].hashval);
}

/*
 * Walk back up the tree adjusting hash values as necessary,
 * when we stop making changes, return.
 */
void
xfs_da3_fixhashpath(
	struct xfs_da_state	*state,
	struct xfs_da_state_path *path)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	xfs_dahash_t		lasthash = 0;
	int			level;
	int			count;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_fixhashpath(state->args);

	level = path->active - 1;
	blk = &path->blk[level];
	switch (blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		lasthash = xfs_dir2_leaf_lasthash(dp, blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DA_NODE_MAGIC:
		lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
		if (count == 0)
			return;
		break;
	}
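	/* walk towards the root, rewriting hashvals until one already matches */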
	for (blk--, level--; level >= 0; blk--, level--) {
		struct xfs_da3_icnode_hdr nodehdr;

		node = blk->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
		btree = dp->d_ops->node_tree_p(node);
		if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
			break;
		blk->hashval = lasthash;
		btree[blk->index].hashval = cpu_to_be32(lasthash);
		xfs_trans_log_buf(state->args->trans, blk->bp,
				  XFS_DA_LOGRANGE(node, &btree[blk->index],
						  sizeof(*btree)));

		lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
	}
}

/*
 * Remove an entry from an intermediate node.
 */
STATIC void
xfs_da3_node_remove(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			index;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_remove(state->args);

	node = drop_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	ASSERT(drop_blk->index < nodehdr.count);
	ASSERT(drop_blk->index >= 0);

	/*
	 * Copy over the offending entry, or just zero it out.
	 */
	index = drop_blk->index;
	btree = dp->d_ops->node_tree_p(node);
	if (index < nodehdr.count - 1) {
		tmp = nodehdr.count - index - 1;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		memmove(&btree[index], &btree[index + 1], tmp);
		xfs_trans_log_buf(state->args->trans, drop_blk->bp,
		    XFS_DA_LOGRANGE(node, &btree[index], tmp));
		index = nodehdr.count - 1;
	}
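	/* index now points at the stale last entry; zero it out */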
	memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
	nodehdr.count -= 1;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	/*
	 * Copy the last hash value from the block to propagate upwards.
	 */
	drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
}

/*
 * Unbalance the elements between two intermediate nodes,
 * move all Btree elements from one node into another.
 */
STATIC void
xfs_da3_node_unbalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_da_intnode	*drop_node;
	struct xfs_da_intnode	*save_node;
	struct xfs_da_node_entry *drop_btree;
	struct xfs_da_node_entry *save_btree;
	struct xfs_da3_icnode_hdr drop_hdr;
	struct xfs_da3_icnode_hdr save_hdr;
	struct xfs_trans	*tp;
	int			sindex;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_unbalance(state->args);

	drop_node = drop_blk->bp->b_addr;
	save_node = save_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&drop_hdr, drop_node);
	dp->d_ops->node_hdr_from_disk(&save_hdr, save_node);
	drop_btree = dp->d_ops->node_tree_p(drop_node);
	save_btree = dp->d_ops->node_tree_p(save_node);
	tp = state->args->trans;

	/*
	 * If the dying block has lower hashvals, then move all the
	 * elements in the remaining block up to make a hole.
	 */
	if ((be32_to_cpu(drop_btree[0].hashval) <
			be32_to_cpu(save_btree[0].hashval)) ||
	    (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
			be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
		/* XXX: check this - is memmove dst correct? */
		tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
		memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);

		sindex = 0;
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, &save_btree[0],
				(save_hdr.count + drop_hdr.count) *
						sizeof(xfs_da_node_entry_t)));
	} else {
		sindex = save_hdr.count;
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
				drop_hdr.count * sizeof(xfs_da_node_entry_t)));
	}

	/*
	 * Move all the B-tree elements from drop_blk to save_blk.
	 */
	tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
	memcpy(&save_btree[sindex], &drop_btree[0], tmp);
	save_hdr.count += drop_hdr.count;

	dp->d_ops->node_hdr_to_disk(save_node, &save_hdr);
	xfs_trans_log_buf(tp, save_blk->bp,
		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
				dp->d_ops->node_hdr_size));

	/*
	 * Save the last hashval in the remaining block for upward propagation.
	 */
	save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
}

/*========================================================================
 * Routines used for finding things in the Btree.
 *========================================================================*/

/*
 * Walk down the Btree looking for a particular filename, filling
 * in the state structure as we go.
 *
 * We will set the state structure to point to each of the elements
 * in each of the nodes where either the hashval is or should be.
 *
 * We support duplicate hashvals, so for each entry in the current
 * node that could contain the desired hashval, descend. This is a
 * pruned depth-first tree search.
 */
int							/* error */
xfs_da3_node_lookup_int(
	struct xfs_da_state	*state,
	int			*result)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*curr;
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	xfs_dablk_t		blkno;
	xfs_dahash_t		hashval;
	xfs_dahash_t		btreehashval;
	int			probe;
	int			span;
	int			max;
	int			error;
	int			retval;
	unsigned int		expected_level = 0;
	uint16_t		magic;
	struct xfs_inode	*dp = state->args->dp;

	args = state->args;

	/*
	 * Descend thru the B-tree searching each level for the right
	 * node to use, until the right hashval is found.
	 */
	blkno = args->geo->leafblk;
	for (blk = &state->path.blk[0], state->path.active = 1;
			 state->path.active <= XFS_DA_NODE_MAXDEPTH;
			 blk++, state->path.active++) {
		/*
		 * Read the next node down in the tree.
		 */
		blk->blkno = blkno;
		error = xfs_da3_node_read(args->trans, args->dp, blkno,
					-1, &blk->bp, args->whichfork);
		if (error) {
			blk->blkno = 0;
			state->path.active--;
			return error;
		}
		curr = blk->bp->b_addr;
		magic = be16_to_cpu(curr->magic);

		if (magic == XFS_ATTR_LEAF_MAGIC ||
		    magic == XFS_ATTR3_LEAF_MAGIC) {
			blk->magic = XFS_ATTR_LEAF_MAGIC;
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		}

		if (magic == XFS_DIR2_LEAFN_MAGIC ||
		    magic == XFS_DIR3_LEAFN_MAGIC) {
			blk->magic = XFS_DIR2_LEAFN_MAGIC;
			blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
							      blk->bp, NULL);
			break;
		}

		if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC)
			return -EFSCORRUPTED;

		blk->magic = XFS_DA_NODE_MAGIC;

		/*
		 * Search an intermediate node for a match.
		 */
		node = blk->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
		btree = dp->d_ops->node_tree_p(node);

		/* Tree taller than we can handle; bail out! */
		if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH)
			return -EFSCORRUPTED;

		/* Check the level from the root. */
		if (blkno == args->geo->leafblk)
			expected_level = nodehdr.level - 1;
		else if (expected_level != nodehdr.level)
			return -EFSCORRUPTED;
		else
			expected_level--;

		max = nodehdr.count;
		blk->hashval = be32_to_cpu(btree[max - 1].hashval);

		/*
		 * Binary search. (note: small blocks will skip this loop)
		 */
		probe = span = max / 2;
		hashval = args->hashval;
		while (span > 4) {
			span /= 2;
			btreehashval = be32_to_cpu(btree[probe].hashval);
			if (btreehashval < hashval)
				probe += span;
			else if (btreehashval > hashval)
				probe -= span;
			else
				break;
		}
		ASSERT((probe >= 0) && (probe < max));
		ASSERT((span <= 4) ||
			(be32_to_cpu(btree[probe].hashval) == hashval));

		/*
		 * Since we may have duplicate hashvals, find the first
		 * matching hashval in the node.
		 */
		while (probe > 0 &&
		       be32_to_cpu(btree[probe].hashval) >= hashval) {
			probe--;
		}
		while (probe < max &&
		       be32_to_cpu(btree[probe].hashval) < hashval) {
			probe++;
		}

		/*
		 * Pick the right block to descend on.
		 */
		if (probe == max) {
			blk->index = max - 1;
			blkno = be32_to_cpu(btree[max - 1].before);
		} else {
			blk->index = probe;
			blkno = be32_to_cpu(btree[probe].before);
		}

		/* We can't point back to the root. */
		if (blkno == args->geo->leafblk)
			return -EFSCORRUPTED;
	}

	if (expected_level != 0)
		return -EFSCORRUPTED;

	/*
	 * A leaf block that ends in the hashval that we are interested in
	 * (final hashval == search hashval) means that the next block may
	 * contain more entries with the same hashval, shift upward to the
	 * next leaf and keep searching.
	 */
	for (;;) {
		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
							&blk->index, state);
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
			blk->index = args->index;
			args->blkno = blk->blkno;
		} else {
			ASSERT(0);
			return -EFSCORRUPTED;
		}
		if (((retval == -ENOENT) || (retval == -ENOATTR)) &&
		    (blk->hashval == args->hashval)) {
			error = xfs_da3_path_shift(state, &state->path, 1, 1,
						   &retval);
			if (error)
				return error;
			if (retval == 0) {
				continue;
			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
				/* path_shift() gives ENOENT */
				retval = -ENOATTR;
			}
		}
		break;
	}
	*result = retval;
	return 0;
}

/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Compare two intermediate nodes for "order".
 */
STATIC int
xfs_da3_node_order(
	struct xfs_inode	*dp,
	struct xfs_buf		*node1_bp,
	struct xfs_buf		*node2_bp)
{
	struct xfs_da_intnode	*node1;
	struct xfs_da_intnode	*node2;
	struct xfs_da_node_entry *btree1;
	struct xfs_da_node_entry *btree2;
	struct xfs_da3_icnode_hdr node1hdr;
	struct xfs_da3_icnode_hdr node2hdr;

	node1 = node1_bp->b_addr;
	node2 = node2_bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&node1hdr, node1);
	dp->d_ops->node_hdr_from_disk(&node2hdr, node2);
	btree1 = dp->d_ops->node_tree_p(node1);
	btree2 = dp->d_ops->node_tree_p(node2);

	if (node1hdr.count > 0 && node2hdr.count > 0 &&
	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
	     (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
	      be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
		return 1;
	}
	return 0;
}

/*
 * Link a new block into a doubly linked list of blocks (of whatever type).
 */
int							/* error */
xfs_da3_blk_link(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*old_blk,
	struct xfs_da_state_blk	*new_blk)
{
	struct xfs_da_blkinfo	*old_info;
	struct xfs_da_blkinfo	*new_info;
	struct xfs_da_blkinfo	*tmp_info;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	int			before = 0;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	old_info = old_blk->bp->b_addr;
	new_info = new_blk->bp->b_addr;
	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);

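	/* decide whether the new block sorts before the existing one */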
	switch (old_blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
		break;
	case XFS_DA_NODE_MAGIC:
		before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
		break;
	}

	/*
	 * Link blocks in appropriate order.
	 */
	if (before) {
		/*
		 * Link new block in before existing block.
		 */
		trace_xfs_da_link_before(args);
		new_info->forw = cpu_to_be32(old_blk->blkno);
		new_info->back = old_info->back;
		if (old_info->back) {
			error = xfs_da3_node_read(args->trans, dp,
						be32_to_cpu(old_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
			tmp_info->forw = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
					  sizeof(*tmp_info) - 1);
		}
		old_info->back = cpu_to_be32(new_blk->blkno);
	} else {
		/*
		 * Link new block in after existing block.
		 */
		trace_xfs_da_link_after(args);
		new_info->forw = old_info->forw;
		new_info->back = cpu_to_be32(old_blk->blkno);
		if (old_info->forw) {
			error = xfs_da3_node_read(args->trans, dp,
						be32_to_cpu(old_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
			tmp_info->back = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
					  sizeof(*tmp_info) - 1);
		}
		old_info->forw = cpu_to_be32(new_blk->blkno);
	}

	xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
	xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
	return 0;
}

/*
 * Unlink a block from a doubly linked list of blocks.
 */
STATIC int						/* error */
xfs_da3_blk_unlink(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_da_blkinfo	*drop_info;
	struct xfs_da_blkinfo	*save_info;
	struct xfs_da_blkinfo	*tmp_info;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	int			error;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	save_info = save_blk->bp->b_addr;
	drop_info = drop_blk->bp->b_addr;
	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(save_blk->magic == drop_blk->magic);
	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
	       (be32_to_cpu(drop_info->back) == save_blk->blkno));

	/*
	 * Unlink the leaf block from the doubly linked chain of leaves.
	 */
	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
		trace_xfs_da_unlink_back(args);
		save_info->back = drop_info->back;
		if (drop_info->back) {
			error = xfs_da3_node_read(args->trans, args->dp,
						be32_to_cpu(drop_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
			tmp_info->forw = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
					  sizeof(*tmp_info) - 1);
		}
	} else {
		trace_xfs_da_unlink_forward(args);
		save_info->forw = drop_info->forw;
		if (drop_info->forw) {
			error = xfs_da3_node_read(args->trans, args->dp,
						be32_to_cpu(drop_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
			tmp_info->back = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
					  sizeof(*tmp_info) - 1);
		}
	}

	xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
	return 0;
}
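
/*
 * For illustration (the names SAVE, DROP and NEXT are hypothetical):
 * unlinking DROP, whose surviving sibling is SAVE, splices it out of
 * the chain in either direction:
 *
 *	before:	SAVE <-> DROP <-> NEXT
 *	after:	SAVE <-> NEXT
 *
 * Only SAVE and NEXT are rewritten and logged; DROP itself is about to
 * be freed, so its pointers are left alone.
 */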

/*
 * Move a path "forward" or "!forward" one block at the current level.
 *
 * This routine adjusts a "path" to point to the next block, either
 * "forward" (higher hashvalues) or "!forward" (lower hashvalues), in
 * the Btree, including updating pointers to the intermediate nodes
 * between the new bottom and the root.
 */
int							/* error */
xfs_da3_path_shift(
	struct xfs_da_state	*state,
	struct xfs_da_state_path *path,
	int			forward,
	int			release,
	int			*result)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*info;
	struct xfs_da_intnode	*node;
	struct xfs_da_args	*args;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_buf		*bp;
	xfs_dablk_t		blkno = 0;
	int			level;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_path_shift(state->args);

	/*
	 * Roll up the Btree looking for the first block where our
	 * current index is not at the edge of the block.  Note that
	 * we skip the bottom layer because we want the sibling block.
	 */
	args = state->args;
	ASSERT(args != NULL);
	ASSERT(path != NULL);
	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
	level = (path->active - 1) - 1;	/* skip bottom layer in path */
	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
		node = blk->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
		btree = dp->d_ops->node_tree_p(node);

		if (forward && (blk->index < nodehdr.count - 1)) {
			blk->index++;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		} else if (!forward && (blk->index > 0)) {
			blk->index--;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		}
	}
	if (level < 0) {
		*result = -ENOENT;	/* we're out of our tree */
		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
		return 0;
	}

	/*
	 * Roll down the edge of the subtree until we reach the
	 * same depth we were at originally.
	 */
	for (blk++, level++; level < path->active; blk++, level++) {
		/*
		 * Read the next child block into a local buffer.
		 */
		error = xfs_da3_node_read(args->trans, dp, blkno, -1, &bp,
					  args->whichfork);
		if (error)
			return error;

		/*
		 * Release the old block (if it's dirty, the trans doesn't
		 * actually let go) and swap the local buffer into the path
		 * structure.  This ensures failure of the above read doesn't
		 * set a NULL buffer in an active slot in the path.
		 */
		if (release)
			xfs_trans_brelse(args->trans, blk->bp);
		blk->blkno = blkno;
		blk->bp = bp;

		info = blk->bp->b_addr;
		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));

		/*
		 * Note: we flatten the magic number to a single type so we
		 * don't have to compare against crc/non-crc types elsewhere.
		 */
		switch (be16_to_cpu(info->magic)) {
		case XFS_DA_NODE_MAGIC:
		case XFS_DA3_NODE_MAGIC:
			blk->magic = XFS_DA_NODE_MAGIC;
			node = (xfs_da_intnode_t *)info;
			dp->d_ops->node_hdr_from_disk(&nodehdr, node);
			btree = dp->d_ops->node_tree_p(node);
			blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
			if (forward)
				blk->index = 0;
			else
				blk->index = nodehdr.count - 1;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			blk->magic = XFS_ATTR_LEAF_MAGIC;
			ASSERT(level == path->active - 1);
			blk->index = 0;
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			blk->magic = XFS_DIR2_LEAFN_MAGIC;
			ASSERT(level == path->active - 1);
			blk->index = 0;
			blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
							      blk->bp, NULL);
			break;
		default:
			ASSERT(0);
			break;
		}
	}
	*result = 0;
	return 0;
}
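
/*
 * A minimal usage sketch (hypothetical caller; error handling elided
 * and the forward/release arguments depend on the call site): walking
 * to the next leaf when a lookup continues past the end of a block:
 *
 *	error = xfs_da3_path_shift(state, &state->path, 1, 1, &retval);
 *	if (error)
 *		return error;
 *	if (retval)
 *		break;		(no sibling in that direction)
 */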

/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Implement a simple hash on a character string.
 * Rotate the hash value by 7 bits, then XOR each character in.
 * This is implemented with some source-level loop unrolling.
 */
xfs_dahash_t
xfs_da_hashname(const uint8_t *name, int namelen)
{
	xfs_dahash_t	hash;

	/*
	 * Do four characters at a time as long as we can.
	 */
	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
		       (name[3] << 0) ^ rol32(hash, 7 * 4);

	/*
	 * Now do the rest of the characters.
	 */
	switch (namelen) {
	case 3:
		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
		       rol32(hash, 7 * 3);
	case 2:
		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
	case 1:
		return (name[0] << 0) ^ rol32(hash, 7 * 1);
	default: /* case 0: */
		return hash;
	}
}
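
/*
 * Worked example (illustrative, not from the original source): for the
 * two-byte name "ab" the unrolled loop never runs, so the namelen == 2
 * case computes
 *
 *	('a' << 7) ^ ('b' << 0) ^ rol32(0, 7 * 2)
 *	    = 0x3080 ^ 0x0062 ^ 0
 *	    = 0x30e2
 *
 * Longer names fold each four-byte group into the hash, rotating the
 * accumulated value left by 28 bits per group.
 */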

enum xfs_dacmp
xfs_da_compname(
	struct xfs_da_args	*args,
	const unsigned char	*name,
	int			len)
{
	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}

static xfs_dahash_t
xfs_default_hashname(
	struct xfs_name	*name)
{
	return xfs_da_hashname(name->name, name->len);
}

const struct xfs_nameops xfs_default_nameops = {
	.hashname	= xfs_default_hashname,
	.compname	= xfs_da_compname
};

int
xfs_da_grow_inode_int(
	struct xfs_da_args	*args,
	xfs_fileoff_t		*bno,
	int			count)
{
	struct xfs_trans	*tp = args->trans;
	struct xfs_inode	*dp = args->dp;
	int			w = args->whichfork;
	xfs_rfsblock_t		nblks = dp->i_d.di_nblocks;
	struct xfs_bmbt_irec	map, *mapp;
	int			nmap, error, got, i, mapi;

	/*
	 * Find a spot in the file space to put the new block.
	 */
	error = xfs_bmap_first_unused(tp, dp, count, bno, w);
	if (error)
		return error;

	/*
	 * Try mapping it in one filesystem block.
	 */
	nmap = 1;
	error = xfs_bmapi_write(tp, dp, *bno, count,
			xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
			args->total, &map, &nmap);
	if (error)
		return error;

	ASSERT(nmap <= 1);
	if (nmap == 1) {
		mapp = &map;
		mapi = 1;
	} else if (nmap == 0 && count > 1) {
		xfs_fileoff_t		b;
		int			c;

		/*
		 * If we didn't get it and the block might work if fragmented,
		 * try without the CONTIG flag.  Loop until we get it all.
		 */
		mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
		for (b = *bno, mapi = 0; b < *bno + count; ) {
			nmap = min(XFS_BMAP_MAX_NMAP, count);
			c = (int)(*bno + count - b);
			error = xfs_bmapi_write(tp, dp, b, c,
					xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
					args->total, &mapp[mapi], &nmap);
			if (error)
				goto out_free_map;
			if (nmap < 1)
				break;
			mapi += nmap;
			b = mapp[mapi - 1].br_startoff +
			    mapp[mapi - 1].br_blockcount;
		}
	} else {
		mapi = 0;
		mapp = NULL;
	}

	/*
	 * Count the blocks we got, make sure it matches the total.
	 */
	for (i = 0, got = 0; i < mapi; i++)
		got += mapp[i].br_blockcount;
	if (got != count || mapp[0].br_startoff != *bno ||
	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
	    *bno + count) {
		error = -ENOSPC;
		goto out_free_map;
	}

	/* account for newly allocated blocks in reserved blocks total */
	args->total -= dp->i_d.di_nblocks - nblks;

out_free_map:
	if (mapp != &map)
		kmem_free(mapp);
	return error;
}
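
/*
 * Noted for illustration: the allocation above is two-phase.  A single
 * contiguous mapping is attempted first with XFS_BMAPI_CONTIG, and only
 * if that fails for a multi-block request is the range filled piecewise
 * without the flag.  Either way the result must be logically contiguous
 * in the file, or the whole grow fails with -ENOSPC.
 */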

/*
 * Add a block to the btree ahead of the file.
 * Return the new block number to the caller.
 */
int
xfs_da_grow_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		*new_blkno)
{
	xfs_fileoff_t		bno;
	int			error;

	trace_xfs_da_grow_inode(args);

	bno = args->geo->leafblk;
	error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount);
	if (!error)
		*new_blkno = (xfs_dablk_t)bno;
	return error;
}

/*
 * Ick.  We need to always be able to remove a btree block, even
 * if there's no space reservation because the filesystem is full.
 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
 * It swaps the target block with the last block in the file.  The
 * last block in the file can always be removed, since removing it
 * cannot cause a bmap btree split.
 */
STATIC int
xfs_da3_swap_lastblock(
	struct xfs_da_args	*args,
	xfs_dablk_t		*dead_blknop,
	struct xfs_buf		**dead_bufp)
{
	struct xfs_da_blkinfo	*dead_info;
	struct xfs_da_blkinfo	*sib_info;
	struct xfs_da_intnode	*par_node;
	struct xfs_da_intnode	*dead_node;
	struct xfs_dir2_leaf	*dead_leaf2;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr par_hdr;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_mount	*mp;
	struct xfs_buf		*dead_buf;
	struct xfs_buf		*last_buf;
	struct xfs_buf		*sib_buf;
	struct xfs_buf		*par_buf;
	xfs_dahash_t		dead_hash;
	xfs_fileoff_t		lastoff;
	xfs_dablk_t		dead_blkno;
	xfs_dablk_t		last_blkno;
	xfs_dablk_t		sib_blkno;
	xfs_dablk_t		par_blkno;
	int			error;
	int			w;
	int			entno;
	int			level;
	int			dead_level;

	trace_xfs_da_swap_lastblock(args);

	dead_buf = *dead_bufp;
	dead_blkno = *dead_blknop;
	tp = args->trans;
	dp = args->dp;
	w = args->whichfork;
	ASSERT(w == XFS_DATA_FORK);
	mp = dp->i_mount;
	lastoff = args->geo->freeblk;
	error = xfs_bmap_last_before(tp, dp, &lastoff, w);
	if (error)
		return error;
	if (unlikely(lastoff == 0)) {
		XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
				 mp);
		return -EFSCORRUPTED;
	}
	/*
	 * Read the last block in the btree space.
	 */
	last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount;
	error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w);
	if (error)
		return error;
	/*
	 * Copy the last block into the dead buffer and log it.
	 */
	memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize);
	xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1);
	dead_info = dead_buf->b_addr;
	/*
	 * Get values from the moved block.
	 */
	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
		dp->d_ops->leaf_hdr_from_disk(&leafhdr, dead_leaf2);
		ents = dp->d_ops->leaf_ents_p(dead_leaf2);
		dead_level = 0;
		dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
	} else {
		struct xfs_da3_icnode_hdr deadhdr;

		dead_node = (xfs_da_intnode_t *)dead_info;
		dp->d_ops->node_hdr_from_disk(&deadhdr, dead_node);
		btree = dp->d_ops->node_tree_p(dead_node);
		dead_level = deadhdr.level;
		dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
	}
	sib_buf = par_buf = NULL;
	/*
	 * If the moved block has a left sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->forw) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		sib_info->forw = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
					sizeof(sib_info->forw)));
		sib_buf = NULL;
	}
	/*
	 * If the moved block has a right sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->back) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		sib_info->back = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
					sizeof(sib_info->back)));
		sib_buf = NULL;
	}
	par_blkno = args->geo->leafblk;
	level = -1;
	/*
	 * Walk down the tree looking for the parent of the moved block.
	 */
	for (;;) {
		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
		if (level >= 0 && level != par_hdr.level + 1) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		level = par_hdr.level;
		btree = dp->d_ops->node_tree_p(par_node);
		for (entno = 0;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].hashval) < dead_hash;
		     entno++)
			continue;
		if (entno == par_hdr.count) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		par_blkno = be32_to_cpu(btree[entno].before);
		if (level == dead_level + 1)
			break;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
	}
	/*
	 * We're in the right parent block.
	 * Look for the right entry.
	 */
	for (;;) {
		for (;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].before) != last_blkno;
		     entno++)
			continue;
		if (entno < par_hdr.count)
			break;
		par_blkno = par_hdr.forw;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
		if (unlikely(par_blkno == 0)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
		if (par_hdr.level != level) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		btree = dp->d_ops->node_tree_p(par_node);
		entno = 0;
	}
	/*
	 * Update the parent entry pointing to the moved block.
	 */
	btree[entno].before = cpu_to_be32(dead_blkno);
	xfs_trans_log_buf(tp, par_buf,
		XFS_DA_LOGRANGE(par_node, &btree[entno].before,
				sizeof(btree[entno].before)));
	*dead_blknop = last_blkno;
	*dead_bufp = last_buf;
	return 0;
done:
	if (par_buf)
		xfs_trans_brelse(tp, par_buf);
	if (sib_buf)
		xfs_trans_brelse(tp, sib_buf);
	xfs_trans_brelse(tp, last_buf);
	return error;
}
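
/*
 * For illustration (DEAD and LAST are hypothetical block names): when
 * DEAD cannot be punched out directly, LAST's contents are copied into
 * DEAD, LAST's siblings and parent entry are repointed at DEAD's block
 * number, and LAST becomes the block that actually gets freed:
 *
 *	before:	... <-> sib <-> LAST	(DEAD is doomed)
 *	after:	... <-> sib <-> DEAD	(LAST is doomed, and is returned
 *					 via *dead_blknop / *dead_bufp)
 */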

/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		dead_blkno,
	struct xfs_buf		*dead_buf)
{
	struct xfs_inode	*dp;
	int			done, error, w, count;
	struct xfs_trans	*tp;

	trace_xfs_da_shrink_inode(args);

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	count = args->geo->fsbcount;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		error = xfs_bunmapi(tp, dp, dead_blkno, count,
				    xfs_bmapi_aflag(w), 0, &done);
		if (error == -ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			error = xfs_da3_swap_lastblock(args, &dead_blkno,
						       &dead_buf);
			if (error)
				break;
		} else {
			break;
		}
	}
	xfs_trans_binval(tp, dead_buf);
	return error;
}
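
/*
 * Noted for illustration: on -ENOSPC the loop above does not give up
 * for directory blocks.  It swaps the doomed block with the last da
 * block via xfs_da3_swap_lastblock() and retries xfs_bunmapi() on the
 * new tail, which can always be unmapped without a bmap btree split.
 */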

/*
 * See if the mapping(s) for this btree block are valid, i.e.
 * don't contain holes, are logically contiguous, and cover the whole range.
 */
STATIC int
xfs_da_map_covers_blocks(
	int		nmap,
	xfs_bmbt_irec_t	*mapp,
	xfs_dablk_t	bno,
	int		count)
{
	int		i;
	xfs_fileoff_t	off;

	for (i = 0, off = bno; i < nmap; i++) {
		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
			return 0;
		}
		if (off != mapp[i].br_startoff) {
			return 0;
		}
		off += mapp[i].br_blockcount;
	}
	return off == bno + count;
}
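
/*
 * Example (illustrative): for bno 0 and count 3, the mappings
 * { br_startoff = 0, br_blockcount = 2 } and { br_startoff = 2,
 * br_blockcount = 1 } cover the range, so the check returns 1.  A map
 * whose br_startblock is HOLESTARTBLOCK or DELAYSTARTBLOCK, or one that
 * starts past the running offset, makes it return 0.
 */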

/*
 * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
 *
 * For the single map case, it is assumed that the caller has provided a pointer
 * to a valid xfs_buf_map.  For the multiple map case, this function will
 * allocate the xfs_buf_map to hold all the maps and replace the caller's single
 * map pointer with the allocated map.
 */
static int
xfs_buf_map_from_irec(
	struct xfs_mount	*mp,
	struct xfs_buf_map	**mapp,
	int			*nmaps,
	struct xfs_bmbt_irec	*irecs,
	int			nirecs)
{
	struct xfs_buf_map	*map;
	int			i;

	ASSERT(*nmaps == 1);
	ASSERT(nirecs >= 1);

	if (nirecs > 1) {
		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
				  KM_SLEEP | KM_NOFS);
		if (!map)
			return -ENOMEM;
		*mapp = map;
	}

	*nmaps = nirecs;
	map = *mapp;
	for (i = 0; i < *nmaps; i++) {
		ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
		       irecs[i].br_startblock != HOLESTARTBLOCK);
		map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
		map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
	}
	return 0;
}

/*
 * Map the block we are given, ready for reading.  There are three possible
 * return values:
 *	-1 - will be returned if we land in a hole and mappedbno == -2 so the
 *	     caller knows not to execute a subsequent read.
 *	 0 - if we mapped the block successfully
 *	<0 - negative errno if there was an error.
 */
static int
xfs_dabuf_map(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	struct xfs_buf_map	**map,
	int			*nmaps)
{
	struct xfs_mount	*mp = dp->i_mount;
	int			nfsb;
	int			error = 0;
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	*irecs = &irec;
	int			nirecs;

	ASSERT(map && *map);
	ASSERT(*nmaps == 1);

	if (whichfork == XFS_DATA_FORK)
		nfsb = mp->m_dir_geo->fsbcount;
	else
		nfsb = mp->m_attr_geo->fsbcount;

	/*
	 * Caller doesn't have a mapping.  -2 means don't complain
	 * if we land in a hole.
	 */
	if (mappedbno == -1 || mappedbno == -2) {
		/*
		 * Optimize the one-block case.
		 */
		if (nfsb != 1)
			irecs = kmem_zalloc(sizeof(irec) * nfsb,
					    KM_SLEEP | KM_NOFS);

		nirecs = nfsb;
		error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
				       &nirecs, xfs_bmapi_aflag(whichfork));
		if (error)
			goto out;
	} else {
		irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
		irecs->br_startoff = (xfs_fileoff_t)bno;
		irecs->br_blockcount = nfsb;
		irecs->br_state = 0;
		nirecs = 1;
	}

	if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
		error = mappedbno == -2 ? -1 : -EFSCORRUPTED;
		if (unlikely(error == -EFSCORRUPTED)) {
			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
				int i;
				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
					__func__, (long long)bno,
					(long long)dp->i_ino);
				/* report every mapping, not just the first */
				for (i = 0; i < nirecs; i++) {
					xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
						i,
						(long long)irecs[i].br_startoff,
						(long long)irecs[i].br_startblock,
						(long long)irecs[i].br_blockcount,
						irecs[i].br_state);
				}
			}
			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
					 XFS_ERRLEVEL_LOW, mp);
		}
		goto out;
	}
	error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
out:
	if (irecs != &irec)
		kmem_free(irecs);
	return error;
}
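
/*
 * The mappedbno convention used by the helpers below (spelled out for
 * illustration, derived from the checks above): -1 means "look up the
 * bmap for bno", -2 means the same but a hole is tolerated (the map
 * call then returns -1 and the caller quietly skips the I/O), and any
 * other value is a known disk address that is used directly.
 */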

/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
			      &mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
				   mapp, nmap, 0);
	error = bp ? bp->b_error : -EIO;
	if (error) {
		if (bp)
			xfs_trans_brelse(trans, bp);
		goto out_free;
	}

	*bpp = bp;

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}

/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
			      &mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	error = xfs_trans_read_buf_map(dp->i_mount, trans,
					dp->i_mount->m_ddev_targp,
					mapp, nmap, 0, &bp, ops);
	if (error)
		goto out_free;

	if (whichfork == XFS_ATTR_FORK)
		xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
	else
		xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
	*bpp = bp;
out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}

/*
 * Readahead the dir/attr block.
 */
int
xfs_da_reada_buf(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
			      &mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	mappedbno = mapp[0].bm_bn;
	xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}