
Searched refs:sbi (Results 1 – 25 of 154) sorted by relevance
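
All of the hits below follow the same VFS convention: sbi is the customary local name for a filesystem's private "superblock info" structure, allocated at mount time, hung off the generic struct super_block through its s_fs_info pointer, and fetched back with a small per-filesystem cast helper (SYSV_SB(), AFFS_SB(), OMFS_SB(), UFS_SB(), autofs_sbi(), F2FS_I_SB() and friends in the results that follow). A minimal sketch of that pattern, using a hypothetical foofs_sb_info rather than any of the structs listed here:

#include <linux/fs.h>
#include <linux/slab.h>

struct foofs_sb_info {				/* hypothetical per-mount state */
	unsigned long	s_block_count;
};

/* Same shape as the cast helpers seen in the results below. */
static inline struct foofs_sb_info *FOOFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

static int foofs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct foofs_sb_info *sbi;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	sb->s_fs_info = sbi;		/* later code reads it back via FOOFS_SB(sb) */
	return 0;
}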


/linux-4.19.296/fs/f2fs/
segment.h
34 #define IS_CURSEG(sbi, seg) \ argument
35 (((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) || \
36 ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) || \
37 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) || \
38 ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) || \
39 ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) || \
40 ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))
42 #define IS_CURSEC(sbi, secno) \ argument
43 (((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \
44 (sbi)->segs_per_sec) || \
[all …]
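
IS_CURSEG() above answers whether a segment number is one of the six logs f2fs currently has open for writing (hot/warm/cold data and node). A hypothetical helper, not from the tree, showing how such a check is typically used to skip the active logs while scanning the main area (MAIN_SEGS() is the real f2fs macro for the number of main-area segments):

/* Hypothetical illustration only: count main-area segments that are not
 * one of the six currently open logs. */
static unsigned int demo_count_non_cur_segments(struct f2fs_sb_info *sbi)
{
	unsigned int segno, cnt = 0;

	for (segno = 0; segno < MAIN_SEGS(sbi); segno++)
		if (!IS_CURSEG(sbi, segno))
			cnt++;
	return cnt;
}
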
checkpoint.c
29 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io) in f2fs_stop_checkpoint() argument
31 f2fs_build_fault_attr(sbi, 0, 0); in f2fs_stop_checkpoint()
32 set_ckpt_flags(sbi, CP_ERROR_FLAG); in f2fs_stop_checkpoint()
34 f2fs_flush_merged_writes(sbi); in f2fs_stop_checkpoint()
40 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) in f2fs_grab_meta_page() argument
42 struct address_space *mapping = META_MAPPING(sbi); in f2fs_grab_meta_page()
59 static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index, in __get_meta_page() argument
62 struct address_space *mapping = META_MAPPING(sbi); in __get_meta_page()
65 .sbi = sbi, in __get_meta_page()
109 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) in f2fs_get_meta_page() argument
[all …]
super.c
61 void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate, in f2fs_build_fault_attr() argument
64 struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info; in f2fs_build_fault_attr()
214 static inline void limit_reserve_root(struct f2fs_sb_info *sbi) in limit_reserve_root() argument
216 block_t limit = (sbi->user_block_count << 1) / 1000; in limit_reserve_root()
219 if (test_opt(sbi, RESERVE_ROOT) && in limit_reserve_root()
220 F2FS_OPTION(sbi).root_reserved_blocks > limit) { in limit_reserve_root()
221 F2FS_OPTION(sbi).root_reserved_blocks = limit; in limit_reserve_root()
222 f2fs_msg(sbi->sb, KERN_INFO, in limit_reserve_root()
224 F2FS_OPTION(sbi).root_reserved_blocks); in limit_reserve_root()
226 if (!test_opt(sbi, RESERVE_ROOT) && in limit_reserve_root()
[all …]
segment.c
172 bool f2fs_need_SSR(struct f2fs_sb_info *sbi) in f2fs_need_SSR() argument
174 int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES); in f2fs_need_SSR()
175 int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS); in f2fs_need_SSR()
176 int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA); in f2fs_need_SSR()
178 if (test_opt(sbi, LFS)) in f2fs_need_SSR()
180 if (sbi->gc_mode == GC_URGENT) in f2fs_need_SSR()
183 return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs + in f2fs_need_SSR()
184 SM_I(sbi)->min_ssr_sections + reserved_sections(sbi)); in f2fs_need_SSR()
189 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_register_inmem_page() local
208 spin_lock(&sbi->inode_lock[ATOMIC_FILE]); in f2fs_register_inmem_page()
[all …]
debug.c
30 static void update_general_status(struct f2fs_sb_info *sbi) in update_general_status() argument
32 struct f2fs_stat_info *si = F2FS_STAT(sbi); in update_general_status()
36 si->hit_largest = atomic64_read(&sbi->read_hit_largest); in update_general_status()
37 si->hit_cached = atomic64_read(&sbi->read_hit_cached); in update_general_status()
38 si->hit_rbtree = atomic64_read(&sbi->read_hit_rbtree); in update_general_status()
40 si->total_ext = atomic64_read(&sbi->total_hit_ext); in update_general_status()
41 si->ext_tree = atomic_read(&sbi->total_ext_tree); in update_general_status()
42 si->zombie_tree = atomic_read(&sbi->total_zombie_tree); in update_general_status()
43 si->ext_node = atomic_read(&sbi->total_ext_node); in update_general_status()
44 si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES); in update_general_status()
[all …]
gc.c
28 struct f2fs_sb_info *sbi = data; in gc_thread_func() local
29 struct f2fs_gc_kthread *gc_th = sbi->gc_thread; in gc_thread_func()
30 wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head; in gc_thread_func()
51 if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) { in gc_thread_func()
56 if (time_to_inject(sbi, FAULT_CHECKPOINT)) { in gc_thread_func()
58 f2fs_stop_checkpoint(sbi, false); in gc_thread_func()
61 if (!sb_start_write_trylock(sbi->sb)) in gc_thread_func()
77 if (sbi->gc_mode == GC_URGENT) { in gc_thread_func()
79 mutex_lock(&sbi->gc_mutex); in gc_thread_func()
83 if (!mutex_trylock(&sbi->gc_mutex)) in gc_thread_func()
[all …]
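
gc_thread_func() above is the body of f2fs's background garbage-collection kthread: it sleeps on a per-sbi wait queue, backs off while the filesystem is frozen, and takes sbi->gc_mutex (or only tries to) before collecting. A stripped-down sketch of that kthread skeleton with the GC work itself omitted; the wake-up conditions and timeouts of the real thread differ:

#include <linux/kthread.h>
#include <linux/freezer.h>

static int demo_gc_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;

	set_freezable();
	do {
		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current),
				msecs_to_jiffies(30000));
		if (try_to_freeze())
			continue;
		if (kthread_should_stop())
			break;
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;		/* someone else is already collecting */
		/* ... pick a victim section and migrate its blocks ... */
		mutex_unlock(&sbi->gc_mutex);
	} while (!kthread_should_stop());
	return 0;
}
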
shrinker.c
22 static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi) in __count_nat_entries() argument
24 long count = NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt; in __count_nat_entries()
29 static unsigned long __count_free_nids(struct f2fs_sb_info *sbi) in __count_free_nids() argument
31 long count = NM_I(sbi)->nid_cnt[FREE_NID] - MAX_FREE_NIDS; in __count_free_nids()
36 static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi) in __count_extent_cache() argument
38 return atomic_read(&sbi->total_zombie_tree) + in __count_extent_cache()
39 atomic_read(&sbi->total_ext_node); in __count_extent_cache()
45 struct f2fs_sb_info *sbi; in f2fs_shrink_count() local
52 sbi = list_entry(p, struct f2fs_sb_info, s_list); in f2fs_shrink_count()
55 if (!mutex_trylock(&sbi->umount_mutex)) { in f2fs_shrink_count()
[all …]
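
The __count_* helpers above feed the "count" side of a memory shrinker: they report how many NAT entries, free nids and extent-cache nodes could be reclaimed across all mounted f2fs instances (f2fs_shrink_count() above walks the s_list of sbi's and sums them). Count/scan callbacks like these are wired into a struct shrinker and registered once; a minimal sketch with hypothetical demo_* names:

#include <linux/shrinker.h>

static unsigned long demo_shrink_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	/* would sum the per-sbi reclaimable counts, as above */
	return 0;
}

static unsigned long demo_shrink_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	/* would free up to sc->nr_to_scan objects and return how many went */
	return SHRINK_STOP;
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_shrink_count,
	.scan_objects	= demo_shrink_scan,
	.seeks		= DEFAULT_SEEKS,
};

/* register_shrinker(&demo_shrinker) at module init,
 * unregister_shrinker(&demo_shrinker) at exit. */
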
sysfs.c
47 static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type) in __struct_ptr() argument
50 return (unsigned char *)sbi->gc_thread; in __struct_ptr()
52 return (unsigned char *)SM_I(sbi); in __struct_ptr()
54 return (unsigned char *)SM_I(sbi)->dcc_info; in __struct_ptr()
56 return (unsigned char *)NM_I(sbi); in __struct_ptr()
58 return (unsigned char *)sbi; in __struct_ptr()
62 return (unsigned char *)&F2FS_OPTION(sbi).fault_info; in __struct_ptr()
68 struct f2fs_sb_info *sbi, char *buf) in dirty_segments_show() argument
71 (unsigned long long)(dirty_segments(sbi))); in dirty_segments_show()
75 struct f2fs_sb_info *sbi, char *buf) in lifetime_write_kbytes_show() argument
[all …]
f2fs.h
33 #define f2fs_bug_on(sbi, condition) BUG_ON(condition) argument
35 #define f2fs_bug_on(sbi, condition) \ argument
39 set_sbi_flag(sbi, SBI_NEED_FSCK); \
104 #define F2FS_OPTION(sbi) ((sbi)->mount_opt) argument
105 #define clear_opt(sbi, option) (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option) argument
106 #define set_opt(sbi, option) (F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option) argument
107 #define test_opt(sbi, option) (F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option) argument
182 #define MAX_DISCARD_BLOCKS(sbi) BLKS_PER_SEC(sbi) argument
1021 struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */ member
1044 struct f2fs_sb_info *sbi; /* f2fs superblock */ member
[all …]
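
The set_opt()/clear_opt()/test_opt() macros above store each mount option as an F2FS_MOUNT_* bit inside F2FS_OPTION(sbi).opt. A hypothetical snippet showing the three of them together; LFS is a real f2fs mount option, also tested in the segment.c hit above:

/* Hypothetical illustration of the option-bit helpers defined above. */
static void demo_toggle_lfs(struct f2fs_sb_info *sbi, bool enable)
{
	if (enable)
		set_opt(sbi, LFS);	/* F2FS_OPTION(sbi).opt |= F2FS_MOUNT_LFS  */
	else
		clear_opt(sbi, LFS);	/* F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_LFS */

	if (test_opt(sbi, LFS))
		pr_info("f2fs: LFS mode enabled\n");
}
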
node.c
36 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid) in f2fs_check_nid_range() argument
38 if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) { in f2fs_check_nid_range()
39 set_sbi_flag(sbi, SBI_NEED_FSCK); in f2fs_check_nid_range()
40 f2fs_msg(sbi->sb, KERN_WARNING, in f2fs_check_nid_range()
48 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type) in f2fs_available_free_memory() argument
50 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_available_free_memory()
72 if (excess_cached_nats(sbi)) in f2fs_available_free_memory()
75 if (sbi->sb->s_bdi->wb.dirty_exceeded) in f2fs_available_free_memory()
77 mem_size = get_pages(sbi, F2FS_DIRTY_DENTS); in f2fs_available_free_memory()
83 mem_size += sbi->im[i].ino_num * in f2fs_available_free_memory()
[all …]
/linux-4.19.296/fs/sysv/
super.c
47 static void detected_xenix(struct sysv_sb_info *sbi, unsigned *max_links) in detected_xenix() argument
49 struct buffer_head *bh1 = sbi->s_bh1; in detected_xenix()
50 struct buffer_head *bh2 = sbi->s_bh2; in detected_xenix()
63 sbi->s_fic_size = XENIX_NICINOD; in detected_xenix()
64 sbi->s_flc_size = XENIX_NICFREE; in detected_xenix()
65 sbi->s_sbd1 = (char *)sbd1; in detected_xenix()
66 sbi->s_sbd2 = (char *)sbd2; in detected_xenix()
67 sbi->s_sb_fic_count = &sbd1->s_ninode; in detected_xenix()
68 sbi->s_sb_fic_inodes = &sbd1->s_inode[0]; in detected_xenix()
69 sbi->s_sb_total_free_inodes = &sbd2->s_tinode; in detected_xenix()
[all …]
balloc.c
45 struct sysv_sb_info * sbi = SYSV_SB(sb); in sysv_free_block() local
47 sysv_zone_t *blocks = sbi->s_bcache; in sysv_free_block()
49 unsigned block = fs32_to_cpu(sbi, nr); in sysv_free_block()
56 if (sbi->s_type == FSTYPE_AFS) in sysv_free_block()
59 if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) { in sysv_free_block()
64 mutex_lock(&sbi->s_lock); in sysv_free_block()
65 count = fs16_to_cpu(sbi, *sbi->s_bcache_count); in sysv_free_block()
67 if (count > sbi->s_flc_size) { in sysv_free_block()
69 mutex_unlock(&sbi->s_lock); in sysv_free_block()
76 if (count == sbi->s_flc_size || count == 0) { in sysv_free_block()
[all …]
ialloc.c
42 struct sysv_sb_info *sbi = SYSV_SB(sb); in sv_sb_fic_inode() local
44 if (sbi->s_bh1 == sbi->s_bh2) in sv_sb_fic_inode()
45 return &sbi->s_sb_fic_inodes[i]; in sv_sb_fic_inode()
50 return (sysv_ino_t*)(sbi->s_sbd1 + offset); in sv_sb_fic_inode()
52 return (sysv_ino_t*)(sbi->s_sbd2 + offset); in sv_sb_fic_inode()
59 struct sysv_sb_info *sbi = SYSV_SB(sb); in sysv_raw_inode() local
61 int block = sbi->s_firstinodezone + sbi->s_block_base; in sysv_raw_inode()
63 block += (ino-1) >> sbi->s_inodes_per_block_bits; in sysv_raw_inode()
68 return res + ((ino-1) & sbi->s_inodes_per_block_1); in sysv_raw_inode()
73 struct sysv_sb_info *sbi = SYSV_SB(sb); in refill_free_cache() local
[all …]
inode.c
37 struct sysv_sb_info *sbi = SYSV_SB(sb); in sysv_sync_fs() local
40 mutex_lock(&sbi->s_lock); in sysv_sync_fs()
47 old_time = fs32_to_cpu(sbi, *sbi->s_sb_time); in sysv_sync_fs()
48 if (sbi->s_type == FSTYPE_SYSV4) { in sysv_sync_fs()
49 if (*sbi->s_sb_state == cpu_to_fs32(sbi, 0x7c269d38u - old_time)) in sysv_sync_fs()
50 *sbi->s_sb_state = cpu_to_fs32(sbi, 0x7c269d38u - time); in sysv_sync_fs()
51 *sbi->s_sb_time = cpu_to_fs32(sbi, time); in sysv_sync_fs()
52 mark_buffer_dirty(sbi->s_bh2); in sysv_sync_fs()
55 mutex_unlock(&sbi->s_lock); in sysv_sync_fs()
62 struct sysv_sb_info *sbi = SYSV_SB(sb); in sysv_remount() local
[all …]
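
Nearly every sysv hit passes sbi into fs16_to_cpu()/fs32_to_cpu()/cpu_to_fs32() because on-disk byte order is a per-superblock property of this filesystem family: images written by different hosts can be little-endian, big-endian or PDP-11 byte order. A simplified sketch of why the conversion helpers need sbi, assuming the s_bytesex field and BYTESEX_* constants from fs/sysv/sysv.h and ignoring the PDP-11 case that the real helpers also handle:

/* Simplified sketch; the in-tree helpers also handle PDP-11 byte order. */
static inline u32 demo_fs32_to_cpu(struct sysv_sb_info *sbi, u32 raw)
{
	if (sbi->s_bytesex == BYTESEX_LE)
		return le32_to_cpu((__force __le32)raw);
	return be32_to_cpu((__force __be32)raw);
}
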
/linux-4.19.296/fs/hfsplus/
super.c
101 struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); in hfsplus_system_write_inode() local
102 struct hfsplus_vh *vhdr = sbi->s_vhdr; in hfsplus_system_write_inode()
109 tree = sbi->ext_tree; in hfsplus_system_write_inode()
113 tree = sbi->cat_tree; in hfsplus_system_write_inode()
123 tree = sbi->attr_tree; in hfsplus_system_write_inode()
130 set_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags); in hfsplus_system_write_inode()
177 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); in hfsplus_sync_fs() local
178 struct hfsplus_vh *vhdr = sbi->s_vhdr; in hfsplus_sync_fs()
195 error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping); in hfsplus_sync_fs()
196 error2 = filemap_write_and_wait(sbi->ext_tree->inode->i_mapping); in hfsplus_sync_fs()
[all …]
options.c
100 int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi) in hfsplus_parse_options() argument
116 if (match_fourchar(&args[0], &sbi->creator)) { in hfsplus_parse_options()
122 if (match_fourchar(&args[0], &sbi->type)) { in hfsplus_parse_options()
132 sbi->umask = (umode_t)tmp; in hfsplus_parse_options()
139 sbi->uid = make_kuid(current_user_ns(), (uid_t)tmp); in hfsplus_parse_options()
140 if (!uid_valid(sbi->uid)) { in hfsplus_parse_options()
144 set_bit(HFSPLUS_SB_UID, &sbi->flags); in hfsplus_parse_options()
152 sbi->gid = make_kgid(current_user_ns(), (gid_t)tmp); in hfsplus_parse_options()
153 if (!gid_valid(sbi->gid)) { in hfsplus_parse_options()
157 set_bit(HFSPLUS_SB_GID, &sbi->flags); in hfsplus_parse_options()
[all …]
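
hfsplus_parse_options() above is a standard <linux/parser.h> option loop: the mount string is split on commas, each piece is matched against a token table, and the extracted values land in the sbi (creator/type four-chars, umask, uid, gid). A reduced sketch of that pattern handling only a umask= option; the token table and helper name are hypothetical, sbi->umask is the real field set above:

#include <linux/parser.h>
#include <linux/string.h>

enum { opt_umask, opt_err };

static const match_table_t demo_tokens = {
	{ opt_umask,	"umask=%o" },
	{ opt_err,	NULL },
};

/* Returns 1 on success, 0 on a bad option, like the real parser. */
static int demo_parse_options(char *input, struct hfsplus_sb_info *sbi)
{
	substring_t args[MAX_OPT_ARGS];
	char *p;
	int tmp;

	while ((p = strsep(&input, ",")) != NULL) {
		if (!*p)
			continue;
		switch (match_token(p, demo_tokens, args)) {
		case opt_umask:
			if (match_octal(&args[0], &tmp))
				return 0;
			sbi->umask = (umode_t)tmp;
			break;
		default:
			return 0;
		}
	}
	return 1;
}
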
/linux-4.19.296/fs/autofs/
inode.c
16 struct autofs_info *autofs_new_ino(struct autofs_sb_info *sbi) in autofs_new_ino() argument
25 ino->sbi = sbi; in autofs_new_ino()
44 struct autofs_sb_info *sbi = autofs_sbi(sb); in autofs_kill_sb() local
52 if (sbi) { in autofs_kill_sb()
54 autofs_catatonic_mode(sbi); in autofs_kill_sb()
55 put_pid(sbi->oz_pgrp); in autofs_kill_sb()
60 if (sbi) in autofs_kill_sb()
61 kfree_rcu(sbi, rcu); in autofs_kill_sb()
66 struct autofs_sb_info *sbi = autofs_sbi(root->d_sb); in autofs_show_options() local
69 if (!sbi) in autofs_show_options()
[all …]
root.c
68 struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb); in autofs_add_active() local
73 spin_lock(&sbi->lookup_lock); in autofs_add_active()
76 list_add(&ino->active, &sbi->active_list); in autofs_add_active()
79 spin_unlock(&sbi->lookup_lock); in autofs_add_active()
85 struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb); in autofs_del_active() local
90 spin_lock(&sbi->lookup_lock); in autofs_del_active()
96 spin_unlock(&sbi->lookup_lock); in autofs_del_active()
103 struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb); in autofs_dir_open() local
107 if (autofs_oz_mode(sbi)) in autofs_dir_open()
119 spin_lock(&sbi->lookup_lock); in autofs_dir_open()
[all …]
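
autofs_add_active()/autofs_del_active() above maintain a per-mount list of entries with lookups in flight, hanging each autofs_info off sbi->active_list under the sbi->lookup_lock spinlock. The core of that list handling, reduced to a sketch (the real functions also keep a small per-entry use count):

/* Reduced sketch of the list handling above; error checking omitted. */
static void demo_add_active(struct autofs_sb_info *sbi, struct autofs_info *ino)
{
	spin_lock(&sbi->lookup_lock);
	list_add(&ino->active, &sbi->active_list);
	spin_unlock(&sbi->lookup_lock);
}

static void demo_del_active(struct autofs_sb_info *sbi, struct autofs_info *ino)
{
	spin_lock(&sbi->lookup_lock);
	list_del_init(&ino->active);
	spin_unlock(&sbi->lookup_lock);
}
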
/linux-4.19.296/fs/affs/
bitmap.c
41 struct affs_sb_info *sbi = AFFS_SB(sb); in affs_free_block() local
49 if (block > sbi->s_partition_size) in affs_free_block()
52 blk = block - sbi->s_reserved; in affs_free_block()
53 bmap = blk / sbi->s_bmap_bits; in affs_free_block()
54 bit = blk % sbi->s_bmap_bits; in affs_free_block()
55 bm = &sbi->s_bitmap[bmap]; in affs_free_block()
57 mutex_lock(&sbi->s_bmlock); in affs_free_block()
59 bh = sbi->s_bmap_bh; in affs_free_block()
60 if (sbi->s_last_bmap != bmap) { in affs_free_block()
65 sbi->s_bmap_bh = bh; in affs_free_block()
[all …]
super.c
34 struct affs_sb_info *sbi = AFFS_SB(sb); in affs_commit_super() local
35 struct buffer_head *bh = sbi->s_root_bh; in affs_commit_super()
51 struct affs_sb_info *sbi = AFFS_SB(sb); in affs_put_super() local
54 cancel_delayed_work_sync(&sbi->sb_work); in affs_put_super()
66 struct affs_sb_info *sbi; in flush_superblock() local
69 sbi = container_of(work, struct affs_sb_info, sb_work.work); in flush_superblock()
70 sb = sbi->sb; in flush_superblock()
72 spin_lock(&sbi->work_lock); in flush_superblock()
73 sbi->work_queued = 0; in flush_superblock()
74 spin_unlock(&sbi->work_lock); in flush_superblock()
[all …]
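
flush_superblock() above runs from a delayed work item owned by the sbi: it clears sbi->work_queued under the sbi->work_lock spinlock before committing the superblock, and affs_put_super() cancels the work synchronously at unmount. A sketch of the queueing side under those assumptions; the helper name and the one-second delay are made up:

#include <linux/workqueue.h>

static void demo_sb_mark_dirty(struct super_block *sb)
{
	struct affs_sb_info *sbi = AFFS_SB(sb);

	spin_lock(&sbi->work_lock);
	if (!sbi->work_queued) {
		sbi->work_queued = 1;
		queue_delayed_work(system_long_wq, &sbi->sb_work, HZ);
	}
	spin_unlock(&sbi->work_lock);
}
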
/linux-4.19.296/fs/omfs/
inode.c
26 struct omfs_sb_info *sbi = OMFS_SB(sb); in omfs_bread() local
27 if (block >= sbi->s_num_blocks) in omfs_bread()
30 return sb_bread(sb, clus_to_blk(sbi, block)); in omfs_bread()
39 struct omfs_sb_info *sbi = OMFS_SB(dir->i_sb); in omfs_new_inode() local
45 err = omfs_allocate_range(dir->i_sb, sbi->s_mirrors, sbi->s_mirrors, in omfs_new_inode()
59 inode->i_size = sbi->s_sys_blocksize; in omfs_new_inode()
105 struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb); in __omfs_write_inode() local
130 oi->i_head.h_body_size = cpu_to_be32(sbi->s_sys_blocksize - in __omfs_write_inode()
151 for (i = 1; i < sbi->s_mirrors; i++) { in __omfs_write_inode()
204 struct omfs_sb_info *sbi = OMFS_SB(sb); in omfs_iget() local
[all …]
/linux-4.19.296/fs/ufs/
cylinder.c
32 struct ufs_sb_info * sbi = UFS_SB(sb); in ufs_read_cylinder() local
39 uspi = sbi->s_uspi; in ufs_read_cylinder()
40 ucpi = sbi->s_ucpi[bitmap_nr]; in ufs_read_cylinder()
41 ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data; in ufs_read_cylinder()
48 UCPI_UBH(ucpi)->bh[0] = sbi->s_ucg[cgno]; in ufs_read_cylinder()
52 sbi->s_cgno[bitmap_nr] = cgno; in ufs_read_cylinder()
74 brelse (sbi->s_ucg[j]); in ufs_read_cylinder()
75 sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY; in ufs_read_cylinder()
85 struct ufs_sb_info * sbi = UFS_SB(sb); in ufs_put_cylinder() local
93 uspi = sbi->s_uspi; in ufs_put_cylinder()
[all …]
/linux-4.19.296/fs/hpfs/
super.c
112 static void free_sbi(struct hpfs_sb_info *sbi) in free_sbi() argument
114 kfree(sbi->sb_cp_table); in free_sbi()
115 kfree(sbi->sb_bmp_dir); in free_sbi()
116 kfree(sbi); in free_sbi()
166 struct hpfs_sb_info *sbi = hpfs_sb(s); in hpfs_get_free_dnodes() local
167 if (sbi->sb_n_free_dnodes == (unsigned)-1) { in hpfs_get_free_dnodes()
168 unsigned c = hpfs_count_one_bitmap(s, sbi->sb_dmap); in hpfs_get_free_dnodes()
171 sbi->sb_n_free_dnodes = c; in hpfs_get_free_dnodes()
173 return sbi->sb_n_free_dnodes; in hpfs_get_free_dnodes()
179 struct hpfs_sb_info *sbi = hpfs_sb(s); in hpfs_statfs() local
[all …]
/linux-4.19.296/fs/minix/
inode.c
43 struct minix_sb_info *sbi = minix_sb(sb); in minix_put_super() local
46 if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */ in minix_put_super()
47 sbi->s_ms->s_state = sbi->s_mount_state; in minix_put_super()
48 mark_buffer_dirty(sbi->s_sbh); in minix_put_super()
50 for (i = 0; i < sbi->s_imap_blocks; i++) in minix_put_super()
51 brelse(sbi->s_imap[i]); in minix_put_super()
52 for (i = 0; i < sbi->s_zmap_blocks; i++) in minix_put_super()
53 brelse(sbi->s_zmap[i]); in minix_put_super()
54 brelse (sbi->s_sbh); in minix_put_super()
55 kfree(sbi->s_imap); in minix_put_super()
[all …]
/linux-4.19.296/fs/jfs/
jfs_mount.c
85 struct jfs_sb_info *sbi = JFS_SBI(sb); in jfs_mount() local
105 sbi->ipaimap = ipaimap; in jfs_mount()
128 sbi->ipbmap = ipbmap; in jfs_mount()
149 if ((sbi->mntflag & JFS_BAD_SAIT) == 0) { in jfs_mount()
156 sbi->ipaimap2 = ipaimap2; in jfs_mount()
170 sbi->ipaimap2 = NULL; in jfs_mount()
188 sbi->ipimap = ipimap; in jfs_mount()
235 struct jfs_sb_info *sbi = JFS_SBI(sb); in jfs_mount_rw() local
244 if (chkSuper(sb) || (sbi->state != FM_CLEAN)) in jfs_mount_rw()
247 truncate_inode_pages(sbi->ipimap->i_mapping, 0); in jfs_mount_rw()
[all …]
