Searched refs:nr_entries (Results 1 – 24 of 24) sorted by relevance

/linux-4.19.296/drivers/lightnvm/
pblk-rb.c
55 rb->nr_entries = (1 << power_size); in pblk_rb_init()
122 pblk_rl_init(&pblk->rl, rb->nr_entries); in pblk_rb_init()
130 unsigned int pblk_rb_calculate_size(unsigned int nr_entries) in pblk_rb_calculate_size() argument
133 return (1 << max(get_count_order(nr_entries), 7)); in pblk_rb_calculate_size()
168 return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries); in pblk_rb_space()
180 return pblk_rb_ring_count(mem, subm, rb->nr_entries); in pblk_rb_read_count()
188 return pblk_rb_ring_count(mem, sync, rb->nr_entries); in pblk_rb_sync_count()
191 unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries) in pblk_rb_read_commit() argument
198 (subm + nr_entries) & (rb->nr_entries - 1)); in pblk_rb_read_commit()
231 rb->l2p_update = (rb->l2p_update + 1) & (rb->nr_entries - 1); in __pblk_rb_update_l2p()
[all …]
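
The pblk hits above show the write buffer being sized to a power of two so that index wrap-around is a mask with nr_entries - 1 rather than a modulo: pblk_rb_calculate_size() returns 1 << max(get_count_order(n), 7), and advances are written as "(pos + 1) & (rb->nr_entries - 1)". A minimal stand-alone C sketch of that pattern (rb_calculate_size() and rb_next() are illustrative names, not pblk functions):

    #include <stdio.h>

    /* Round up to a power of two, but never below 1 << 7 = 128 entries,
     * mirroring what pblk_rb_calculate_size() does with get_count_order(). */
    static unsigned int rb_calculate_size(unsigned int requested)
    {
        unsigned int order = 7;

        while ((1u << order) < requested)
            order++;
        return 1u << order;
    }

    /* With a power-of-two size, advancing an index wraps with a mask. */
    static unsigned int rb_next(unsigned int pos, unsigned int nr_entries)
    {
        return (pos + 1) & (nr_entries - 1);
    }

    int main(void)
    {
        unsigned int nr_entries = rb_calculate_size(100);   /* -> 128 */

        printf("size=%u, wrap of last slot -> %u\n",
               nr_entries, rb_next(nr_entries - 1, nr_entries));
        return 0;
    }
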
pblk-cache.c
27 int nr_entries = pblk_get_secs(bio); in pblk_write_to_cache() local
38 ret = pblk_rb_may_write_user(&pblk->rwb, bio, nr_entries, &bpos); in pblk_write_to_cache()
58 for (i = 0; i < nr_entries; i++) { in pblk_write_to_cache()
69 atomic64_add(nr_entries, &pblk->user_wa); in pblk_write_to_cache()
72 atomic_long_add(nr_entries, &pblk->inflight_writes); in pblk_write_to_cache()
73 atomic_long_add(nr_entries, &pblk->req_writes); in pblk_write_to_cache()
76 pblk_rl_inserted(&pblk->rl, nr_entries); in pblk_write_to_cache()
pblk-rl.c
35 int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries) in pblk_rl_user_may_insert() argument
40 if (unlikely(rb_space >= 0) && (rb_space - nr_entries < 0)) in pblk_rl_user_may_insert()
49 void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries) in pblk_rl_inserted() argument
54 atomic_sub(nr_entries, &rl->rb_space); in pblk_rl_inserted()
57 int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries) in pblk_rl_gc_may_insert() argument
67 void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries) in pblk_rl_user_in() argument
69 atomic_add(nr_entries, &rl->rb_user_cnt); in pblk_rl_user_in()
86 void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries) in pblk_rl_gc_in() argument
88 atomic_add(nr_entries, &rl->rb_gc_cnt); in pblk_rl_gc_in()
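
pblk-rl.c treats rb_space as a sector budget: a negative value means the limit is disabled, a user write of nr_entries sectors is refused if it would drive the budget below zero, and pblk_rl_inserted() subtracts what was actually consumed. A hedged user-space sketch of that check with C11 atomics (the struct and function names here are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct rate_limiter {
        atomic_int rb_space;    /* negative means "no limit configured" */
    };

    static bool rl_user_may_insert(struct rate_limiter *rl, int nr_entries)
    {
        int rb_space = atomic_load(&rl->rb_space);

        /* Budget enabled and the request would overdraw it: refuse. */
        if (rb_space >= 0 && rb_space - nr_entries < 0)
            return false;
        return true;
    }

    static void rl_inserted(struct rate_limiter *rl, int nr_entries)
    {
        if (atomic_load(&rl->rb_space) >= 0)
            atomic_fetch_sub(&rl->rb_space, nr_entries);
    }
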
pblk.h
193 unsigned int nr_entries; /* Number of entries in write buffer - member
423 unsigned int nr_entries; /* Number of emeta entries */ member
730 unsigned int pblk_rb_calculate_size(unsigned int nr_entries);
733 unsigned int nr_entries, unsigned int *pos);
734 int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
746 unsigned int pos, unsigned int nr_entries,
753 unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
912 int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
913 void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
914 void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
[all …]
pblk-write.c
144 unsigned int nr_entries) in pblk_prepare_resubmit() argument
156 for (i = 0; i < nr_entries; i++) { in pblk_prepare_resubmit()
179 pos = (pos + 1) & (rb->nr_entries - 1); in pblk_prepare_resubmit()
289 if (sync == emeta->nr_entries) in pblk_end_io_write_meta()
pblk-init.c
180 unsigned long nr_entries, buffer_size; in pblk_rwb_init() local
192 nr_entries = pblk_rb_calculate_size(buffer_size); in pblk_rwb_init()
194 entries = vzalloc(array_size(nr_entries, sizeof(struct pblk_rb_entry))); in pblk_rwb_init()
198 power_size = get_count_order(nr_entries); in pblk_rwb_init()
938 emeta->nr_entries = lm->emeta_sec[0]; in pblk_line_mg_init()
949 emeta->nr_entries = lm->emeta_sec[0]; in pblk_line_mg_init()
1302 pblk->rwb.nr_entries); in pblk_init()
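
pblk_rwb_init() rounds the requested buffer up with pblk_rb_calculate_size() and allocates the entry array through array_size(), the kernel's overflow-checked multiply that saturates to SIZE_MAX so an oversized request fails instead of wrapping. A user-space sketch of that overflow-safe sizing (checked_array_size(), struct rb_entry and alloc_entries() are stand-ins, not kernel APIs):

    #include <stdint.h>
    #include <stdlib.h>

    /* Rough equivalent of the kernel's array_size(): saturate on overflow
     * so the allocator rejects the request instead of under-allocating. */
    static size_t checked_array_size(size_t n, size_t elem)
    {
        if (elem && n > SIZE_MAX / elem)
            return SIZE_MAX;
        return n * elem;
    }

    struct rb_entry { void *data; unsigned long flags; };

    static struct rb_entry *alloc_entries(size_t nr_entries)
    {
        size_t bytes = checked_array_size(nr_entries, sizeof(struct rb_entry));

        if (bytes == SIZE_MAX)
            return NULL;            /* multiplication would have overflowed */
        return calloc(1, bytes);    /* vzalloc() analogue: zeroed memory */
    }
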
/linux-4.19.296/fs/xfs/libxfs/
xfs_iext_tree.c
458 int *nr_entries) in xfs_iext_split_node() argument
470 *nr_entries = 0; in xfs_iext_split_node()
486 *nr_entries = nr_move; in xfs_iext_split_node()
488 *nr_entries = nr_keep; in xfs_iext_split_node()
504 int i, pos, nr_entries; in xfs_iext_insert_node() local
513 nr_entries = xfs_iext_node_nr_entries(node, pos); in xfs_iext_insert_node()
515 ASSERT(pos >= nr_entries || xfs_iext_key_cmp(node, pos, offset) != 0); in xfs_iext_insert_node()
516 ASSERT(nr_entries <= KEYS_PER_NODE); in xfs_iext_insert_node()
518 if (nr_entries == KEYS_PER_NODE) in xfs_iext_insert_node()
519 new = xfs_iext_split_node(&node, &pos, &nr_entries); in xfs_iext_insert_node()
[all …]
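
xfs_iext_insert_node() counts the populated slots at the insertion point and, when the node already holds KEYS_PER_NODE keys, calls xfs_iext_split_node() to move roughly half of them into a new node before inserting. A much-simplified sketch of that split-on-full insert over a flat sorted array (KEYS_PER_NODE's value, the key type, and the caller-provided spare node are assumptions; the real code also fixes up parent keys, which is skipped here):

    #include <string.h>

    #define KEYS_PER_NODE 16

    struct node {
        unsigned long keys[KEYS_PER_NODE];
        int nr_entries;
    };

    /* Move the upper half of a full node into @new and report how many
     * entries remain, loosely following xfs_iext_split_node(). */
    static void split_node(struct node *node, struct node *new, int *nr_entries)
    {
        int nr_keep = node->nr_entries / 2;
        int nr_move = node->nr_entries - nr_keep;

        memcpy(new->keys, &node->keys[nr_keep], nr_move * sizeof(new->keys[0]));
        new->nr_entries = nr_move;
        node->nr_entries = nr_keep;
        *nr_entries = nr_keep;
    }

    static void insert_key(struct node *node, struct node *spare, unsigned long key)
    {
        int pos, nr_entries = node->nr_entries;

        if (nr_entries == KEYS_PER_NODE) {
            split_node(node, spare, &nr_entries);
            if (key >= spare->keys[0]) {    /* insertion lands in the new node */
                node = spare;
                nr_entries = node->nr_entries;
            }
        }

        for (pos = 0; pos < nr_entries && node->keys[pos] < key; pos++)
            ;
        memmove(&node->keys[pos + 1], &node->keys[pos],
                (nr_entries - pos) * sizeof(node->keys[0]));
        node->keys[pos] = key;
        node->nr_entries = nr_entries + 1;
    }
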
/linux-4.19.296/drivers/pci/pcie/
portdrv_core.c
102 int nr_entries, nvec; in pcie_port_enable_irq_vec() local
106 nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSI_ENTRIES, in pcie_port_enable_irq_vec()
108 if (nr_entries < 0) in pcie_port_enable_irq_vec()
109 return nr_entries; in pcie_port_enable_irq_vec()
113 if (nvec > nr_entries) { in pcie_port_enable_irq_vec()
129 if (nvec != nr_entries) { in pcie_port_enable_irq_vec()
132 nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec, in pcie_port_enable_irq_vec()
134 if (nr_entries < 0) in pcie_port_enable_irq_vec()
135 return nr_entries; in pcie_port_enable_irq_vec()
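
pcie_port_enable_irq_vec() allocates up to PCIE_PORT_MAX_MSI_ENTRIES vectors first, then reads back how many vectors the port services actually use, and if that count differs it frees everything and re-allocates exactly nvec vectors. A hedged sketch of that allocate/probe/re-allocate pattern using the real pci_alloc_irq_vectors()/pci_free_irq_vectors() calls (MAX_MSI_ENTRIES, enable_port_irqs() and the probe_required_vectors() helper are illustrative, not kernel code):

    #include <linux/pci.h>

    #define MAX_MSI_ENTRIES 32    /* stand-in for PCIE_PORT_MAX_MSI_ENTRIES */

    /* Placeholder for reading back how many vectors the services need. */
    static int probe_required_vectors(struct pci_dev *dev)
    {
        return 2;    /* e.g. one vector for PME, one for hotplug */
    }

    static int enable_port_irqs(struct pci_dev *dev)
    {
        int nr_entries, nvec;

        /* Ask for as many MSI/MSI-X vectors as the port could ever use. */
        nr_entries = pci_alloc_irq_vectors(dev, 1, MAX_MSI_ENTRIES,
                                           PCI_IRQ_MSIX | PCI_IRQ_MSI);
        if (nr_entries < 0)
            return nr_entries;

        nvec = probe_required_vectors(dev);
        if (nvec > nr_entries) {
            pci_free_irq_vectors(dev);
            return -EIO;
        }

        /* Fewer vectors needed than allocated: shrink to exactly nvec. */
        if (nvec != nr_entries) {
            pci_free_irq_vectors(dev);
            nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec,
                                               PCI_IRQ_MSIX | PCI_IRQ_MSI);
            if (nr_entries < 0)
                return nr_entries;
        }

        return 0;
    }
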
/linux-4.19.296/lib/
stackdepot.c
208 trace->nr_entries = trace->max_entries = stack->size; in depot_fetch_stack()
231 if (unlikely(trace->nr_entries == 0)) in depot_save_stack()
234 hash = hash_stack(trace->entries, trace->nr_entries); in depot_save_stack()
243 trace->nr_entries, hash); in depot_save_stack()
271 found = find_stack(*bucket, trace->entries, trace->nr_entries, hash); in depot_save_stack()
274 depot_alloc_stack(trace->entries, trace->nr_entries, in depot_save_stack()
fault-inject.c
77 trace.nr_entries = 0; in fail_stacktrace()
83 for (n = 0; n < trace.nr_entries; n++) { in fail_stacktrace()
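
In 4.19 both stackdepot and fault-inject operate on struct stack_trace: the caller supplies an entries[] buffer, sets max_entries, and save_stack_trace() fills nr_entries with the number of frames it captured, which is then hashed, stored or printed. A hedged kernel-side sketch of that capture-and-print round trip (dump_current_stack() and the 32-frame depth are arbitrary choices, not kernel code):

    #include <linux/stacktrace.h>
    #include <linux/printk.h>

    #define STACK_DEPTH 32

    static void dump_current_stack(void)
    {
        unsigned long entries[STACK_DEPTH];
        struct stack_trace trace = {
            .nr_entries  = 0,
            .max_entries = STACK_DEPTH,
            .entries     = entries,
            .skip        = 1,    /* skip this helper's own frame */
        };
        unsigned int i;

        save_stack_trace(&trace);

        /* nr_entries now holds the number of frames actually recorded. */
        for (i = 0; i < trace.nr_entries; i++)
            pr_info("[<%p>] %pS\n", (void *)entries[i], (void *)entries[i]);
    }
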
/linux-4.19.296/include/linux/
stacktrace.h
12 unsigned int nr_entries, max_entries; member
pagevec.h
28 pgoff_t start, unsigned nr_entries,
pagemap.h
351 unsigned int nr_entries, struct page **entries,
376 int tag, unsigned int nr_entries,
blkdev.h
932 int nr_entries; member
/linux-4.19.296/drivers/pci/
msi.c
675 static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries) in msix_map_region() argument
692 return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); in msix_map_region()
989 int nr_entries; in __pci_enable_msix() local
995 nr_entries = pci_msix_vec_count(dev); in __pci_enable_msix()
996 if (nr_entries < 0) in __pci_enable_msix()
997 return nr_entries; in __pci_enable_msix()
998 if (nvec > nr_entries) in __pci_enable_msix()
999 return nr_entries; in __pci_enable_msix()
1004 if (entries[i].entry >= nr_entries) in __pci_enable_msix()
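
__pci_enable_msix() uses pci_msix_vec_count() as the upper bound: asking for more vectors than the MSI-X table holds returns the table size to the caller, and every requested entry index must be below nr_entries. A hedged sketch of that validation step using the real struct msix_entry and pci_msix_vec_count() (validate_msix_request() itself is an illustrative helper):

    #include <linux/pci.h>

    /* Check a caller-supplied MSI-X request against the device's table,
     * mirroring the bounds checks visible in __pci_enable_msix(). */
    static int validate_msix_request(struct pci_dev *dev,
                                     struct msix_entry *entries, int nvec)
    {
        int nr_entries, i;

        nr_entries = pci_msix_vec_count(dev);    /* size of the MSI-X table */
        if (nr_entries < 0)
            return nr_entries;        /* no MSI-X capability */
        if (nvec > nr_entries)
            return nr_entries;        /* tell the caller how many exist */

        for (i = 0; i < nvec; i++) {
            if (entries[i].entry >= nr_entries)
                return -EINVAL;       /* index past the end of the table */
        }

        return 0;
    }
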
/linux-4.19.296/fs/proc/
proc_sysctl.c
1149 int nr_entries, name_bytes; in new_links() local
1152 nr_entries = 0; in new_links()
1154 nr_entries++; in new_links()
1159 sizeof(struct ctl_node)*nr_entries + in new_links()
1160 sizeof(struct ctl_table)*(nr_entries + 1) + in new_links()
1168 link_table = (struct ctl_table *)(node + nr_entries); in new_links()
1169 link_name = (char *)&link_table[nr_entries + 1]; in new_links()
1180 links->nreg = nr_entries; in new_links()
1306 int nr_entries = 0; in __register_sysctl_table() local
1309 nr_entries++; in __register_sysctl_table()
[all …]
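
new_links() walks the table once to count entries and name bytes, then makes one allocation packing the header, nr_entries ctl_nodes, nr_entries + 1 ctl_tables (the extra slot is the NULL-procname sentinel) and the name strings back to back, carving the pieces out with pointer arithmetic. A simplified user-space sketch of that packed layout (the header is omitted, and the fake_* types and build_links() are illustrative, not the real proc_sysctl structures):

    #include <stdlib.h>
    #include <string.h>

    struct fake_node  { void *rb_stub; };
    struct fake_table { const char *procname; };

    /* One allocation laid out as: [node array][table array + sentinel][names].
     * The caller eventually free()s the base (node) pointer. */
    static struct fake_table *build_links(const char *const *names, int nr_entries)
    {
        size_t name_bytes = 0;
        int i;

        for (i = 0; i < nr_entries; i++)
            name_bytes += strlen(names[i]) + 1;

        size_t bytes = sizeof(struct fake_node) * nr_entries +
                       sizeof(struct fake_table) * (nr_entries + 1) +
                       name_bytes;
        struct fake_node *node = calloc(1, bytes);
        if (!node)
            return NULL;

        struct fake_table *table = (struct fake_table *)(node + nr_entries);
        char *name = (char *)&table[nr_entries + 1];

        for (i = 0; i < nr_entries; i++) {
            table[i].procname = name;
            strcpy(name, names[i]);
            name += strlen(names[i]) + 1;
        }
        /* table[nr_entries] stays zeroed: the NULL-procname sentinel. */
        return table;
    }
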
base.c
447 trace.nr_entries = 0; in proc_pid_stack()
458 for (i = 0; i < trace.nr_entries; i++) { in proc_pid_stack()
/linux-4.19.296/drivers/pci/hotplug/
pnv_php.c
660 int nr_entries, ret; in pnv_php_enable_msix() local
664 nr_entries = pci_msix_vec_count(pdev); in pnv_php_enable_msix()
665 if (nr_entries < 0) in pnv_php_enable_msix()
666 return nr_entries; in pnv_php_enable_msix()
671 if (entry.entry >= nr_entries) in pnv_php_enable_msix()
/linux-4.19.296/include/xen/interface/
memory.h
229 unsigned int nr_entries; member
/linux-4.19.296/fs/xfs/scrub/
agheader.c
603 unsigned int nr_entries; member
636 sai->nr_entries < sai->sz_entries) in xchk_agfl_block()
637 sai->entries[sai->nr_entries++] = agbno; in xchk_agfl_block()
745 if (agflcount != sai.nr_entries) { in xchk_agfl()
751 sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]), in xchk_agfl()
753 for (i = 1; i < sai.nr_entries; i++) { in xchk_agfl()
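
xchk_agfl() records each AGFL block into sai.entries[] while walking (bounded by sz_entries), cross-checks the final nr_entries against the freelist count, then sorts the array and compares neighbouring entries to catch duplicate blocks. A hedged user-space sketch of that collect/sort/compare-neighbours check (check_entries() and cmp_ulong() are illustrative helpers):

    #include <stdbool.h>
    #include <stdlib.h>

    static int cmp_ulong(const void *a, const void *b)
    {
        unsigned long x = *(const unsigned long *)a;
        unsigned long y = *(const unsigned long *)b;

        return (x > y) - (x < y);
    }

    /* True if the collected entries match the expected count and contain
     * no duplicates, loosely following the checks in xchk_agfl(). */
    static bool check_entries(unsigned long *entries, unsigned int nr_entries,
                              unsigned int expected)
    {
        unsigned int i;

        if (nr_entries != expected)
            return false;    /* freelist count disagrees with the walk */

        qsort(entries, nr_entries, sizeof(entries[0]), cmp_ulong);
        for (i = 1; i < nr_entries; i++) {
            if (entries[i] == entries[i - 1])
                return false;    /* the same block is listed twice */
        }
        return true;
    }
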
/linux-4.19.296/include/uapi/linux/
virtio_gpu.h
178 __le32 nr_entries; member
/linux-4.19.296/fs/btrfs/
ref-verify.c
211 stack_trace.nr_entries = 0; in __save_stack_trace()
215 ra->trace_len = stack_trace.nr_entries; in __save_stack_trace()
227 trace.nr_entries = ra->trace_len; in __print_stack_trace()
/linux-4.19.296/fs/nfs/
dir.c
2267 long nr_entries = atomic_long_read(&nfs_access_nr_entries); in nfs_access_cache_enforce_limit() local
2271 if (nr_entries < 0 || nr_entries <= nfs_access_max_cachesize) in nfs_access_cache_enforce_limit()
2274 diff = nr_entries - nfs_access_max_cachesize; in nfs_access_cache_enforce_limit()
/linux-4.19.296/block/
bio.c
1226 if (i == map_data->nr_entries * nr_pages) { in bio_copy_user_iov()