Home
last modified time | relevance | path

Searched refs:chunks (Results 1 – 15 of 15) sorted by relevance

/linux-4.19.296/lib/
Dgenalloc.c161 INIT_LIST_HEAD(&pool->chunks); in gen_pool_create()
202 list_add_rcu(&chunk->next_chunk, &pool->chunks); in gen_pool_add_virt()
222 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_virt_to_phys()
248 list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { in gen_pool_destroy()
308 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_alloc_algo()
388 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_free()
421 list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) in gen_pool_for_each_chunk()
444 list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) { in addr_in_gen_pool()
468 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) in gen_pool_avail()
487 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) in gen_pool_size()
Dscatterlist.c386 unsigned int chunks, cur_page, seg_len, i; in __sg_alloc_table_from_pages() local
394 chunks = 1; in __sg_alloc_table_from_pages()
400 chunks++; in __sg_alloc_table_from_pages()
405 ret = sg_alloc_table(sgt, chunks, gfp_mask); in __sg_alloc_table_from_pages()
Ddebugobjects.c863 unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; in __debug_check_no_obj_freed() local
875 chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1)); in __debug_check_no_obj_freed()
876 chunks >>= ODEBUG_CHUNK_SHIFT; in __debug_check_no_obj_freed()
878 for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) { in __debug_check_no_obj_freed()
DKconfig561 Provides a helper to split scatterlists into chunks, each chunk being
/linux-4.19.296/drivers/virt/vboxguest/
Dvboxguest_core.c346 u32 i, chunks; in vbg_balloon_work() local
374 chunks = req->balloon_chunks; in vbg_balloon_work()
375 if (chunks > gdev->mem_balloon.max_chunks) { in vbg_balloon_work()
377 __func__, chunks, gdev->mem_balloon.max_chunks); in vbg_balloon_work()
381 if (chunks > gdev->mem_balloon.chunks) { in vbg_balloon_work()
383 for (i = gdev->mem_balloon.chunks; i < chunks; i++) { in vbg_balloon_work()
388 gdev->mem_balloon.chunks++; in vbg_balloon_work()
392 for (i = gdev->mem_balloon.chunks; i-- > chunks;) { in vbg_balloon_work()
397 gdev->mem_balloon.chunks--; in vbg_balloon_work()
1431 balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks; in vbg_ioctl_check_balloon()
Dvboxguest_core.h44 u32 chunks; member
/linux-4.19.296/include/linux/
Dshdma-base.h55 int chunks; member
Dsctp.h335 __u8 chunks[0]; member
347 __u8 chunks[0]; member
Dgenalloc.h62 struct list_head chunks; /* list of chunks in this pool */ member
/linux-4.19.296/fs/xfs/
Dxfs_buf_item.c734 int chunks; in xfs_buf_item_init() local
774 chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len), in xfs_buf_item_init()
776 map_size = DIV_ROUND_UP(chunks, NBWORD); in xfs_buf_item_init()
/linux-4.19.296/include/uapi/drm/
Damdgpu_drm.h536 __u64 chunks; member
Dradeon_drm.h987 __u64 chunks; member
/linux-4.19.296/include/net/sctp/
Dstructs.h374 struct sctp_chunks_param *chunks; member
532 struct list_head chunks; member
/linux-4.19.296/fs/ocfs2/
Dquota_local.c346 int i, chunks = le32_to_cpu(ldinfo->dqi_chunks); in ocfs2_recovery_load_quota() local
349 for (i = 0; i < chunks; i++) { in ocfs2_recovery_load_quota()
/linux-4.19.296/fs/befs/
DChangeLog233 So it does i/o in much larger chunks. It is the correct linux way. It