/linux-4.19.296/drivers/media/v4l2-core/
D | videobuf-dma-sg.c
      66  int nr_pages) in videobuf_vmalloc_to_sg() argument
      72  sglist = vzalloc(array_size(nr_pages, sizeof(*sglist))); in videobuf_vmalloc_to_sg()
      75  sg_init_table(sglist, nr_pages); in videobuf_vmalloc_to_sg()
      76  for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) { in videobuf_vmalloc_to_sg()
      96  int nr_pages, int offset, size_t size) in videobuf_pages_to_sg() argument
     103  sglist = vmalloc(array_size(nr_pages, sizeof(*sglist))); in videobuf_pages_to_sg()
     106  sg_init_table(sglist, nr_pages); in videobuf_pages_to_sg()
     114  for (i = 1; i < nr_pages; i++) { in videobuf_pages_to_sg()
     177  dma->nr_pages = last-first+1; in videobuf_dma_init_user_locked()
     178  dma->pages = kmalloc_array(dma->nr_pages, sizeof(struct page *), in videobuf_dma_init_user_locked()
     [all …]
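The videobuf helpers above share one shape: allocate an array of struct scatterlist, initialize it with sg_init_table(), then fill one entry per page. A condensed sketch of the vmalloc variant, assuming only the standard scatterlist/vmalloc APIs (the in-tree version additionally BUG()s on highmem pages):

```c
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

/* Sketch: build a scatterlist over an existing vmalloc'ed buffer.
 * One sg entry per page; vmalloc memory is page-aligned, so each
 * entry covers a whole page at offset 0. */
static struct scatterlist *sketch_vmalloc_to_sg(unsigned char *virt,
						int nr_pages)
{
	struct scatterlist *sglist;
	struct page *pg;
	int i;

	sglist = vzalloc(array_size(nr_pages, sizeof(*sglist)));
	if (!sglist)
		return NULL;
	sg_init_table(sglist, nr_pages);
	for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
		pg = vmalloc_to_page(virt);	/* look up the backing page */
		if (!pg) {
			vfree(sglist);
			return NULL;
		}
		sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
	}
	return sglist;
}
```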
/linux-4.19.296/include/linux/
D | page_counter.h
      50  void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
      51  void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
      53  unsigned long nr_pages,
      55  void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
      56  void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages);
      57  void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages);
      58  int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages);
      60  unsigned long *nr_pages);
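page_counter is the hierarchical limit counter behind the cgroup controllers: charges propagate from a counter up through its parents, and try_charge reports which ancestor's limit stopped a charge. A usage sketch with a hypothetical two-level hierarchy (parent_cnt/child_cnt are illustrative names; in 4.19 page_counter_try_charge() returns bool):

```c
#include <linux/errno.h>
#include <linux/page_counter.h>

/* Hypothetical two-level hierarchy for illustration. */
static struct page_counter parent_cnt, child_cnt;

static void sketch_init(void)
{
	page_counter_init(&parent_cnt, NULL);
	page_counter_init(&child_cnt, &parent_cnt);	/* charges propagate up */
}

static int sketch_charge(unsigned long nr_pages)
{
	struct page_counter *fail;

	/* Fails atomically: if an ancestor's limit is hit, nothing
	 * stays charged and 'fail' names the limiting counter. */
	if (!page_counter_try_charge(&child_cnt, nr_pages, &fail))
		return -ENOMEM;
	return 0;
}

static void sketch_uncharge(unsigned long nr_pages)
{
	page_counter_uncharge(&child_cnt, nr_pages);	/* walks up, too */
}
```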
D | hugetlb_cgroup.h
      54  extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
      56  extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
      59  extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
      61  extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
      85  hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, in hugetlb_cgroup_charge_cgroup() argument
      92  hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages, in hugetlb_cgroup_commit_charge() argument
      99  hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page) in hugetlb_cgroup_uncharge_page() argument
     104  hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, in hugetlb_cgroup_uncharge_cgroup() argument
D | memory_hotplug.h
      84  extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
     113  extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
     117  extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
     122  unsigned long nr_pages, struct vmem_altmap *altmap, in add_pages() argument
     125  return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock); in add_pages()
     128  int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
     300  extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
     302  extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
     308  unsigned long nr_pages) in is_mem_section_removable() argument
     315  static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages) in offline_pages() argument
     [all …]
D | mm_inline.h
      28  int nr_pages) in __update_lru_size() argument
      32  __mod_node_page_state(pgdat, NR_LRU_BASE + lru, nr_pages); in __update_lru_size()
      34  NR_ZONE_LRU_BASE + lru, nr_pages); in __update_lru_size()
      39  int nr_pages) in update_lru_size() argument
      41  __update_lru_size(lruvec, lru, zid, nr_pages); in update_lru_size()
      43  mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages); in update_lru_size()
D | quicklist.h
      19  int nr_pages; member
      44  q->nr_pages--; in quicklist_alloc()
      64  q->nr_pages++; in __quicklist_free()
/linux-4.19.296/drivers/xen/
D | balloon.c
     419  static enum bp_state increase_reservation(unsigned long nr_pages) in increase_reservation() argument
     425  if (nr_pages > ARRAY_SIZE(frame_list)) in increase_reservation()
     426  nr_pages = ARRAY_SIZE(frame_list); in increase_reservation()
     429  for (i = 0; i < nr_pages; i++) { in increase_reservation()
     431  nr_pages = i; in increase_reservation()
     439  rc = xenmem_reservation_increase(nr_pages, frame_list); in increase_reservation()
     458  static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) in decrease_reservation() argument
     466  if (nr_pages > ARRAY_SIZE(frame_list)) in decrease_reservation()
     467  nr_pages = ARRAY_SIZE(frame_list); in decrease_reservation()
     469  for (i = 0; i < nr_pages; i++) { in decrease_reservation()
     [all …]
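Both reservation paths clamp the request to the static frame_list[] batch and, if pages run out mid-loop, shrink nr_pages to what was actually gathered before issuing a single hypercall for the batch. A sketch of that idiom; get_one_frame() and issue_batch() are hypothetical stand-ins for the balloon-list handling and xenmem_reservation_increase():

```c
#include <linux/kernel.h>

#define SKETCH_BATCH 64			/* illustrative capacity */
static unsigned long frame_list[SKETCH_BATCH];

/* Hypothetical stubs standing in for balloon-page allocation and
 * the reservation hypercall. */
static int get_one_frame(unsigned long *frame) { *frame = 0; return 0; }
static long issue_batch(unsigned long n, unsigned long *frames) { return n; }

static long sketch_increase(unsigned long nr_pages)
{
	unsigned long i, frame;

	if (nr_pages > ARRAY_SIZE(frame_list))	/* clamp to one batch */
		nr_pages = ARRAY_SIZE(frame_list);

	for (i = 0; i < nr_pages; i++) {
		if (get_one_frame(&frame)) {
			nr_pages = i;	/* ran dry: submit what we have */
			break;
		}
		frame_list[i] = frame;
	}
	return issue_batch(nr_pages, frame_list);
}
```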
D | xlate_mmu.c
     223  unsigned long nr_pages; in xen_xlate_map_ballooned_pages() local
     226  nr_pages = DIV_ROUND_UP(nr_grant_frames, XEN_PFN_PER_PAGE); in xen_xlate_map_ballooned_pages()
     227  pages = kcalloc(nr_pages, sizeof(pages[0]), GFP_KERNEL); in xen_xlate_map_ballooned_pages()
     236  rc = alloc_xenballooned_pages(nr_pages, pages); in xen_xlate_map_ballooned_pages()
     239  nr_pages, rc); in xen_xlate_map_ballooned_pages()
     249  vaddr = vmap(pages, nr_pages, 0, PAGE_KERNEL); in xen_xlate_map_ballooned_pages()
     252  nr_pages, rc); in xen_xlate_map_ballooned_pages()
     253  free_xenballooned_pages(nr_pages, pages); in xen_xlate_map_ballooned_pages()
D | privcmd.c
      67  unsigned long nr_pages);
     452  unsigned long nr_pages; in privcmd_ioctl_mmap_batch() local
     480  nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE); in privcmd_ioctl_mmap_batch()
     481  if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT))) in privcmd_ioctl_mmap_batch()
     523  m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) { in privcmd_ioctl_mmap_batch()
     528  ret = alloc_empty_pages(vma, nr_pages); in privcmd_ioctl_mmap_batch()
     535  m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) { in privcmd_ioctl_mmap_batch()
     539  if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) { in privcmd_ioctl_mmap_batch()
     584  struct page *pages[], unsigned int nr_pages) in lock_pages() argument
     595  if (requested > nr_pages) in lock_pages()
     [all …]
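Note the guard at source line 481: a user-supplied nr_pages is rejected if it exceeds LONG_MAX >> PAGE_SHIFT, so the later `nr_pages << PAGE_SHIFT` byte conversions (lines 523/535) cannot overflow. A sketch of that check, with hypothetical naming:

```c
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>

/* Sketch: validate a user-supplied page count before using
 * nr_pages << PAGE_SHIFT as a byte length. Without the bound
 * against LONG_MAX >> PAGE_SHIFT the shift could wrap, letting a
 * huge request masquerade as a small one. */
static int sketch_check_range(long nr_pages, unsigned long addr,
			      const struct vm_area_struct *vma)
{
	if (nr_pages <= 0 || nr_pages > (LONG_MAX >> PAGE_SHIFT))
		return -EINVAL;
	if (addr < vma->vm_start ||
	    addr + ((unsigned long)nr_pages << PAGE_SHIFT) > vma->vm_end)
		return -EINVAL;	/* mapping must fit inside the VMA */
	return 0;
}
```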
D | grant-table.c
     783  int gnttab_pages_set_private(int nr_pages, struct page **pages) in gnttab_pages_set_private() argument
     787  for (i = 0; i < nr_pages; i++) { in gnttab_pages_set_private()
     809  int gnttab_alloc_pages(int nr_pages, struct page **pages) in gnttab_alloc_pages() argument
     813  ret = alloc_xenballooned_pages(nr_pages, pages); in gnttab_alloc_pages()
     817  ret = gnttab_pages_set_private(nr_pages, pages); in gnttab_alloc_pages()
     819  gnttab_free_pages(nr_pages, pages); in gnttab_alloc_pages()
     825  void gnttab_pages_clear_private(int nr_pages, struct page **pages) in gnttab_pages_clear_private() argument
     829  for (i = 0; i < nr_pages; i++) { in gnttab_pages_clear_private()
     845  void gnttab_free_pages(int nr_pages, struct page **pages) in gnttab_free_pages() argument
     847  gnttab_pages_clear_private(nr_pages, pages); in gnttab_free_pages()
     [all …]
/linux-4.19.296/drivers/misc/mic/scif/
D | scif_rma.c
      90  scif_create_pinned_pages(int nr_pages, int prot) in scif_create_pinned_pages() argument
      99  pin->pages = scif_zalloc(nr_pages * sizeof(*pin->pages)); in scif_create_pinned_pages()
     125  for (j = 0; j < pin->nr_pages; j++) { in scif_destroy_pinned_pages()
     134  pin->nr_pages * sizeof(*pin->pages)); in scif_destroy_pinned_pages()
     148  struct scif_window *scif_create_window(struct scif_endpt *ep, int nr_pages, in scif_create_window() argument
     158  window->dma_addr = scif_zalloc(nr_pages * sizeof(*window->dma_addr)); in scif_create_window()
     162  window->num_pages = scif_zalloc(nr_pages * sizeof(*window->num_pages)); in scif_create_window()
     180  nr_pages * sizeof(*window->dma_addr)); in scif_create_window()
     197  int nr_pages = window->nr_pages; in scif_destroy_incomplete_window() local
     222  scif_free(window->dma_addr, nr_pages * sizeof(*window->dma_addr)); in scif_destroy_incomplete_window()
     [all …]
D | scif_mmap.c
      43  req.nr_bytes = recv_window->nr_pages << PAGE_SHIFT; in scif_recv_munmap()
      57  scif_put_window(window, window->nr_pages); in scif_recv_munmap()
     139  scif_put_window(window, window->nr_pages); in __scif_cleanup_rma_for_zombies()
     227  int nr_pages, err, i; in scif_get_pages() local
     242  nr_pages = len >> PAGE_SHIFT; in scif_get_pages()
     268  (*pages)->phys_addr = scif_zalloc(nr_pages * sizeof(dma_addr_t)); in scif_get_pages()
     276  ((*pages)->va = scif_zalloc(nr_pages * sizeof(void *))); in scif_get_pages()
     284  (*pages)->nr_pages = nr_pages; in scif_get_pages()
     287  for (i = 0; i < nr_pages; i++) { in scif_get_pages()
     300  scif_get_window(window, nr_pages); in scif_get_pages()
     [all …]
D | scif_rma.h
     187  s64 nr_pages; member
     243  s64 nr_pages; member
     310  s64 offset, int nr_pages, s64 *out_offset);
     315  struct scif_window *scif_create_window(struct scif_endpt *ep, int nr_pages,
     426  static inline void scif_get_window(struct scif_window *window, int nr_pages) in scif_get_window() argument
     428  window->ref_count += nr_pages; in scif_get_window()
     431  static inline void scif_put_window(struct scif_window *window, int nr_pages) in scif_put_window() argument
     433  window->ref_count -= nr_pages; in scif_put_window()
     436  static inline void scif_set_window_ref(struct scif_window *window, int nr_pages) in scif_set_window_ref() argument
     438  window->ref_count = nr_pages; in scif_set_window_ref()
D | scif_rma_list.c
      74  scif_set_window_ref(window, window->nr_pages); in scif_insert_window()
     101  (window->nr_pages << PAGE_SHIFT); in scif_query_tcw()
     109  (window->nr_pages << PAGE_SHIFT); in scif_query_tcw()
     157  (window->nr_pages << PAGE_SHIFT); in scif_query_window()
     204  s64 offset, int nr_pages) in scif_rma_list_unregister() argument
     214  end_offset = window->offset + (window->nr_pages << PAGE_SHIFT); in scif_rma_list_unregister()
     216  nr_pages); in scif_rma_list_unregister()
     220  nr_pages -= loop_nr_pages; in scif_rma_list_unregister()
     222  if (!nr_pages) in scif_rma_list_unregister()
/linux-4.19.296/fs/
D | fs-writeback.c
      46  long nr_pages; member
     817  static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages) in wb_split_bdi_pages() argument
     822  if (nr_pages == LONG_MAX) in wb_split_bdi_pages()
     831  return nr_pages; in wb_split_bdi_pages()
     833  return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw); in wb_split_bdi_pages()
     862  long nr_pages; in bdi_split_work_to_wbs() local
     877  nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages); in bdi_split_work_to_wbs()
     882  work->nr_pages = nr_pages; in bdi_split_work_to_wbs()
     891  work->nr_pages = nr_pages; in bdi_split_work_to_wbs()
     972  static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages) in wb_split_bdi_pages() argument
     [all …]
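wb_split_bdi_pages() hands each bdi_writeback a share of the requested pages proportional to its write bandwidth, passing LONG_MAX ("write everything") straight through and widening the multiply to 64 bits before dividing. A sketch of the arithmetic, assuming hypothetical this_bw/tot_bw bandwidth inputs:

```c
#include <linux/kernel.h>
#include <linux/types.h>

/* Sketch: give one member a share of nr_pages proportional to
 * this_bw / tot_bw, computed in 64 bits so the multiply can't wrap. */
static long sketch_split_pages(long nr_pages, unsigned long this_bw,
			       unsigned long tot_bw)
{
	if (nr_pages == LONG_MAX)
		return LONG_MAX;	/* "write everything" passes through */
	if (!tot_bw || this_bw >= tot_bw)
		return nr_pages;	/* sole/dominant writer gets it all */
	return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw);
}
```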
/linux-4.19.296/drivers/misc/genwqe/
D | card_utils.c
     306  sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE); in genwqe_alloc_sync_sgl()
     310  __func__, user_addr, user_size, sgl->nr_pages, in genwqe_alloc_sync_sgl()
     316  sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages); in genwqe_alloc_sync_sgl()
     395  while (p < sgl->nr_pages) { in genwqe_setup_sgl()
     413  } else if ((p == sgl->nr_pages - 1) && in genwqe_setup_sgl()
     434  if (p == sgl->nr_pages) in genwqe_setup_sgl()
     450  if (p == sgl->nr_pages) in genwqe_setup_sgl()
     537  unsigned int nr_pages, int dirty) in genwqe_free_user_pages() argument
     541  for (i = 0; i < nr_pages; i++) { in genwqe_free_user_pages()
     595  m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE); in genwqe_user_vmap()
     [all …]
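genwqe sizes its sg list from the user buffer's offset within its first page (source lines 306 and 595): an unaligned buffer needs DIV_ROUND_UP(offset + size, PAGE_SIZE) pages, not just size / PAGE_SIZE. A standalone sketch of that computation:

```c
#include <linux/kernel.h>
#include <linux/mm.h>

/* Sketch: number of pages covering [user_addr, user_addr + size).
 * The offset into the first page must be counted, or a buffer that
 * straddles a page boundary would be undersized by one page. */
static unsigned long sketch_nr_pages(unsigned long user_addr, size_t size)
{
	unsigned long offs = offset_in_page(user_addr);

	return DIV_ROUND_UP(offs + size, PAGE_SIZE);
}
```

For example, a 4096-byte buffer starting 16 bytes into a page spans two pages, not one.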
/linux-4.19.296/drivers/edac/
D | pasemi_edac.c
     157  dimm->nr_pages = 128 << (20 - PAGE_SHIFT); in pasemi_edac_init_csrows()
     160  dimm->nr_pages = 256 << (20 - PAGE_SHIFT); in pasemi_edac_init_csrows()
     164  dimm->nr_pages = 512 << (20 - PAGE_SHIFT); in pasemi_edac_init_csrows()
     167  dimm->nr_pages = 1024 << (20 - PAGE_SHIFT); in pasemi_edac_init_csrows()
     170  dimm->nr_pages = 2048 << (20 - PAGE_SHIFT); in pasemi_edac_init_csrows()
     180  csrow->last_page = csrow->first_page + dimm->nr_pages - 1; in pasemi_edac_init_csrows()
     181  last_page_in_mmc += dimm->nr_pages; in pasemi_edac_init_csrows()
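The repeated `N << (20 - PAGE_SHIFT)` expressions convert a DIMM size in MiB into a page count: 1 MiB is 2^20 bytes and a page is 2^PAGE_SHIFT bytes, so the quotient reduces to a left shift by (20 - PAGE_SHIFT). As a sketch:

```c
#include <linux/mm.h>

/* Sketch: MiB -> pages. With 4 KiB pages (PAGE_SHIFT == 12) this is
 * mib << 8, i.e. 256 pages per MiB; 128 MiB => 32768 pages. */
static inline unsigned long mib_to_pages(unsigned long mib)
{
	return mib << (20 - PAGE_SHIFT);
}
```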
D | ie31200_edac.c
     464  unsigned long nr_pages; in ie31200_probe1() local
     466  nr_pages = IE31200_PAGES(dimm_info[j][i].size, skl); in ie31200_probe1()
     467  if (nr_pages == 0) in ie31200_probe1()
     471  nr_pages = nr_pages / 2; in ie31200_probe1()
     475  dimm->nr_pages = nr_pages; in ie31200_probe1()
     476  edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages); in ie31200_probe1()
     487  dimm->nr_pages = nr_pages; in ie31200_probe1()
     488  edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages); in ie31200_probe1()
/linux-4.19.296/fs/exofs/
D | inode.c
      57  unsigned nr_pages; member
      78  pcol->nr_pages = 0; in _pcol_init()
      87  pcol->expected_pages -= min(pcol->nr_pages, pcol->expected_pages); in _pcol_reset()
      91  pcol->nr_pages = 0; in _pcol_reset()
     140  if (unlikely(pcol->nr_pages >= pcol->alloc_pages)) in pcol_add_page()
     143  pcol->pages[pcol->nr_pages++] = page; in pcol_add_page()
     212  pcol->nr_pages); in __readpages_done()
     214  for (i = 0; i < pcol->nr_pages; i++) { in __readpages_done()
     256  for (i = 0; i < pcol->nr_pages; i++) { in _unlock_pcol_pages()
     272  BUG_ON(pcol_src->nr_pages < ios->nr_pages); in _maybe_not_all_in_one_io()
     [all …]
/linux-4.19.296/fs/9p/
D | cache.h
      52  unsigned *nr_pages);
      77  unsigned *nr_pages) in v9fs_readpages_from_fscache() argument
      80  nr_pages); in v9fs_readpages_from_fscache()
     133  unsigned *nr_pages) in v9fs_readpages_from_fscache() argument
/linux-4.19.296/drivers/s390/char/
D | vmcp.c
      61  int nr_pages, order; in vmcp_response_alloc() local
      64  nr_pages = ALIGN(session->bufsize, PAGE_SIZE) >> PAGE_SHIFT; in vmcp_response_alloc()
      71  page = cma_alloc(vmcp_cma, nr_pages, 0, false); in vmcp_response_alloc()
      82  int nr_pages, order; in vmcp_response_free() local
      88  nr_pages = ALIGN(session->bufsize, PAGE_SIZE) >> PAGE_SHIFT; in vmcp_response_free()
      91  cma_release(vmcp_cma, page, nr_pages); in vmcp_response_free()
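vmcp rounds its response buffer up to whole pages and carves it out of a dedicated CMA area; recomputing the identical nr_pages at free time guarantees cma_release() returns exactly what cma_alloc() took. A sketch, assuming an already-initialized `struct cma *area` (in 4.19 cma_alloc() takes a page count, an alignment order, and a no-warn flag):

```c
#include <linux/cma.h>
#include <linux/kernel.h>
#include <linux/mm.h>

/* Sketch: page-granular CMA allocation for a byte-sized buffer. */
static struct page *sketch_cma_get(struct cma *area, size_t bufsize,
				   unsigned long *nr_pages)
{
	*nr_pages = ALIGN(bufsize, PAGE_SIZE) >> PAGE_SHIFT;
	return cma_alloc(area, *nr_pages, 0 /* align order */, false);
}

static void sketch_cma_put(struct cma *area, struct page *page,
			   unsigned long nr_pages)
{
	cma_release(area, page, nr_pages);	/* count must match alloc */
}
```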
/linux-4.19.296/fs/afs/
D | file.c
      29  struct list_head *pages, unsigned nr_pages);
     198  for (i = 0; i < req->nr_pages; i++) in afs_put_read()
     319  req->nr_pages = 1; in afs_page_filler()
     491  req->pages[req->nr_pages++] = page; in afs_readpages_one()
     493  } while (req->nr_pages < n); in afs_readpages_one()
     495  if (req->nr_pages == 0) { in afs_readpages_one()
     504  task_io_account_read(PAGE_SIZE * req->nr_pages); in afs_readpages_one()
     516  for (i = 0; i < req->nr_pages; i++) { in afs_readpages_one()
     535  struct list_head *pages, unsigned nr_pages) in afs_readpages() argument
     542  key_serial(key), mapping->host->i_ino, nr_pages); in afs_readpages()
     [all …]
/linux-4.19.296/fs/ntfs/
D | file.c
     509  pgoff_t index, const unsigned nr_pages, struct page **pages, in __ntfs_grab_cache_pages() argument
     514  BUG_ON(!nr_pages); in __ntfs_grab_cache_pages()
     540  } while (nr < nr_pages); in __ntfs_grab_cache_pages()
     585  unsigned nr_pages, s64 pos, size_t bytes) in ntfs_prepare_pages_for_non_resident_write() argument
     613  BUG_ON(!nr_pages); in ntfs_prepare_pages_for_non_resident_write()
     621  vi->i_ino, ni->type, pages[0]->index, nr_pages, in ntfs_prepare_pages_for_non_resident_write()
     638  } while (++u < nr_pages); in ntfs_prepare_pages_for_non_resident_write()
    1190  if (likely(!err && ++u < nr_pages)) in ntfs_prepare_pages_for_non_resident_write()
    1236  } while (++u < nr_pages); in ntfs_prepare_pages_for_non_resident_write()
    1344  nr_pages = u; in ntfs_prepare_pages_for_non_resident_write()
     [all …]
/linux-4.19.296/fs/btrfs/
D | compression.c
      86  for (i = 0; i < cb->nr_pages; i++) { in check_compressed_csum()
     169  for (index = 0; index < cb->nr_pages; index++) { in end_compressed_bio_read()
     210  unsigned long nr_pages = end_index - index + 1; in end_compressed_writeback() local
     217  while (nr_pages > 0) { in end_compressed_writeback()
     220  nr_pages, ARRAY_SIZE(pages)), pages); in end_compressed_writeback()
     222  nr_pages -= 1; in end_compressed_writeback()
     232  nr_pages -= ret; in end_compressed_writeback()
     284  for (index = 0; index < cb->nr_pages; index++) { in end_compressed_bio_write()
     310  unsigned long nr_pages, in btrfs_submit_compressed_write() argument
     337  cb->nr_pages = nr_pages; in btrfs_submit_compressed_write()
     [all …]
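end_compressed_writeback() finishes writeback over the extent's page range in batches rather than one lookup per page: find_get_pages_contig() fills a small on-stack array, each page has its writeback ended and its lookup reference dropped, and a zero-length batch (a hole) skips a single page. A sketch of the loop over a plain mapping:

```c
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Sketch: end writeback over nr_pages starting at 'index', looking
 * pages up in batches instead of one find_get_page() per page. */
static void sketch_end_writeback(struct address_space *mapping,
				 pgoff_t index, unsigned long nr_pages)
{
	struct page *pages[16];		/* small on-stack batch */
	unsigned int i, ret;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(mapping, index,
				min_t(unsigned long, nr_pages,
				      ARRAY_SIZE(pages)), pages);
		if (ret == 0) {		/* hole: skip a single page */
			nr_pages--;
			index++;
			continue;
		}
		for (i = 0; i < ret; i++) {
			end_page_writeback(pages[i]);
			put_page(pages[i]);	/* drop lookup reference */
		}
		nr_pages -= ret;
		index += ret;
	}
}
```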
/linux-4.19.296/drivers/xen/xenbus/
D | xenbus_client.c
     368  unsigned int nr_pages, grant_ref_t *grefs) in xenbus_grant_ring() argument
     374  err = gnttab_alloc_grant_references(nr_pages, &gref_head); in xenbus_grant_ring()
     380  for (i = 0; i < nr_pages; i++) { in xenbus_grant_ring()
     574  unsigned int nr_pages = XENBUS_PAGES(nr_grefs); in xenbus_map_ring_valloc_hvm() local
     585  err = alloc_xenballooned_pages(nr_pages, node->hvm.pages); in xenbus_map_ring_valloc_hvm()
     600  addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP, in xenbus_map_ring_valloc_hvm()
     621  addr, nr_pages); in xenbus_map_ring_valloc_hvm()
     624  free_xenballooned_pages(nr_pages, node->hvm.pages); in xenbus_map_ring_valloc_hvm()
     841  unsigned int nr_pages; in xenbus_unmap_ring_vfree_hvm() local
     861  nr_pages = XENBUS_PAGES(node->nr_handles); in xenbus_unmap_ring_vfree_hvm()
     [all …]
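xenbus_grant_ring() reserves all nr_pages grant references in one batch before granting anything, so the per-page loop cannot fail partway and leave a half-granted ring behind. A hedged sketch using the gnttab calls visible above (the in-tree version also handles vmalloc addresses when computing the gfn):

```c
#include <xen/grant_table.h>
#include <xen/page.h>

/* Sketch: grant 'nr_pages' contiguous pages at 'vaddr' to domid.
 * References are claimed from a pre-allocated batch, so no partial
 * cleanup is needed inside the loop. */
static int sketch_grant_ring(domid_t domid, void *vaddr,
			     unsigned int nr_pages, grant_ref_t *grefs)
{
	grant_ref_t gref_head;
	unsigned int i;
	int err;

	err = gnttab_alloc_grant_references(nr_pages, &gref_head);
	if (err < 0)
		return err;

	for (i = 0; i < nr_pages; i++) {
		unsigned long gfn = virt_to_gfn(vaddr);

		grefs[i] = gnttab_claim_grant_reference(&gref_head);
		gnttab_grant_foreign_access_ref(grefs[i], domid, gfn,
						0 /* read-write */);
		vaddr += XEN_PAGE_SIZE;
	}
	return 0;
}
```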