/linux-4.19.296/drivers/char/agp/ |
D | generic.c |
      182  if (curr->page_count != 0) {   in agp_free_memory()
      187  for (i = 0; i < curr->page_count; i++) {   in agp_free_memory()
      192  for (i = 0; i < curr->page_count; i++) {   in agp_free_memory()
      219  size_t page_count, u32 type)   in agp_allocate_memory() argument
      230  if ((cur_memory + page_count > bridge->max_memory_agp) ||   in agp_allocate_memory()
      231  (cur_memory + page_count < page_count))   in agp_allocate_memory()
      235  new = agp_generic_alloc_user(page_count, type);   in agp_allocate_memory()
      242  new = bridge->driver->alloc_by_type(page_count, type);   in agp_allocate_memory()
      248  scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;   in agp_allocate_memory()
      256  if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {   in agp_allocate_memory()
      [all …]
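Two idioms recur in the agp_allocate_memory() hits above: the request is rejected not only when it exceeds the bridge limit but also when cur_memory + page_count wraps around (the "< page_count" comparison), and the scratch-page total is a ceiling division by ENTRIES_PER_PAGE. A minimal standalone sketch of both idioms, with illustrative names and a made-up ENTRIES_PER_PAGE value:

    /*
     * Sketch of the capacity check and scratch-page rounding seen in
     * agp_allocate_memory(); names and EXAMPLE_ENTRIES_PER_PAGE are
     * illustrative, not taken from the driver.
     */
    #include <stdbool.h>
    #include <stddef.h>

    #define EXAMPLE_ENTRIES_PER_PAGE 1024

    static bool example_request_fits(size_t cur_memory, size_t page_count,
                                     size_t max_memory_pages)
    {
        /* The second test catches unsigned wrap-around of the addition. */
        if (cur_memory + page_count > max_memory_pages ||
            cur_memory + page_count < page_count)
            return false;
        return true;
    }

    static size_t example_scratch_pages(size_t page_count)
    {
        /* Ceiling division: (n + d - 1) / d */
        return (page_count + EXAMPLE_ENTRIES_PER_PAGE - 1) /
               EXAMPLE_ENTRIES_PER_PAGE;
    }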
|
D | i460-agp.c |
      311  if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) {   in i460_insert_memory_small_io_page()
      317  while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) {   in i460_insert_memory_small_io_page()
      327  for (i = 0, j = io_pg_start; i < mem->page_count; i++) {   in i460_insert_memory_small_io_page()
      346  for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++)   in i460_remove_memory_small_io_page()
      415  end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];   in i460_insert_memory_large_io_page()
      417  end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;   in i460_insert_memory_large_io_page()
      473  end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];   in i460_remove_memory_large_io_page()
      475  end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;   in i460_remove_memory_large_io_page()
|
D | sgi-agp.c |
      173  if ((pg_start + mem->page_count) > num_entries)   in sgi_tioca_insert_memory()
      178  while (j < (pg_start + mem->page_count)) {   in sgi_tioca_insert_memory()
      189  for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {   in sgi_tioca_insert_memory()
      217  for (i = pg_start; i < (mem->page_count + pg_start); i++) {   in sgi_tioca_remove_memory()
|
D | nvidia-agp.c |
      210  if (mem->page_count == 0)   in nvidia_insert_memory()
      213  if ((pg_start + mem->page_count) >   in nvidia_insert_memory()
      217  for (j = pg_start; j < (pg_start + mem->page_count); j++) {   in nvidia_insert_memory()
      226  for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {   in nvidia_insert_memory()
      250  if (mem->page_count == 0)   in nvidia_remove_memory()
      253  for (i = pg_start; i < (mem->page_count + pg_start); i++)   in nvidia_remove_memory()
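The nvidia-agp hits above, like the sgi, ati, uninorth, serverworks, amd-k7 and amd64 entries elsewhere in this directory, follow the same insert_memory shape: return early for an empty request, reject ranges that overrun the GATT, verify the target slots are free, then write one entry per page. A condensed, hypothetical sketch of that shape follows; it is not any single driver verbatim, and every example_* helper is an assumption:

    /*
     * Hypothetical condensed form of the insert_memory pattern shared by
     * the AGP GART drivers in this directory. The example_* helpers and
     * the entry encoding are assumptions; each real driver has its own
     * GATT layout and TLB flush.
     */
    #include <linux/agp_backend.h>
    #include <linux/errno.h>

    /* Assumed helpers, declared only for the sketch: */
    extern int  example_num_gatt_entries(void);
    extern bool example_gatt_entry_busy(int index);
    extern void example_write_gatt_entry(int index, struct page *page);
    extern void example_flush_gart_tlb(void);

    static int example_insert_memory(struct agp_memory *mem,
                                     off_t pg_start, int type)
    {
        int num_entries = example_num_gatt_entries();
        int i, j;

        if (mem->page_count == 0)
            return 0;                            /* nothing to map */

        if ((pg_start + mem->page_count) > num_entries)
            return -EINVAL;                      /* runs past the aperture */

        for (j = pg_start; j < (pg_start + mem->page_count); j++)
            if (example_gatt_entry_busy(j))
                return -EBUSY;                   /* slot already in use */

        for (i = 0, j = pg_start; i < mem->page_count; i++, j++)
            example_write_gatt_entry(j, mem->pages[i]);

        example_flush_gart_tlb();                /* make entries visible */
        return 0;
    }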
|
D | intel-gtt.c |
      127  DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);   in intel_gtt_unmap_memory()
      217  if ((pg_start + mem->page_count)   in i810_insert_dcache_entries()
      224  for (i = pg_start; i < (pg_start + mem->page_count); i++) {   in i810_insert_dcache_entries()
      269  new->page_count = pg_count;   in alloc_agpphysmem_i8xx()
      280  if (curr->page_count == 4)   in intel_i810_free_by_type()
      914  if (mem->page_count == 0)   in intel_fake_agp_insert_entries()
      917  if (pg_start + mem->page_count > intel_private.gtt_total_entries)   in intel_fake_agp_insert_entries()
      932  ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);   in intel_fake_agp_insert_entries()
      940  intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,   in intel_fake_agp_insert_entries()
      967  if (mem->page_count == 0)   in intel_fake_agp_remove_entries()
      [all …]
|
D | ati-agp.c |
      282  if (mem->page_count == 0)   in ati_insert_memory()
      285  if ((pg_start + mem->page_count) > num_entries)   in ati_insert_memory()
      289  while (j < (pg_start + mem->page_count)) {   in ati_insert_memory()
      303  for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {   in ati_insert_memory()
      328  if (mem->page_count == 0)   in ati_remove_memory()
      331  for (i = pg_start; i < (mem->page_count + pg_start); i++) {   in ati_remove_memory()
|
D | uninorth-agp.c |
      164  if (mem->page_count == 0)   in uninorth_insert_memory()
      170  if ((pg_start + mem->page_count) > num_entries)   in uninorth_insert_memory()
      174  for (i = 0; i < mem->page_count; ++i) {   in uninorth_insert_memory()
      183  for (i = 0; i < mem->page_count; i++) {   in uninorth_insert_memory()
      213  if (mem->page_count == 0)   in uninorth_remove_memory()
      217  for (i = 0; i < mem->page_count; ++i) {   in uninorth_remove_memory()
|
D | agp.h |
      199  struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type);
      203  struct agp_memory *memory, size_t page_count);
      220  struct agp_memory *agp_generic_alloc_user(size_t page_count, int type);
|
D | efficeon-agp.c |
      240  int i, count = mem->page_count, num_entries;   in efficeon_insert_memory()
      248  if ((pg_start + mem->page_count) > num_entries)   in efficeon_insert_memory()
      289  int i, count = mem->page_count, num_entries;   in efficeon_remove_memory()
      295  if ((pg_start + mem->page_count) > num_entries)   in efficeon_remove_memory()
|
D | ali-agp.c |
      128  int i, page_count;   in m1541_cache_flush() local
      133  page_count = 1 << A_SIZE_32(agp_bridge->current_size)->page_order;   in m1541_cache_flush()
      134  for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) {   in m1541_cache_flush()
|
D | sworks-agp.c |
      332  if ((pg_start + mem->page_count) > num_entries) {   in serverworks_insert_memory()
      337  while (j < (pg_start + mem->page_count)) {   in serverworks_insert_memory()
      350  for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {   in serverworks_insert_memory()
      375  for (i = pg_start; i < (mem->page_count + pg_start); i++) {   in serverworks_remove_memory()
|
D | amd-k7-agp.c |
      297  if ((pg_start + mem->page_count) > num_entries)   in amd_insert_memory()
      301  while (j < (pg_start + mem->page_count)) {   in amd_insert_memory()
      314  for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {   in amd_insert_memory()
      337  for (i = pg_start; i < (mem->page_count + pg_start); i++) {   in amd_remove_memory()
|
D | parisc-agp.c |
      138  io_pg_count = info->io_pages_per_kpage * mem->page_count;   in parisc_agp_insert_memory()
      155  for (i = 0, j = io_pg_start; i < mem->page_count; i++) {   in parisc_agp_insert_memory()
      185  io_pg_count = info->io_pages_per_kpage * mem->page_count;   in parisc_agp_remove_memory()
|
D | hp-agp.c |
      345  io_pg_count = hp->io_pages_per_kpage * mem->page_count;   in hp_zx1_insert_memory()
      363  for (i = 0, j = io_pg_start; i < mem->page_count; i++) {   in hp_zx1_insert_memory()
      390  io_pg_count = hp->io_pages_per_kpage * mem->page_count;   in hp_zx1_remove_memory()
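hp-agp and parisc-agp (like the i460 small-I/O-page path further up) drive GARTs whose I/O page is smaller than the kernel's PAGE_SIZE, so one kernel page occupies io_pages_per_kpage GATT slots and both the start offset and the count are scaled before any range check. A small self-contained sketch of that scaling; the parameter names echo the listing but the surrounding structure is assumed:

    /*
     * Sketch only (assumed names): scale a kernel-page range into
     * I/O-page GATT slots when the GART page size is smaller than
     * PAGE_SIZE, then range-check the scaled values.
     */
    static int example_check_io_range(unsigned long io_pages_per_kpage,
                                      unsigned long pg_start,
                                      unsigned long page_count,
                                      unsigned long num_io_entries)
    {
        unsigned long io_pg_start = io_pages_per_kpage * pg_start;
        unsigned long io_pg_count = io_pages_per_kpage * page_count;

        if (io_pg_start + io_pg_count > num_io_entries)
            return -1;          /* scaled range exceeds the GATT */
        return 0;
    }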
|
D | amd64-agp.c |
      63  if (((unsigned long)pg_start + mem->page_count) > num_entries)   in amd64_insert_memory()
      69  while (j < (pg_start + mem->page_count)) {   in amd64_insert_memory()
      80  for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {   in amd64_insert_memory()
|
/linux-4.19.296/fs/orangefs/ |
D | orangefs-bufmap.c |
      152  int page_count;   member
      173  for (i = 0; i < bufmap->page_count; i++)   in orangefs_bufmap_unmap()
      242  bufmap->page_count = bufmap->total_size / PAGE_SIZE;   in orangefs_bufmap_alloc()
      246  kcalloc(bufmap->page_count, sizeof(struct page *), GFP_KERNEL);   in orangefs_bufmap_alloc()
      271  bufmap->page_count, 1, bufmap->page_array);   in orangefs_bufmap_map()
      276  if (ret != bufmap->page_count) {   in orangefs_bufmap_map()
      278  bufmap->page_count, ret);   in orangefs_bufmap_map()
      293  for (i = 0; i < bufmap->page_count; i++)   in orangefs_bufmap_map()
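orangefs_bufmap_alloc() sizes the page array from the total mapping size, and orangefs_bufmap_map() then pins the user buffer with get_user_pages_fast(), treating a short return as failure. A trimmed sketch of that sequence; get_user_pages_fast() is the real 4.19 API, but user_addr and the error path are illustrative:

    /*
     * Trimmed sketch of the bufmap setup shown above. user_addr stands in
     * for the address supplied by userspace; bufmap fields follow the
     * listing.
     */
    int ret, i;

    bufmap->page_count = bufmap->total_size / PAGE_SIZE;
    bufmap->page_array = kcalloc(bufmap->page_count,
                                 sizeof(struct page *), GFP_KERNEL);
    if (!bufmap->page_array)
        return -ENOMEM;

    ret = get_user_pages_fast(user_addr, bufmap->page_count,
                              1 /* write */, bufmap->page_array);
    if (ret != bufmap->page_count) {
        /* Short pin: drop whatever was taken and fail the map. */
        for (i = 0; i < ret; i++)
            put_page(bufmap->page_array[i]);
        return -ENOMEM;
    }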
|
/linux-4.19.296/fs/btrfs/ |
D | scrub.c |
      94  int page_count;   member
      101  int page_count;   member
      555  for (i = 0; i < sbio->page_count; i++) {   in scrub_free_ctx()
      606  sbio->page_count = 0;   in scrub_setup_ctx()
      752  WARN_ON(sblock->page_count < 1);   in scrub_print_warning()
      847  BUG_ON(sblock_to_check->page_count < 1);   in scrub_handle_errored_block()
      1023  if (!sblocks_for_recheck[mirror_index].page_count)   in scrub_handle_errored_block()
      1034  if (!sblocks_for_recheck[1].page_count)   in scrub_handle_errored_block()
      1088  for (page_num = 0; page_num < sblock_bad->page_count;   in scrub_handle_errored_block()
      1110  sblocks_for_recheck[mirror_index].page_count > 0;   in scrub_handle_errored_block()
      [all …]
|
/linux-4.19.296/drivers/virt/vboxguest/ |
D | vboxguest_utils.c |
      194  u32 page_count;   in hgcm_call_add_pagelist_size() local
      196  page_count = hgcm_call_buf_size_in_pages(buf, len);   in hgcm_call_add_pagelist_size()
      197  *extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);   in hgcm_call_add_pagelist_size()
      332  u32 i, page_count;   in hgcm_call_init_linaddr() local
      343  page_count = hgcm_call_buf_size_in_pages(buf, len);   in hgcm_call_init_linaddr()
      351  dst_pg_lst->page_count = page_count;   in hgcm_call_init_linaddr()
      353  for (i = 0; i < page_count; i++) {   in hgcm_call_init_linaddr()
      363  *off_extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);   in hgcm_call_init_linaddr()
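Both hgcm_call_add_pagelist_size() and hgcm_call_init_linaddr() size the variable-length page list with offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]), which yields the header plus exactly page_count array slots in one expression. The same idiom on a simplified stand-in struct (not the real vmmdev layout):

    /*
     * Flexible-array sizing idiom from the listing above, shown on a
     * simplified stand-in struct. offsetof() with a runtime index is the
     * same GCC/Clang-supported usage the kernel relies on.
     */
    #include <stddef.h>
    #include <stdint.h>

    struct example_pagelist {
        uint32_t flags;
        uint16_t offset_first_page;
        uint16_t page_count;
        uint64_t pages[];            /* flexible array member */
    };

    static size_t example_pagelist_bytes(uint16_t page_count)
    {
        /* Header plus page_count entries, without a separate multiply. */
        return offsetof(struct example_pagelist, pages[page_count]);
    }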
|
/linux-4.19.296/fs/xfs/ |
D | xfs_buf.c |
      284  int page_count)   in _xfs_buf_get_pages() argument
      288  bp->b_page_count = page_count;   in _xfs_buf_get_pages()
      289  if (page_count <= XB_PAGES) {   in _xfs_buf_get_pages()
      293  page_count, KM_NOFS);   in _xfs_buf_get_pages()
      297  memset(bp->b_pages, 0, sizeof(struct page *) * page_count);   in _xfs_buf_get_pages()
      360  unsigned short page_count, i;   in xfs_buf_allocate_memory() local
      396  page_count = end - start;   in xfs_buf_allocate_memory()
      397  error = _xfs_buf_get_pages(bp, page_count);   in xfs_buf_allocate_memory()
      917  int page_count;   in xfs_buf_associate_memory() local
      922  page_count = buflen >> PAGE_SHIFT;   in xfs_buf_associate_memory()
      [all …]
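_xfs_buf_get_pages() avoids an allocation for small buffers: up to XB_PAGES page pointers live in an array embedded in the buffer, and only larger counts allocate a separate array. A standalone sketch of that small-buffer optimization, with made-up names and an arbitrary inline size:

    /*
     * Standalone sketch of the embedded-vs-allocated page-array choice
     * made by _xfs_buf_get_pages(); names and the inline size are made up.
     */
    #include <stdlib.h>
    #include <string.h>

    #define EXAMPLE_INLINE_PAGES 4

    struct example_buf {
        void **pages;                            /* active page array */
        void  *inline_pages[EXAMPLE_INLINE_PAGES];
        int    page_count;
    };

    static int example_get_pages(struct example_buf *bp, int page_count)
    {
        bp->page_count = page_count;
        if (page_count <= EXAMPLE_INLINE_PAGES) {
            bp->pages = bp->inline_pages;        /* no allocation needed */
        } else {
            bp->pages = calloc(page_count, sizeof(void *));
            if (!bp->pages)
                return -1;
        }
        memset(bp->pages, 0, sizeof(void *) * page_count);
        return 0;
    }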
|
/linux-4.19.296/fs/pstore/ |
D | ram_core.c |
      405  unsigned int page_count;   in persistent_ram_vmap() local
      411  page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);   in persistent_ram_vmap()
      418  pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);   in persistent_ram_vmap()
      421  __func__, page_count);   in persistent_ram_vmap()
      425  for (i = 0; i < page_count; i++) {   in persistent_ram_vmap()
      433  vaddr = vmap(pages, page_count, VM_MAP | VM_IOREMAP, prot);   in persistent_ram_vmap()
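persistent_ram_vmap() maps a physical range into kernel virtual space by rounding the length (plus the sub-page offset of the start address) up to whole pages, filling a temporary struct page array, and handing it to vmap(). A trimmed sketch of that flow; the real function also chooses between cached and non-cached protections, while this sketch assumes PAGE_KERNEL. start and size are the physical range being mapped:

    /*
     * Trimmed sketch of the persistent_ram_vmap() flow; assumes the range
     * is ordinary RAM and PAGE_KERNEL protection is acceptable.
     */
    unsigned int page_count, i;
    struct page **pages;
    void *vaddr;

    page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);

    pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
    if (!pages)
        return NULL;

    for (i = 0; i < page_count; i++) {
        phys_addr_t addr = start + i * PAGE_SIZE;
        pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
    }

    vaddr = vmap(pages, page_count, VM_MAP | VM_IOREMAP, PAGE_KERNEL);
    kfree(pages);    /* page tables are built; the temporary array can go */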
|
/linux-4.19.296/include/linux/ |
D | page_ref.h |
      70  static inline int page_count(struct page *page)   in page_count() function
      175  VM_BUG_ON_PAGE(page_count(page) != 0, page);   in page_ref_unfreeze()
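page_count() is the standard accessor for a page's reference count, and the page_ref_unfreeze() hit uses it to assert that a frozen page really has a count of zero. In this series the helper is roughly the following (paraphrased; see the header itself for the authoritative body):

    /*
     * Approximate body of page_count() in this series: the reference
     * count lives on the head page of a compound page.
     */
    static inline int page_count(struct page *page)
    {
        return atomic_read(&compound_head(page)->_refcount);
    }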
|
D | agp_backend.h | 74 size_t page_count; member
|
D | firewire.h |
      424  int page_count;   member
      429  int page_count, enum dma_data_direction direction);
|
/linux-4.19.296/include/uapi/linux/ |
D | vbox_vmmdev_types.h | 221 __u16 page_count; /** Number of pages. */ member
|
/linux-4.19.296/include/drm/ |
D | drm_legacy.h | 107 int page_count; /**< number of pages */ member
|