
Searched refs:zone (Results 1 – 25 of 49) sorted by relevance


/linux-4.19.296/include/linux/
mmzone.h:359 struct zone { struct
525 static inline unsigned long zone_end_pfn(const struct zone *zone) in zone_end_pfn() argument
527 return zone->zone_start_pfn + zone->spanned_pages; in zone_end_pfn()
530 static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn) in zone_spans_pfn() argument
532 return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone); in zone_spans_pfn()
535 static inline bool zone_is_initialized(struct zone *zone) in zone_is_initialized() argument
537 return zone->initialized; in zone_is_initialized()
540 static inline bool zone_is_empty(struct zone *zone) in zone_is_empty() argument
542 return zone->spanned_pages == 0; in zone_is_empty()
549 static inline bool zone_intersects(struct zone *zone, in zone_intersects() argument
[all …]
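
A minimal sketch of how the span helpers above combine to walk a zone's page-frame range (walk_zone_pfns is a made-up name, not a kernel function):

    #include <linux/mm.h>
    #include <linux/mmzone.h>

    static void walk_zone_pfns(struct zone *zone)
    {
            unsigned long pfn;

            if (!zone_is_initialized(zone) || zone_is_empty(zone))
                    return;

            for (pfn = zone->zone_start_pfn; pfn < zone_end_pfn(zone); pfn++) {
                    /* A spanned range may contain holes, so each pfn
                     * still needs validation before dereference. */
                    if (!pfn_valid(pfn))
                            continue;
                    /* ... operate on pfn_to_page(pfn) ... */
            }
    }
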
vmstat.h:136 static inline void zone_numa_state_add(long x, struct zone *zone, in zone_numa_state_add() argument
139 atomic_long_add(x, &zone->vm_numa_stat[item]); in zone_numa_state_add()
150 static inline unsigned long zone_numa_state_snapshot(struct zone *zone, in zone_numa_state_snapshot() argument
153 long x = atomic_long_read(&zone->vm_numa_stat[item]); in zone_numa_state_snapshot()
157 x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item]; in zone_numa_state_snapshot()
163 static inline void zone_page_state_add(long x, struct zone *zone, in zone_page_state_add() argument
166 atomic_long_add(x, &zone->vm_stat[item]); in zone_page_state_add()
197 static inline unsigned long zone_page_state(struct zone *zone, in zone_page_state() argument
200 long x = atomic_long_read(&zone->vm_stat[item]); in zone_page_state()
214 static inline unsigned long zone_page_state_snapshot(struct zone *zone, in zone_page_state_snapshot() argument
[all …]
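
The two accessors above trade accuracy for cost; a minimal sketch contrasting them (report_free_pages is a made-up name):

    #include <linux/printk.h>
    #include <linux/vmstat.h>

    static void report_free_pages(struct zone *zone)
    {
            /* Cheap read of the atomic counter; may lag per-CPU deltas. */
            unsigned long approx = zone_page_state(zone, NR_FREE_PAGES);
            /* Folds in every CPU's pending diff: accurate but costlier. */
            unsigned long exact = zone_page_state_snapshot(zone, NR_FREE_PAGES);

            pr_info("%s: ~%lu free pages (snapshot %lu)\n",
                    zone->name, approx, exact);
    }
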
memory_hotplug.h:11 struct zone;
63 static inline unsigned zone_span_seqbegin(struct zone *zone) in zone_span_seqbegin() argument
65 return read_seqbegin(&zone->span_seqlock); in zone_span_seqbegin()
67 static inline int zone_span_seqretry(struct zone *zone, unsigned iv) in zone_span_seqretry() argument
69 return read_seqretry(&zone->span_seqlock, iv); in zone_span_seqretry()
71 static inline void zone_span_writelock(struct zone *zone) in zone_span_writelock() argument
73 write_seqlock(&zone->span_seqlock); in zone_span_writelock()
75 static inline void zone_span_writeunlock(struct zone *zone) in zone_span_writeunlock() argument
77 write_sequnlock(&zone->span_seqlock); in zone_span_writeunlock()
79 static inline void zone_seqlock_init(struct zone *zone) in zone_seqlock_init() argument
[all …]
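
The span seqlock lets readers retry instead of blocking while memory hotplug resizes a zone; a sketch of the read side, following the begin/retry pattern above (stable_zone_end_pfn is a made-up name):

    static unsigned long stable_zone_end_pfn(struct zone *zone)
    {
            unsigned long end;
            unsigned seq;

            do {
                    seq = zone_span_seqbegin(zone);
                    end = zone_end_pfn(zone);
            } while (zone_span_seqretry(zone, seq));

            return end;
    }
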
compaction.h:95 extern int fragmentation_index(struct zone *zone, unsigned int order);
100 extern enum compact_result compaction_suitable(struct zone *zone, int order,
103 extern void defer_compaction(struct zone *zone, int order);
104 extern bool compaction_deferred(struct zone *zone, int order);
105 extern void compaction_defer_reset(struct zone *zone, int order,
107 extern bool compaction_restarting(struct zone *zone, int order);
186 static inline enum compact_result compaction_suitable(struct zone *zone, int order, in compaction_suitable() argument
192 static inline void defer_compaction(struct zone *zone, int order) in defer_compaction() argument
196 static inline bool compaction_deferred(struct zone *zone, int order) in compaction_deferred() argument
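
These prototypes implement exponential back-off for failed compaction; a hedged fragment of the usual call pattern (try_to_compact_zone is a hypothetical stand-in for the allocator's real entry point):

    if (compaction_deferred(zone, order))
            return COMPACT_DEFERRED;        /* recent failures: skip */

    ret = try_to_compact_zone(zone, order); /* hypothetical helper */
    if (ret == COMPACT_SUCCESS)
            compaction_defer_reset(zone, order, true);
    else
            defer_compaction(zone, order);  /* widen the back-off window */
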
page-isolation.h:6 static inline bool has_isolate_pageblock(struct zone *zone) in has_isolate_pageblock() argument
8 return zone->nr_isolate_pageblock; in has_isolate_pageblock()
19 static inline bool has_isolate_pageblock(struct zone *zone) in has_isolate_pageblock() argument
33 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
36 int move_freepages_block(struct zone *zone, struct page *page,
cpuset.h:74 static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in __cpuset_zone_allowed()
79 static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed()
207 static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in __cpuset_zone_allowed()
212 static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed()
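
A fragment loosely modelled on the allocator's zonelist walk, showing where cpuset_zone_allowed() fits:

    for_each_zone_zonelist(zone, z, zonelist, highidx) {
            if (!cpuset_zone_allowed(zone, gfp_mask))
                    continue;       /* current task's cpuset forbids this zone */
            /* ... try allocating from this zone ... */
    }
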
gfp.h:581 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
582 void drain_all_pages(struct zone *zone);
583 void drain_local_pages(struct zone *zone);
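
A short usage sketch; per the 4.19 implementation, drain_all_pages() also accepts NULL to drain every populated zone:

    /* Flush per-CPU free lists, e.g. after isolating pageblocks, so
     * stray pages do not linger on remote CPUs' pcp lists. */
    drain_all_pages(zone);
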
page_owner.h:19 pg_data_t *pgdat, struct zone *zone);
/linux-4.19.296/include/net/netfilter/
nf_conntrack_zones.h:14 return &ct->zone; in nf_ct_zone()
21 nf_ct_zone_init(struct nf_conntrack_zone *zone, u16 id, u8 dir, u8 flags) in nf_ct_zone_init() argument
23 zone->id = id; in nf_ct_zone_init()
24 zone->flags = flags; in nf_ct_zone_init()
25 zone->dir = dir; in nf_ct_zone_init()
27 return zone; in nf_ct_zone_init()
38 if (tmpl->zone.flags & NF_CT_FLAG_MARK) in nf_ct_zone_tmpl()
39 return nf_ct_zone_init(tmp, skb->mark, tmpl->zone.dir, 0); in nf_ct_zone_tmpl()
45 const struct nf_conntrack_zone *zone) in nf_ct_zone_add() argument
48 ct->zone = *zone; in nf_ct_zone_add()
[all …]
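
A sketch of building a zone handle with the initializer shown above (the id 42 is illustrative):

    #include <net/netfilter/nf_conntrack_zones.h>

    struct nf_conntrack_zone zone;

    /* Zone 42, matching both original and reply directions, no flags. */
    nf_ct_zone_init(&zone, 42, NF_CT_DEFAULT_ZONE_DIR, 0);
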
nf_conntrack_expect.h:87 const struct nf_conntrack_zone *zone,
92 const struct nf_conntrack_zone *zone,
97 const struct nf_conntrack_zone *zone,
nf_conntrack_count.h:23 const struct nf_conntrack_zone *zone);
27 const struct nf_conntrack_zone *zone);
/linux-4.19.296/fs/btrfs/
reada.c:228 struct reada_zone *zone; in reada_find_zone() local
234 zone = NULL; in reada_find_zone()
236 ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone, in reada_find_zone()
238 if (ret == 1 && logical >= zone->start && logical <= zone->end) { in reada_find_zone()
239 kref_get(&zone->refcnt); in reada_find_zone()
241 return zone; in reada_find_zone()
254 zone = kzalloc(sizeof(*zone), GFP_KERNEL); in reada_find_zone()
255 if (!zone) in reada_find_zone()
260 kfree(zone); in reada_find_zone()
264 zone->start = start; in reada_find_zone()
[all …]
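
reada_find_zone() follows the common lookup-or-create pattern with kref counting; the same pattern in miniature (lookup and insert are hypothetical placeholders):

    obj = lookup(key);
    if (obj) {
            kref_get(&obj->refcnt);         /* found: pin and return */
            return obj;
    }
    obj = kzalloc(sizeof(*obj), GFP_KERNEL);
    if (!obj)
            return NULL;
    kref_init(&obj->refcnt);
    insert(key, obj);                       /* on collision: free ours, retry lookup */
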
/linux-4.19.296/include/trace/events/
compaction.h:201 TP_PROTO(struct zone *zone,
205 TP_ARGS(zone, order, ret),
215 __entry->nid = zone_to_nid(zone);
216 __entry->idx = zone_idx(zone);
230 TP_PROTO(struct zone *zone,
234 TP_ARGS(zone, order, ret)
239 TP_PROTO(struct zone *zone,
243 TP_ARGS(zone, order, ret)
248 TP_PROTO(struct zone *zone, int order),
250 TP_ARGS(zone, order),
[all …]
/linux-4.19.296/fs/adfs/
map.c:192 scan_map(struct adfs_sb_info *asb, unsigned int zone, in scan_map() argument
199 dm = asb->s_map + zone; in scan_map()
200 zone = asb->s_map_size; in scan_map()
201 dm_end = asb->s_map + zone; in scan_map()
212 } while (--zone > 0); in scan_map()
235 unsigned int zone; in adfs_map_free() local
238 zone = asb->s_map_size; in adfs_map_free()
242 } while (--zone > 0); in adfs_map_free()
252 unsigned int zone, mapoff; in adfs_map_lookup() local
260 zone = asb->s_map_size >> 1; in adfs_map_lookup()
[all …]
super.c:306 int i, zone; in adfs_read_map() local
323 for (zone = 0; zone < nzones; zone++, map_addr++) { in adfs_read_map()
324 dm[zone].dm_startbit = 0; in adfs_read_map()
325 dm[zone].dm_endbit = zone_size; in adfs_read_map()
326 dm[zone].dm_startblk = zone * zone_size - ADFS_DR_SIZE_BITS; in adfs_read_map()
327 dm[zone].dm_bh = sb_bread(sb, map_addr); in adfs_read_map()
329 if (!dm[zone].dm_bh) { in adfs_read_map()
336 i = zone - 1; in adfs_read_map()
349 while (--zone >= 0) in adfs_read_map()
350 brelse(dm[zone].dm_bh); in adfs_read_map()
/linux-4.19.296/lib/
show_mem.c:26 struct zone *zone = &pgdat->node_zones[zoneid]; in show_mem() local
27 if (!populated_zone(zone)) in show_mem()
30 total += zone->present_pages; in show_mem()
31 reserved += zone->present_pages - zone->managed_pages; in show_mem()
34 highmem += zone->present_pages; in show_mem()
/linux-4.19.296/virt/kvm/
coalesced_mmio.c:36 if (addr < dev->zone.addr) in coalesced_mmio_in_range()
38 if (addr + len > dev->zone.addr + dev->zone.size) in coalesced_mmio_in_range()
141 struct kvm_coalesced_mmio_zone *zone) in kvm_vm_ioctl_register_coalesced_mmio() argument
152 dev->zone = *zone; in kvm_vm_ioctl_register_coalesced_mmio()
155 ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr, in kvm_vm_ioctl_register_coalesced_mmio()
156 zone->size, &dev->dev); in kvm_vm_ioctl_register_coalesced_mmio()
172 struct kvm_coalesced_mmio_zone *zone) in kvm_vm_ioctl_unregister_coalesced_mmio() argument
179 if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) { in kvm_vm_ioctl_unregister_coalesced_mmio()
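
The register/unregister pair is driven from userspace through the KVM_REGISTER_COALESCED_MMIO ioctl; a hedged userspace sketch (vm_fd comes from KVM_CREATE_VM; addr and size are illustrative):

    #include <linux/kvm.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    static int register_coalesce(int vm_fd)
    {
            struct kvm_coalesced_mmio_zone zone = {
                    .addr = 0xfed00000,     /* guest-physical start */
                    .size = 0x1000,         /* length in bytes */
            };

            if (ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone) < 0) {
                    perror("KVM_REGISTER_COALESCED_MMIO");
                    return -1;
            }
            return 0;
    }
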
coalesced_mmio.h:22 struct kvm_coalesced_mmio_zone zone; member
28 struct kvm_coalesced_mmio_zone *zone);
30 struct kvm_coalesced_mmio_zone *zone);
/linux-4.19.296/drivers/iio/light/
lm3533-als.c:61 atomic_t zone; member
90 static int _lm3533_als_get_zone(struct iio_dev *indio_dev, u8 *zone) in _lm3533_als_get_zone() argument
103 *zone = min_t(u8, val, LM3533_ALS_ZONE_MAX); in _lm3533_als_get_zone()
108 static int lm3533_als_get_zone(struct iio_dev *indio_dev, u8 *zone) in lm3533_als_get_zone() argument
114 *zone = atomic_read(&als->zone); in lm3533_als_get_zone()
116 ret = _lm3533_als_get_zone(indio_dev, zone); in lm3533_als_get_zone()
128 static inline u8 lm3533_als_get_target_reg(unsigned channel, unsigned zone) in lm3533_als_get_target_reg() argument
130 return LM3533_REG_ALS_TARGET_BASE + 5 * channel + zone; in lm3533_als_get_target_reg()
134 unsigned zone, u8 *val) in lm3533_als_get_target() argument
143 if (zone > LM3533_ALS_ZONE_MAX) in lm3533_als_get_target()
[all …]
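
The target-register formula above gives each channel five consecutive zone registers; for example, channel 1, zone 3 resolves to LM3533_REG_ALS_TARGET_BASE + 5 * 1 + 3 = base + 8.
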
/linux-4.19.296/fs/xfs/
kmem.h:108 kmem_zone_free(kmem_zone_t *zone, void *ptr) in kmem_zone_free() argument
110 kmem_cache_free(zone, ptr); in kmem_zone_free()
114 kmem_zone_destroy(kmem_zone_t *zone) in kmem_zone_destroy() argument
116 kmem_cache_destroy(zone); in kmem_zone_destroy()
122 kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags) in kmem_zone_zalloc() argument
124 return kmem_zone_alloc(zone, flags | KM_ZERO); in kmem_zone_zalloc()
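
A sketch of the zone lifecycle these wrappers imply (struct xfs_foo is a stand-in, and kmem_zone_init, XFS's kmem_cache_create wrapper, is an assumption since it does not appear in the hits):

    kmem_zone_t *xfs_foo_zone;

    xfs_foo_zone = kmem_zone_init(sizeof(struct xfs_foo), "xfs_foo");
    foo = kmem_zone_zalloc(xfs_foo_zone, KM_SLEEP);  /* zeroed object */
    /* ... use foo ... */
    kmem_zone_free(xfs_foo_zone, foo);
    kmem_zone_destroy(xfs_foo_zone);
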
/linux-4.19.296/drivers/uwb/
drp-ie.c:268 void uwb_drp_ie_single_zone_to_bm(struct uwb_mas_bm *bm, u8 zone, u16 mas_bm) in uwb_drp_ie_single_zone_to_bm() argument
276 set_bit(zone * UWB_NUM_ZONES + mas, bm->bm); in uwb_drp_ie_single_zone_to_bm()
301 u8 zone; in uwb_drp_ie_to_bm() local
310 for (zone = 0; zone < UWB_NUM_ZONES; zone++) { in uwb_drp_ie_to_bm()
311 zone_mask = 1 << zone; in uwb_drp_ie_to_bm()
313 uwb_drp_ie_single_zone_to_bm(bm, zone, mas_bm); in uwb_drp_ie_to_bm()
/linux-4.19.296/fs/sysv/
balloc.c:188 sysv_zone_t zone; in sysv_count_free_blocks() local
191 zone = 0; in sysv_count_free_blocks()
192 while (n && (zone = blocks[--n]) != 0) in sysv_count_free_blocks()
194 if (zone == 0) in sysv_count_free_blocks()
197 block = fs32_to_cpu(sbi, zone); in sysv_count_free_blocks()
/linux-4.19.296/fs/minix/
bitmap.c:48 unsigned long bit, zone; in minix_free_block() local
54 zone = block - sbi->s_firstdatazone + 1; in minix_free_block()
55 bit = zone & ((1<<k) - 1); in minix_free_block()
56 zone >>= k; in minix_free_block()
57 if (zone >= sbi->s_zmap_blocks) { in minix_free_block()
61 bh = sbi->s_zmap[zone]; in minix_free_block()
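
A worked example of the split above, assuming 1 KiB blocks (k = s_blocksize_bits + 3 = 13, i.e. 8192 bits per bitmap block) and s_firstdatazone = 16:

    /* block = 10000
     * zone  = 10000 - 16 + 1 = 9985
     * bit   = 9985 & ((1 << 13) - 1) = 1793   -> offset inside the block
     * zone  = 9985 >> 13 = 1                  -> index into sbi->s_zmap[]
     */
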
/linux-4.19.296/include/uapi/linux/netfilter/
xt_CT.h:21 __u16 zone; member
32 __u16 zone; member
/linux-4.19.296/fs/ntfs/
lcnalloc.c:148 const NTFS_CLUSTER_ALLOCATION_ZONES zone, in ntfs_cluster_alloc() argument
167 zone == MFT_ZONE ? "MFT" : "DATA"); in ntfs_cluster_alloc()
174 BUG_ON(zone < FIRST_ZONE); in ntfs_cluster_alloc()
175 BUG_ON(zone > LAST_ZONE); in ntfs_cluster_alloc()
202 if (zone == DATA_ZONE) in ntfs_cluster_alloc()
213 } else if (zone == DATA_ZONE && zone_start >= vol->mft_zone_start && in ntfs_cluster_alloc()
221 } else if (zone == MFT_ZONE && (zone_start < vol->mft_zone_start || in ntfs_cluster_alloc()
232 if (zone == MFT_ZONE) { in ntfs_cluster_alloc()
696 if (zone == MFT_ZONE || mft_zone_size <= 0) { in ntfs_cluster_alloc()
