/linux-4.19.296/fs/nfsd/

auth.c
    25  struct cred *new;  in nfsd_setuser() local
    33  new = prepare_creds();  in nfsd_setuser()
    34  if (!new)  in nfsd_setuser()
    37  new->fsuid = rqstp->rq_cred.cr_uid;  in nfsd_setuser()
    38  new->fsgid = rqstp->rq_cred.cr_gid;  in nfsd_setuser()
    43  new->fsuid = exp->ex_anon_uid;  in nfsd_setuser()
    44  new->fsgid = exp->ex_anon_gid;  in nfsd_setuser()
    49  if (uid_eq(new->fsuid, GLOBAL_ROOT_UID))  in nfsd_setuser()
    50  new->fsuid = exp->ex_anon_uid;  in nfsd_setuser()
    51  if (gid_eq(new->fsgid, GLOBAL_ROOT_GID))  in nfsd_setuser()
    [all …]

/linux-4.19.296/lib/

refcount.c
    63  unsigned int new, val = atomic_read(&r->refs);  in refcount_add_not_zero_checked() local
    72  new = val + i;  in refcount_add_not_zero_checked()
    73  if (new < val)  in refcount_add_not_zero_checked()
    74  new = UINT_MAX;  in refcount_add_not_zero_checked()
    76  } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));  in refcount_add_not_zero_checked()
    78  WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");  in refcount_add_not_zero_checked()
   120  unsigned int new, val = atomic_read(&r->refs);  in refcount_inc_not_zero_checked() local
   123  new = val + 1;  in refcount_inc_not_zero_checked()
   128  if (unlikely(!new))  in refcount_inc_not_zero_checked()
   131  } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));  in refcount_inc_not_zero_checked()
    [all …]
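
The refcount.c hits trace the usual compare-and-swap retry loop: load the counter, compute the bumped value, clamp it to UINT_MAX instead of letting it wrap, and retry the cmpxchg until it takes. A minimal user-space sketch of that pattern, assuming GCC's __atomic builtins in place of the kernel's atomic_try_cmpxchg_relaxed(); the function name and the simplified checks are illustrative, not the kernel's exact code:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Increment *refs unless it is 0; saturate at UINT_MAX instead of wrapping. */
static bool refcount_inc_not_zero_sketch(unsigned int *refs)
{
        unsigned int val = __atomic_load_n(refs, __ATOMIC_RELAXED);
        unsigned int new;

        do {
                if (val == 0)           /* object already freed: refuse */
                        return false;
                new = val + 1;
                if (new < val)          /* overflow: saturate, never wrap to 0 */
                        new = UINT_MAX;
                /* On failure, val is refreshed with the current value and we retry. */
        } while (!__atomic_compare_exchange_n(refs, &val, new, true,
                                              __ATOMIC_RELAXED, __ATOMIC_RELAXED));
        return true;
}

int main(void)
{
        unsigned int refs = 1;

        printf("%d %u\n", refcount_inc_not_zero_sketch(&refs), refs);  /* 1 2 */
        refs = 0;
        printf("%d %u\n", refcount_inc_not_zero_sketch(&refs), refs);  /* 0 0 */
        return 0;
}

Saturating at UINT_MAX turns a counter overflow into a bounded leak rather than a premature free, which is exactly what the WARN_ONCE() above reports.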

errseq.c
    78  errseq_t new;  in errseq_set() local
    81  new = (old & ~(MAX_ERRNO|ERRSEQ_SEEN)) | -err;  in errseq_set()
    85  new += ERRSEQ_CTR_INC;  in errseq_set()
    88  if (new == old) {  in errseq_set()
    89  cur = new;  in errseq_set()
    94  cur = cmpxchg(eseq, old, new);  in errseq_set()
   100  if (likely(cur == old || cur == new))  in errseq_set()
   177  errseq_t old, new;  in errseq_check_and_advance() local
   198  new = old | ERRSEQ_SEEN;  in errseq_check_and_advance()
   199  if (new != old)  in errseq_check_and_advance()
    [all …]
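
errseq_set() folds an errno, a "seen" flag and a change counter into one 32-bit word, so writers can record errors cheaply and readers can tell whether a new error arrived since they last looked. A simplified sketch of that encoding, leaving out the cmpxchg loop; the SK_* constants assume the conventional layout (errno in the low 12 bits, a seen bit above it, counter in the rest) and are illustrative names, not the kernel's:

#include <stdio.h>

typedef unsigned int errseq_t;

/* Assumed layout: errno in the low 12 bits, one "seen" bit, counter above. */
#define SK_MAX_ERRNO    4095u
#define SK_SEEN         (1u << 12)
#define SK_CTR_INC      (1u << 13)

/* Record a new error; bump the counter only if someone saw the old value. */
static errseq_t errseq_set_sketch(errseq_t old, int err)
{
        errseq_t new = (old & ~(SK_MAX_ERRNO | SK_SEEN)) | (errseq_t)-err;

        if (old & SK_SEEN)
                new += SK_CTR_INC;
        return new;
}

int main(void)
{
        errseq_t es = 0;

        es = errseq_set_sketch(es, -5 /* -EIO */);
        printf("err=%d counter=%u\n", -(int)(es & SK_MAX_ERRNO), es >> 13);
        es |= SK_SEEN;                          /* a reader samples the value */
        es = errseq_set_sketch(es, -28 /* -ENOSPC */);
        printf("err=%d counter=%u\n", -(int)(es & SK_MAX_ERRNO), es >> 13);
        return 0;
}

The counter only advances when the previous value had been seen, so errors nobody is watching do not burn through counter space.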

lockref.c
    16  struct lockref new = old, prev = old; \
    20  new.lock_count); \
    44  new.count++;  in lockref_get()
    65  new.count++;  in lockref_get_not_zero()
    93  new.count--;  in lockref_put_not_zero()
   120  new.count++;  in lockref_get_or_lock()
   146  new.count--;  in lockref_put_return()
   150  return new.count;  in lockref_put_return()
   164  new.count--;  in lockref_put_or_lock()
   201  new.count++;  in lockref_get_not_dead()
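
lockref packs a spinlock and a reference count into one machine word so the common get/put can be done with a single cmpxchg while the lock happens to be free, falling back to taking the lock otherwise. A user-space sketch of the lockless fast path, assuming a plain flag word in place of the spinlock and GCC atomics in place of the kernel's cmpxchg64; all names here are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Lock word and reference count packed side by side in one 64-bit value,
 * so both can be inspected and updated with a single compare-and-swap. */
union lockref_sketch {
        uint64_t lock_count;
        struct {
                uint32_t locked;        /* 0 = unlocked; stand-in for a spinlock */
                uint32_t count;
        };
};

/* Try a lockless increment: only succeeds while the lock word reads 0. */
static bool lockref_get_not_zero_sketch(union lockref_sketch *lr)
{
        union lockref_sketch old, new;

        old.lock_count = __atomic_load_n(&lr->lock_count, __ATOMIC_RELAXED);
        do {
                if (old.locked || old.count == 0)
                        return false;   /* caller falls back to taking the lock */
                new = old;
                new.count++;
        } while (!__atomic_compare_exchange_n(&lr->lock_count, &old.lock_count,
                                              new.lock_count, true,
                                              __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
        return true;
}

int main(void)
{
        union lockref_sketch lr = { .lock_count = 0 };

        lr.count = 1;
        printf("%d -> count=%u\n", lockref_get_not_zero_sketch(&lr), lr.count);
        return 0;
}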

prime_numbers.c
   111  struct primes *new;  in expand_to_next_prime() local
   127  new = kmalloc(sizeof(*new) + bitmap_size(sz),  in expand_to_next_prime()
   129  if (!new)  in expand_to_next_prime()
   135  kfree(new);  in expand_to_next_prime()
   143  bitmap_fill(new->primes, sz);  in expand_to_next_prime()
   144  bitmap_copy(new->primes, p->primes, p->sz);  in expand_to_next_prime()
   145  for (y = 2UL; y < sz; y = find_next_bit(new->primes, sz, y + 1))  in expand_to_next_prime()
   146  new->last = clear_multiples(y, new->primes, p->sz, sz);  in expand_to_next_prime()
   147  new->sz = sz;  in expand_to_next_prime()
   149  BUG_ON(new->last <= x);  in expand_to_next_prime()
    [all …]
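
expand_to_next_prime() grows the shared prime bitmap: fill the new, larger bitmap, copy the old results in, then clear multiples of each already-known prime across the new tail. A self-contained sketch of the underlying sieve; for clarity it rebuilds the whole table with a byte per candidate instead of extending an existing bitmap:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define SK_LIMIT 64

int main(void)
{
        bool prime[SK_LIMIT];
        unsigned long y, m;

        /* Start with every candidate marked prime (bitmap_fill() equivalent). */
        memset(prime, true, sizeof(prime));
        prime[0] = prime[1] = false;

        /* For each surviving value, clear its multiples (clear_multiples()). */
        for (y = 2; y < SK_LIMIT; y++) {
                if (!prime[y])
                        continue;
                for (m = 2 * y; m < SK_LIMIT; m += y)
                        prime[m] = false;
        }

        for (y = 2; y < SK_LIMIT; y++)
                if (prime[y])
                        printf("%lu ", y);
        printf("\n");
        return 0;
}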

rbtree_test.c
    33  struct rb_node **new = &root->rb_root.rb_node, *parent = NULL;  in insert() local
    36  while (*new) {  in insert()
    37  parent = *new;  in insert()
    39  new = &parent->rb_left;  in insert()
    41  new = &parent->rb_right;  in insert()
    44  rb_link_node(&node->rb, parent, new);  in insert()
    50  struct rb_node **new = &root->rb_root.rb_node, *parent = NULL;  in insert_cached() local
    54  while (*new) {  in insert_cached()
    55  parent = *new;  in insert_cached()
    57  new = &parent->rb_left;  in insert_cached()
    [all …]
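
The insert() hits show the canonical descent: a struct rb_node **new pointer walks from the root down to the empty child slot while parent trails one step behind, and rb_link_node() splices the node into that slot before rebalancing. A sketch of the same double-pointer walk on a plain (unbalanced) binary search tree; names and types are illustrative, not the kernel's:

#include <stdio.h>
#include <stdlib.h>

struct node {
        long key;
        struct node *left, *right;
};

/* Walk a pointer-to-child-slot ("new") down the tree, remember the parent,
 * and link the node into the empty slot.  A real rb-tree would follow this
 * with rb_insert_color() to rebalance. */
static void insert_sketch(struct node **root, struct node *node)
{
        struct node **new = root, *parent = NULL;

        while (*new) {
                parent = *new;
                if (node->key < parent->key)
                        new = &parent->left;
                else
                        new = &parent->right;
        }
        node->left = node->right = NULL;
        *new = node;                    /* rb_link_node(&node->rb, parent, new) */
        (void)parent;                   /* rb-trees also record the parent here */
}

int main(void)
{
        struct node *root = NULL, n[3] = { { .key = 5 }, { .key = 2 }, { .key = 9 } };
        int i;

        for (i = 0; i < 3; i++)
                insert_sketch(&root, &n[i]);
        printf("root=%ld left=%ld right=%ld\n",
               root->key, root->left->key, root->right->key);
        return 0;
}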

rbtree.c
    87  __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,  in __rb_rotate_set_parents() argument
    91  new->__rb_parent_color = old->__rb_parent_color;  in __rb_rotate_set_parents()
    92  rb_set_parent_color(old, new, color);  in __rb_rotate_set_parents()
    93  __rb_change_child(old, new, parent, root);  in __rb_rotate_set_parents()
    99  void (*augment_rotate)(struct rb_node *old, struct rb_node *new))  in __rb_insert() argument
   244  void (*augment_rotate)(struct rb_node *old, struct rb_node *new))  in ____rb_erase_color() argument
   427  void (*augment_rotate)(struct rb_node *old, struct rb_node *new))  in __rb_erase_color() argument
   441  static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}  in dummy_copy() argument
   442  static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}  in dummy_rotate() argument
   493  void (*augment_rotate)(struct rb_node *old, struct rb_node *new))  in __rb_insert_augmented() argument
    [all …]

/linux-4.19.296/virt/kvm/arm/

aarch32.c
    88  unsigned long old, new;  in get_except32_cpsr() local
    91  new = 0;  in get_except32_cpsr()
    93  new |= (old & PSR_AA32_N_BIT);  in get_except32_cpsr()
    94  new |= (old & PSR_AA32_Z_BIT);  in get_except32_cpsr()
    95  new |= (old & PSR_AA32_C_BIT);  in get_except32_cpsr()
    96  new |= (old & PSR_AA32_V_BIT);  in get_except32_cpsr()
    97  new |= (old & PSR_AA32_Q_BIT);  in get_except32_cpsr()
   103  new |= (old & PSR_AA32_DIT_BIT);  in get_except32_cpsr()
   108  new |= PSR_AA32_SSBS_BIT;  in get_except32_cpsr()
   113  new |= (old & PSR_AA32_PAN_BIT);  in get_except32_cpsr()
    [all …]

/linux-4.19.296/fs/nfs/blocklayout/

extent_tree.c
   137  struct pnfs_block_extent *new, bool merge_ok)  in __ext_tree_insert() argument
   146  if (new->be_f_offset < be->be_f_offset) {  in __ext_tree_insert()
   147  if (merge_ok && ext_can_merge(new, be)) {  in __ext_tree_insert()
   148  be->be_f_offset = new->be_f_offset;  in __ext_tree_insert()
   150  be->be_v_offset = new->be_v_offset;  in __ext_tree_insert()
   151  be->be_length += new->be_length;  in __ext_tree_insert()
   156  } else if (new->be_f_offset >= ext_f_end(be)) {  in __ext_tree_insert()
   157  if (merge_ok && ext_can_merge(be, new)) {  in __ext_tree_insert()
   158  be->be_length += new->be_length;  in __ext_tree_insert()
   168  rb_link_node(&new->be_node, parent, p);  in __ext_tree_insert()
    [all …]

/linux-4.19.296/fs/

signalfd.c
    74  struct signalfd_siginfo new;  in signalfd_copyinfo() local
    81  memset(&new, 0, sizeof(new));  in signalfd_copyinfo()
    87  new.ssi_signo = kinfo->si_signo;  in signalfd_copyinfo()
    88  new.ssi_errno = kinfo->si_errno;  in signalfd_copyinfo()
    89  new.ssi_code = kinfo->si_code;  in signalfd_copyinfo()
    92  new.ssi_pid = kinfo->si_pid;  in signalfd_copyinfo()
    93  new.ssi_uid = kinfo->si_uid;  in signalfd_copyinfo()
    96  new.ssi_tid = kinfo->si_tid;  in signalfd_copyinfo()
    97  new.ssi_overrun = kinfo->si_overrun;  in signalfd_copyinfo()
    98  new.ssi_ptr = (long) kinfo->si_ptr;  in signalfd_copyinfo()
    [all …]
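
signalfd_copyinfo() fills the userspace-visible struct signalfd_siginfo from the kernel's siginfo. The consumer side of that interface, as a minimal user-space example with error handling trimmed; run it and press Ctrl-C to see the fields arrive:

#include <signal.h>
#include <stdio.h>
#include <sys/signalfd.h>
#include <unistd.h>

int main(void)
{
        sigset_t mask;
        struct signalfd_siginfo ssi;
        int sfd;

        /* Block SIGINT normally so it is only delivered through the fd. */
        sigemptyset(&mask);
        sigaddset(&mask, SIGINT);
        sigprocmask(SIG_BLOCK, &mask, NULL);

        sfd = signalfd(-1, &mask, SFD_CLOEXEC);
        if (sfd < 0)
                return 1;

        /* Each read() returns one struct signalfd_siginfo, the structure
         * that signalfd_copyinfo() populates on the kernel side. */
        if (read(sfd, &ssi, sizeof(ssi)) == (ssize_t)sizeof(ssi))
                printf("signo=%u errno=%d code=%d pid=%u uid=%u\n",
                       ssi.ssi_signo, ssi.ssi_errno, ssi.ssi_code,
                       ssi.ssi_pid, ssi.ssi_uid);

        close(sfd);
        return 0;
}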

/linux-4.19.296/fs/cachefiles/

security.c
    22  struct cred *new;  in cachefiles_get_security_ID() local
    27  new = prepare_kernel_cred(current);  in cachefiles_get_security_ID()
    28  if (!new) {  in cachefiles_get_security_ID()
    34  ret = set_security_override_from_ctx(new, cache->secctx);  in cachefiles_get_security_ID()
    36  put_cred(new);  in cachefiles_get_security_ID()
    43  cache->cache_cred = new;  in cachefiles_get_security_ID()
    83  struct cred *new;  in cachefiles_determine_cache_security() local
    90  new = prepare_creds();  in cachefiles_determine_cache_security()
    91  if (!new)  in cachefiles_determine_cache_security()
    98  ret = set_create_files_as(new, d_backing_inode(root));  in cachefiles_determine_cache_security()
    [all …]

/linux-4.19.296/include/asm-generic/

atomic-long.h
    83  #define atomic_long_cmpxchg_relaxed(l, old, new) \  argument
    85  (old), (new)))
    86  #define atomic_long_cmpxchg_acquire(l, old, new) \  argument
    88  (old), (new)))
    89  #define atomic_long_cmpxchg_release(l, old, new) \  argument
    91  (old), (new)))
    92  #define atomic_long_cmpxchg(l, old, new) \  argument
    93  (ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new)))
    96  #define atomic_long_try_cmpxchg_relaxed(l, old, new) \  argument
    98  (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
    [all …]

cmpxchg-local.h
    16  unsigned long old, unsigned long new, int size)  in __cmpxchg_local_generic() argument
    30  *(u8 *)ptr = (u8)new;  in __cmpxchg_local_generic()
    34  *(u16 *)ptr = (u16)new;  in __cmpxchg_local_generic()
    38  *(u32 *)ptr = (u32)new;  in __cmpxchg_local_generic()
    42  *(u64 *)ptr = (u64)new;  in __cmpxchg_local_generic()
    55  u64 old, u64 new)  in __cmpxchg64_local_generic() argument
    63  *(u64 *)ptr = new;  in __cmpxchg64_local_generic()
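
__cmpxchg_local_generic() is the fallback for CPUs without a native compare-and-exchange instruction: switch on the operand size and perform the read-compare-write sequence with local interrupts disabled so nothing can interleave. A user-space sketch of the same idea, assuming a mutex in place of the interrupt disable; the 64-bit case, which the header handles separately in __cmpxchg64_local_generic(), is omitted:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t cmpxchg_lock = PTHREAD_MUTEX_INITIALIZER;

/* Emulated compare-and-exchange: make the read-compare-write atomic by
 * excluding everything else while it runs.  Returns the old value. */
static unsigned long cmpxchg_local_sketch(void *ptr, unsigned long old,
                                          unsigned long new, int size)
{
        unsigned long prev = 0;

        pthread_mutex_lock(&cmpxchg_lock);
        switch (size) {
        case 1:
                prev = *(uint8_t *)ptr;
                if (prev == old)
                        *(uint8_t *)ptr = (uint8_t)new;
                break;
        case 2:
                prev = *(uint16_t *)ptr;
                if (prev == old)
                        *(uint16_t *)ptr = (uint16_t)new;
                break;
        case 4:
                prev = *(uint32_t *)ptr;
                if (prev == old)
                        *(uint32_t *)ptr = (uint32_t)new;
                break;
        }
        pthread_mutex_unlock(&cmpxchg_lock);
        return prev;
}

int main(void)
{
        uint32_t v = 7;

        printf("old=%lu v=%u\n", cmpxchg_local_sketch(&v, 7, 9, sizeof(v)), v);
        return 0;
}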

atomic-instrumented.h
    55  static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)  in atomic_cmpxchg() argument
    58  return arch_atomic_cmpxchg(v, old, new);  in atomic_cmpxchg()
    61  static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)  in atomic64_cmpxchg() argument
    64  return arch_atomic64_cmpxchg(v, old, new);  in atomic64_cmpxchg()
    69  static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)  in atomic_try_cmpxchg() argument
    73  return arch_atomic_try_cmpxchg(v, old, new);  in atomic_try_cmpxchg()
    79  static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)  in atomic64_try_cmpxchg() argument
    83  return arch_atomic64_try_cmpxchg(v, old, new);  in atomic64_try_cmpxchg()
   411  #define xchg(ptr, new) \  argument
   415  arch_xchg(__ai_ptr, (new)); \
    [all …]

/linux-4.19.296/fs/afs/

security.c
   121  struct afs_permits *permits, *xpermits, *replacement, *zap, *new = NULL;  in afs_cache_permit() local
   197  new = kzalloc(sizeof(struct afs_permits) +  in afs_cache_permit()
   199  if (!new)  in afs_cache_permit()
   202  refcount_set(&new->usage, 1);  in afs_cache_permit()
   203  new->nr_permits = size;  in afs_cache_permit()
   208  new->permits[j].key = key;  in afs_cache_permit()
   209  new->permits[j].access = caller_access;  in afs_cache_permit()
   212  new->permits[j].key = permits->permits[i].key;  in afs_cache_permit()
   213  new->permits[j].access = permits->permits[i].access;  in afs_cache_permit()
   219  new->permits[j].key = key;  in afs_cache_permit()
    [all …]

callback.c
    30  struct afs_cb_interest *new;  in afs_create_interest() local
    37  new = kzalloc(sizeof(struct afs_cb_interest), GFP_KERNEL);  in afs_create_interest()
    38  if (!new) {  in afs_create_interest()
    48  refcount_set(&new->usage, 1);  in afs_create_interest()
    49  new->sb = vnode->vfs_inode.i_sb;  in afs_create_interest()
    50  new->vid = vnode->volume->vid;  in afs_create_interest()
    51  new->server = afs_get_server(server);  in afs_create_interest()
    52  INIT_HLIST_NODE(&new->cb_vlink);  in afs_create_interest()
    75  new->vol_interest = vi;  in afs_create_interest()
    76  hlist_add_head(&new->cb_vlink, &vi->cb_interests);  in afs_create_interest()
    [all …]

server_list.c
   105  bool afs_annotate_server_list(struct afs_server_list *new,  in afs_annotate_server_list() argument
   111  if (old->nr_servers != new->nr_servers)  in afs_annotate_server_list()
   115  if (old->servers[i].server != new->servers[i].server)  in afs_annotate_server_list()
   123  for (j = 0; j < new->nr_servers; j++) {  in afs_annotate_server_list()
   124  if (new->servers[j].server == cur) {  in afs_annotate_server_list()
   125  new->index = j;  in afs_annotate_server_list()
   135  while (i < old->nr_servers && j < new->nr_servers) {  in afs_annotate_server_list()
   136  if (new->servers[j].server == old->servers[i].server) {  in afs_annotate_server_list()
   139  new->servers[j].cb_interest = cbi;  in afs_annotate_server_list()
   147  if (new->servers[j].server < old->servers[i].server) {  in afs_annotate_server_list()

/linux-4.19.296/include/linux/

rculist.h
    49  static inline void __list_add_rcu(struct list_head *new,  in __list_add_rcu() argument
    52  if (!__list_add_valid(new, prev, next))  in __list_add_rcu()
    55  new->next = next;  in __list_add_rcu()
    56  new->prev = prev;  in __list_add_rcu()
    57  rcu_assign_pointer(list_next_rcu(prev), new);  in __list_add_rcu()
    58  next->prev = new;  in __list_add_rcu()
    77  static inline void list_add_rcu(struct list_head *new, struct list_head *head)  in list_add_rcu() argument
    79  __list_add_rcu(new, head, head->next);  in list_add_rcu()
    98  static inline void list_add_tail_rcu(struct list_head *new,  in list_add_tail_rcu() argument
   101  __list_add_rcu(new, head->prev, head);  in list_add_tail_rcu()
    [all …]
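
__list_add_rcu() initialises the new element's links first and only then publishes it with rcu_assign_pointer(), a release store, so a concurrent lockless reader never sees a half-initialised node. A user-space sketch of that publish/read ordering with C11 atomics, using a singly-linked prepend rather than the kernel's doubly-linked list; the names are illustrative:

#include <stdatomic.h>
#include <stdio.h>

struct item {
        int value;
        struct item *_Atomic next;
};

static struct item *_Atomic head = NULL;

/* Fully initialise the new element, then publish it with a release store so
 * a reader that follows the pointer sees the initialised contents.
 * rcu_assign_pointer() is essentially this release store. */
static void publish_sketch(struct item *new, int value)
{
        new->value = value;                             /* init before publish */
        atomic_store_explicit(&new->next,
                              atomic_load_explicit(&head, memory_order_relaxed),
                              memory_order_relaxed);
        atomic_store_explicit(&head, new, memory_order_release);
}

int main(void)
{
        static struct item a, b;
        struct item *it;

        publish_sketch(&a, 1);
        publish_sketch(&b, 2);

        /* Reader side: the acquire load stands in for rcu_dereference(). */
        for (it = atomic_load_explicit(&head, memory_order_acquire);
             it; it = atomic_load_explicit(&it->next, memory_order_acquire))
                printf("%d\n", it->value);
        return 0;
}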

rbtree_augmented.h
    41  void (*copy)(struct rb_node *old, struct rb_node *new);
    42  void (*rotate)(struct rb_node *old, struct rb_node *new);
    48  void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
    93  rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
    94  new->rbaugmented = old->rbaugmented; \
   100  rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
   101  new->rbaugmented = old->rbaugmented; \
   135  __rb_change_child(struct rb_node *old, struct rb_node *new,  in __rb_change_child() argument
   140  WRITE_ONCE(parent->rb_left, new);  in __rb_change_child()
   142  WRITE_ONCE(parent->rb_right, new);  in __rb_change_child()
    [all …]

list.h
    33  extern bool __list_add_valid(struct list_head *new,
    38  static inline bool __list_add_valid(struct list_head *new,  in __list_add_valid() argument
    56  static inline void __list_add(struct list_head *new,  in __list_add() argument
    60  if (!__list_add_valid(new, prev, next))  in __list_add()
    63  next->prev = new;  in __list_add()
    64  new->next = next;  in __list_add()
    65  new->prev = prev;  in __list_add()
    66  WRITE_ONCE(prev->next, new);  in __list_add()
    77  static inline void list_add(struct list_head *new, struct list_head *head)  in list_add() argument
    79  __list_add(new, head, head->next);  in list_add()
    [all …]
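
__list_add() is the classic four-pointer splice into a circular doubly-linked list: the new entry is wired to both neighbours, then both neighbours are pointed at it. A self-contained sketch, minus the debug check and the WRITE_ONCE() annotation the kernel uses for lockless iteration:

#include <stdio.h>

struct list_head_sketch {
        struct list_head_sketch *next, *prev;
};

/* Splice "new" between two known consecutive entries. */
static void list_add_sketch(struct list_head_sketch *new,
                            struct list_head_sketch *prev,
                            struct list_head_sketch *next)
{
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
}

int main(void)
{
        struct list_head_sketch head = { &head, &head };   /* empty circular list */
        struct list_head_sketch a, b;

        list_add_sketch(&a, &head, head.next);  /* list_add(): insert at head */
        list_add_sketch(&b, head.prev, &head);  /* list_add_tail(): insert at tail */
        printf("a after head: %d, b before head: %d\n",
               head.next == &a, head.prev == &b);
        return 0;
}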

/linux-4.19.296/fs/lockd/

mon.c
   276  struct nsm_handle *new;  in nsm_create_handle() local
   281  new = kzalloc(sizeof(*new) + hostname_len + 1, GFP_KERNEL);  in nsm_create_handle()
   282  if (unlikely(new == NULL))  in nsm_create_handle()
   285  refcount_set(&new->sm_count, 1);  in nsm_create_handle()
   286  new->sm_name = (char *)(new + 1);  in nsm_create_handle()
   287  memcpy(nsm_addr(new), sap, salen);  in nsm_create_handle()
   288  new->sm_addrlen = salen;  in nsm_create_handle()
   289  nsm_init_private(new);  in nsm_create_handle()
   291  if (rpc_ntop(nsm_addr(new), new->sm_addrbuf,  in nsm_create_handle()
   292  sizeof(new->sm_addrbuf)) == 0)  in nsm_create_handle()
    [all …]
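
nsm_create_handle() allocates the handle and its hostname string with one kzalloc(): the string lives immediately after the struct, so a single allocation (and later a single kfree()) covers both. A user-space sketch of that layout trick; the type and hostname here are made up for the example:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct handle_sketch {
        int refcount;
        char *name;     /* points just past the struct itself */
};

/* One allocation for both the structure and its variable-length name. */
static struct handle_sketch *handle_create_sketch(const char *hostname)
{
        size_t len = strlen(hostname);
        struct handle_sketch *new = calloc(1, sizeof(*new) + len + 1);

        if (!new)
                return NULL;
        new->refcount = 1;
        new->name = (char *)(new + 1);          /* trailing storage */
        memcpy(new->name, hostname, len + 1);
        return new;
}

int main(void)
{
        struct handle_sketch *h = handle_create_sketch("client.example.org");

        if (h) {
                printf("%s (refs=%d)\n", h->name, h->refcount);
                free(h);                        /* frees the name too */
        }
        return 0;
}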

/linux-4.19.296/drivers/misc/ocxl/

pasid.c
    28  struct id_range *cur, *new;  in range_alloc() local
    31  new = kmalloc(sizeof(struct id_range), GFP_KERNEL);  in range_alloc()
    32  if (!new)  in range_alloc()
    44  new->start = last_end + 1;  in range_alloc()
    45  new->end = new->start + size - 1;  in range_alloc()
    47  if (new->end > max_id) {  in range_alloc()
    48  kfree(new);  in range_alloc()
    51  list_add(&new->list, pos);  in range_alloc()
    52  rc = new->start;  in range_alloc()
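
range_alloc() hands out ID ranges first-fit: walk the ranges already in use in order, take the first gap big enough, and refuse if the new range would run past max_id. A sketch of that search over a sorted array instead of a kernel list; it only finds the slot and does not insert the new range:

#include <stdio.h>

struct range_sketch {
        int start, end;         /* inclusive */
};

/* Return the start of the first gap that can hold "size" consecutive IDs
 * without exceeding max_id, or -1 if nothing fits. */
static int range_alloc_sketch(const struct range_sketch *used, int n,
                              int size, int max_id)
{
        int last_end = -1;      /* pretend ID -1 ends the "previous" range */
        int i, start;

        for (i = 0; i <= n; i++) {
                start = last_end + 1;
                /* Gap before the next used range (or open-ended after the last). */
                if (i == n || used[i].start - start >= size) {
                        if (start + size - 1 > max_id)
                                return -1;
                        return start;
                }
                last_end = used[i].end;
        }
        return -1;
}

int main(void)
{
        struct range_sketch used[] = { { 0, 3 }, { 10, 12 } };

        printf("%d\n", range_alloc_sketch(used, 2, 4, 31));    /* gap 4..9 fits: 4 */
        printf("%d\n", range_alloc_sketch(used, 2, 8, 31));    /* first fit: 13 */
        return 0;
}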

/linux-4.19.296/include/linux/usb/

gadget_configfs.h
    53  struct struct_in *new; \
    57  new = kzalloc(sizeof(*new), GFP_KERNEL); \
    58  if (!new) \
    61  ret = check_user_usb_string(name, &new->stringtab_dev); \
    64  config_group_init_type_name(&new->group, name, \
    70  if (gs->stringtab_dev.language == new->stringtab_dev.language) \
    78  list_add_tail(&new->list, &gi->string_list); \
    79  return &new->group; \
    81  kfree(new); \

/linux-4.19.296/drivers/char/agp/

generic.c
    99  struct agp_memory *new;  in agp_create_user_memory() local
   105  new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);  in agp_create_user_memory()
   106  if (new == NULL)  in agp_create_user_memory()
   109  new->key = agp_get_key();  in agp_create_user_memory()
   111  if (new->key < 0) {  in agp_create_user_memory()
   112  kfree(new);  in agp_create_user_memory()
   116  agp_alloc_page_array(alloc_size, new);  in agp_create_user_memory()
   118  if (new->pages == NULL) {  in agp_create_user_memory()
   119  agp_free_key(new->key);  in agp_create_user_memory()
   120  kfree(new);  in agp_create_user_memory()
    [all …]

/linux-4.19.296/fs/xfs/libxfs/

xfs_iext_tree.c
   461  struct xfs_iext_node *new = kmem_zalloc(NODE_SIZE, KM_NOFS);  in xfs_iext_split_node() local
   468  *nodep = new;  in xfs_iext_split_node()
   476  new->keys[i] = node->keys[nr_keep + i];  in xfs_iext_split_node()
   477  new->ptrs[i] = node->ptrs[nr_keep + i];  in xfs_iext_split_node()
   484  *nodep = new;  in xfs_iext_split_node()
   492  new->keys[i] = XFS_IEXT_KEY_INVALID;  in xfs_iext_split_node()
   493  return new;  in xfs_iext_split_node()
   503  struct xfs_iext_node *node, *new;  in xfs_iext_insert_node() local
   510  new = NULL;  in xfs_iext_insert_node()
   519  new = xfs_iext_split_node(&node, &pos, &nr_entries);  in xfs_iext_insert_node()
    [all …]
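
xfs_iext_split_node() handles a full node by allocating a new one, moving the upper half of the keys and pointers across, and invalidating the vacated slots. A sketch of just the key movement; the real code also repairs pointers, the insertion cursor and the parent node, and the type, sizes and sentinel value here are made up for the example:

#include <stdio.h>
#include <stdlib.h>

#define SK_KEYS         4
#define SK_KEY_INVALID  (-1L)

struct node_sketch {
        long keys[SK_KEYS];
};

/* Split a full node: keep the lower half in place, move the upper half to a
 * freshly allocated node, and mark the vacated slots invalid. */
static struct node_sketch *split_node_sketch(struct node_sketch *node)
{
        struct node_sketch *new = calloc(1, sizeof(*new));
        int nr_keep = SK_KEYS / 2, nr_move = SK_KEYS - nr_keep, i;

        if (!new)
                return NULL;
        for (i = 0; i < nr_move; i++) {
                new->keys[i] = node->keys[nr_keep + i];
                node->keys[nr_keep + i] = SK_KEY_INVALID;
        }
        for (i = nr_move; i < SK_KEYS; i++)
                new->keys[i] = SK_KEY_INVALID;
        return new;
}

int main(void)
{
        struct node_sketch node = { { 10, 20, 30, 40 } };
        struct node_sketch *new = split_node_sketch(&node);
        int i;

        for (i = 0; new && i < SK_KEYS; i++)
                printf("old[%d]=%ld new[%d]=%ld\n", i, node.keys[i], i, new->keys[i]);
        free(new);
        return 0;
}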