/linux-4.19.296/drivers/isdn/hardware/eicon/

um_idi.c
      30  static void cleanup_entity(divas_um_idi_entity_t *e);
      34  static int process_idi_request(divas_um_idi_entity_t *e,
      36  static int process_idi_rc(divas_um_idi_entity_t *e, byte rc);
      37  static int process_idi_ind(divas_um_idi_entity_t *e, byte ind);
      38  static int write_return_code(divas_um_idi_entity_t *e, byte rc);
     173  divas_um_idi_entity_t *e;    in cleanup_adapter() [local]
     176  e = list_entry(tmp, divas_um_idi_entity_t, link);    in cleanup_adapter()
     178  cleanup_entity(e);    in cleanup_adapter()
     179  if (e->os_context) {    in cleanup_adapter()
     180  diva_os_wakeup_read(e->os_context);    in cleanup_adapter()
    [all …]

io.c
      44  static void Request##N(ENTITY *e) \
      45  { if (IoAdapters[N]) (*IoAdapters[N]->DIRequest)(IoAdapters[N], e); }
     203  void request(PISDN_ADAPTER IoAdapter, ENTITY *e)    in request() [argument]
     211  if (!e->Req)    in request()
     213  IDI_SYNC_REQ *syncReq = (IDI_SYNC_REQ *)e;    in request()
     214  switch (e->Rc)    in request()
     335  pcm_req(IoAdapter, e);    in request()
     338  e->Ind = 0;    in request()
     343  pcm_req(IoAdapter, e);    in request()
     346  e->Ind = 0;    in request()
    [all …]
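
The hits at lines 44-45 come from a token-pasting macro that stamps out one Request<N> trampoline per adapter slot, each forwarding to that slot's handler only if the slot is populated. A minimal userspace sketch of the same ## pattern; all names here (adapters[], request_N, print_req) are illustrative, not the driver's real API:

    #include <stdio.h>

    struct adapter {
        void (*handler)(struct adapter *a, int req);
    };

    static struct adapter *adapters[4];

    /* Each expansion defines request_<N>(), which forwards to slot N's
     * handler only if that slot is populated, as lines 44-45 do. */
    #define DECLARE_REQUEST(N)                              \
        static void request_##N(int req)                    \
        {                                                   \
            if (adapters[N])                                \
                adapters[N]->handler(adapters[N], req);     \
        }

    DECLARE_REQUEST(0)
    DECLARE_REQUEST(1)

    static void print_req(struct adapter *a, int req)
    {
        (void)a;
        printf("got request %d\n", req);
    }

    int main(void)
    {
        struct adapter a0 = { .handler = print_req };

        adapters[0] = &a0;
        request_0(42);   /* dispatched to print_req() */
        request_1(42);   /* empty slot: silently ignored */
        return 0;
    }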

dadapter.c
     211  ENTITY IDI_CALL_ENTITY_T *e) {    in diva_dadapter_request() [argument]
     212  IDI_SYNC_REQ *syncReq = (IDI_SYNC_REQ *)e;    in diva_dadapter_request()
     213  if (e->Req) { /* We do not process it, also return error */    in diva_dadapter_request()
     214  e->Rc = OUT_OF_RESOURCES;    in diva_dadapter_request()
     215  DBG_ERR(("Can't process async request, Req=%02x", e->Req))    in diva_dadapter_request()
     221  switch (e->Rc) {    in diva_dadapter_request()
     227  e->Rc = 0xff;    in diva_dadapter_request()
     232  e->Rc = 0xff;    in diva_dadapter_request()
     237  e->Rc = OUT_OF_RESOURCES;    in diva_dadapter_request()
     239  e->Rc = 0xff;    in diva_dadapter_request()
    [all …]

/linux-4.19.296/block/

elevator.c
      61  struct elevator_queue *e = q->elevator;    in elv_iosched_allow_bio_merge() [local]
      63  if (e->uses_mq && e->type->ops.mq.allow_merge)    in elv_iosched_allow_bio_merge()
      64  return e->type->ops.mq.allow_merge(q, rq, bio);    in elv_iosched_allow_bio_merge()
      65  else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)    in elv_iosched_allow_bio_merge()
      66  return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);    in elv_iosched_allow_bio_merge()
      86  static bool elevator_match(const struct elevator_type *e, const char *name)    in elevator_match() [argument]
      88  if (!strcmp(e->elevator_name, name))    in elevator_match()
      90  if (e->elevator_alias && !strcmp(e->elevator_alias, name))    in elevator_match()
     101  struct elevator_type *e;    in elevator_find() [local]
     103  list_for_each_entry(e, &elv_list, list) {    in elevator_find()
    [all …]
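
elevator_match() at lines 86-90 accepts either a scheduler's canonical name or its optional alias, testing the alias pointer before dereferencing it since most schedulers leave it NULL. A standalone rendering of that check, with the struct reduced to the two fields the comparison reads; in this tree mq-deadline registers "deadline" as its alias, so either string selects it:

    #include <stdbool.h>
    #include <string.h>

    /* Reduced stand-in for struct elevator_type. */
    struct elevator_type {
        const char *elevator_name;
        const char *elevator_alias;   /* optional, often NULL */
    };

    /* Same logic as elevator_match() above: canonical name first,
     * then the alias, guarding against a NULL alias pointer. */
    static bool elevator_match(const struct elevator_type *e, const char *name)
    {
        if (!strcmp(e->elevator_name, name))
            return true;
        if (e->elevator_alias && !strcmp(e->elevator_alias, name))
            return true;
        return false;
    }

    int main(void)
    {
        const struct elevator_type mq_deadline = {
            .elevator_name  = "mq-deadline",
            .elevator_alias = "deadline",
        };

        /* Both strings select the scheduler. */
        return !(elevator_match(&mq_deadline, "mq-deadline") &&
                 elevator_match(&mq_deadline, "deadline"));
    }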

blk-mq-sched.h
      29  int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
      30  void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
      45  struct elevator_queue *e = q->elevator;    in blk_mq_sched_allow_merge() [local]
      47  if (e && e->type->ops.mq.allow_merge)    in blk_mq_sched_allow_merge()
      48  return e->type->ops.mq.allow_merge(q, rq, bio);    in blk_mq_sched_allow_merge()
      55  struct elevator_queue *e = rq->q->elevator;    in blk_mq_sched_completed_request() [local]
      57  if (e && e->type->ops.mq.completed_request)    in blk_mq_sched_completed_request()
      58  e->type->ops.mq.completed_request(rq);    in blk_mq_sched_completed_request()
      64  struct elevator_queue *e = q->elevator;    in blk_mq_sched_started_request() [local]
      66  if (e && e->type->ops.mq.started_request)    in blk_mq_sched_started_request()
    [all …]
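
Every wrapper in this header follows one idiom: a queue may have no scheduler attached (e is NULL), and each ops.mq hook is optional, so both are tested before the indirect call. A self-contained sketch of that optional-hook dispatch; sched_ops and its int parameters are hypothetical stand-ins for the real request/bio arguments:

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical scheduler with optional hooks, mirroring the
     * "if (e && e->type->ops.mq.hook)" shape above. */
    struct sched_ops {
        bool (*allow_merge)(int a, int b);   /* may be NULL */
        void (*completed)(int rq);           /* may be NULL */
    };

    struct scheduler {
        const struct sched_ops *ops;
    };

    static bool sched_allow_merge(struct scheduler *e, int a, int b)
    {
        if (e && e->ops->allow_merge)
            return e->ops->allow_merge(a, b);
        return true;   /* no scheduler, or hook not provided: allow */
    }

    int main(void)
    {
        /* NULL scheduler exercises the default path, as in
         * blk_mq_sched_allow_merge() when no elevator is set. */
        return sched_allow_merge(NULL, 1, 2) ? 0 : 1;
    }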

blk-mq-sched.c
      91  struct elevator_queue *e = q->elevator;    in blk_mq_do_dispatch_sched() [local]
      97  if (e->type->ops.mq.has_work &&    in blk_mq_do_dispatch_sched()
      98  !e->type->ops.mq.has_work(hctx))    in blk_mq_do_dispatch_sched()
     104  rq = e->type->ops.mq.dispatch_request(hctx);    in blk_mq_do_dispatch_sched()
     174  struct elevator_queue *e = q->elevator;    in blk_mq_sched_dispatch_requests() [local]
     175  const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;    in blk_mq_sched_dispatch_requests()
     321  struct elevator_queue *e = q->elevator;    in __blk_mq_sched_bio_merge() [local]
     326  if (e && e->type->ops.mq.bio_merge) {    in __blk_mq_sched_bio_merge()
     328  return e->type->ops.mq.bio_merge(hctx, bio);    in __blk_mq_sched_bio_merge()
     377  struct elevator_queue *e = q->elevator;    in blk_mq_sched_insert_request() [local]
    [all …]

/linux-4.19.296/fs/

binfmt_misc.c
      95  Node *e = list_entry(l, Node, list);    in check_file() [local]
     100  if (!test_bit(Enabled, &e->flags))    in check_file()
     104  if (!test_bit(Magic, &e->flags)) {    in check_file()
     105  if (p && !strcmp(e->magic, p + 1))    in check_file()
     106  return e;    in check_file()
     111  s = bprm->buf + e->offset;    in check_file()
     112  if (e->mask) {    in check_file()
     113  for (j = 0; j < e->size; j++)    in check_file()
     114  if ((*s++ ^ e->magic[j]) & e->mask[j])    in check_file()
     117  for (j = 0; j < e->size; j++)    in check_file()
    [all …]
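
Lines 112-117 are the heart of binfmt_misc's interpreter lookup: XOR each buffer byte against the registered magic and AND with the mask, so any nonzero result is a mismatch and a 0x00 mask byte makes that position a wildcard. The same comparison as a standalone helper (a sketch; the kernel walks bprm->buf at e->offset rather than taking a plain buffer):

    #include <stdbool.h>
    #include <stddef.h>

    /* Masked compare in the style of check_file(): byte j matches when
     * (buf[j] ^ magic[j]) & mask[j] is zero. A NULL mask degenerates to
     * an exact comparison, like the maskless loop at line 117. */
    static bool magic_match(const unsigned char *buf,
                            const unsigned char *magic,
                            const unsigned char *mask, size_t size)
    {
        size_t j;

        for (j = 0; j < size; j++)
            if ((buf[j] ^ magic[j]) & (mask ? mask[j] : 0xff))
                return false;
        return true;
    }

    int main(void)
    {
        static const unsigned char buf[]       = { 0x7f, 'E', 'L', 'F' };
        static const unsigned char elf_magic[] = { 0x7f, 'E', 'L', 'F' };

        return !magic_match(buf, elf_magic, NULL, sizeof(elf_magic));
    }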

/linux-4.19.296/lib/

lru_cache.c
      51  #define PARANOIA_LC_ELEMENT(lc, e) do { \    [argument]
      53  struct lc_element *e_ = (e); \
     106  struct lc_element *e;    in lc_create() [local]
     150  e = p + e_off;    in lc_create()
     151  e->lc_index = i;    in lc_create()
     152  e->lc_number = LC_FREE;    in lc_create()
     153  e->lc_new_number = LC_FREE;    in lc_create()
     154  list_add(&e->list, &lc->free);    in lc_create()
     155  element[i] = e;    in lc_create()
     224  struct lc_element *e = lc->lc_element[i];    in lc_reset() [local]
    [all …]

sha256.c
      50  u32 a, b, c, d, e, f, g, h, t1, t2;    in sha256_transform() [local]
      64  e = state[4]; f = state[5]; g = state[6]; h = state[7];    in sha256_transform()
      67  t1 = h + e1(e) + Ch(e, f, g) + 0x428a2f98 + W[0];    in sha256_transform()
      69  t1 = g + e1(d) + Ch(d, e, f) + 0x71374491 + W[1];    in sha256_transform()
      71  t1 = f + e1(c) + Ch(c, d, e) + 0xb5c0fbcf + W[2];    in sha256_transform()
      73  t1 = e + e1(b) + Ch(b, c, d) + 0xe9b5dba5 + W[3];    in sha256_transform()
      74  t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;    in sha256_transform()
      76  t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;    in sha256_transform()
      78  t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;    in sha256_transform()
      80  t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;    in sha256_transform()
    [all …]
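
lib/sha256.c unrolls the 64 compression rounds eight at a time and renames the working variables instead of rotating them; each unrolled t1/t2 pair above is one iteration of the rolled loop below. A compact equivalent following FIPS 180-4 (a sketch: W[] is the already-expanded message schedule and K[] the 64 round constants, with K[0] = 0x428a2f98 as on line 67):

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t x, unsigned int n)
    {
        return (x >> n) | (x << (32 - n));
    }

    /* The kernel's helper definitions, per FIPS 180-4. */
    #define Ch(x, y, z)  ((z) ^ ((x) & ((y) ^ (z))))
    #define Maj(x, y, z) (((x) & (y)) | ((z) & ((x) | (y))))
    #define e0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22))
    #define e1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25))

    /* Rolled round core: state[0..7] holds a..h on entry. Each loop
     * iteration corresponds to one of the kernel's unrolled pairs. */
    static void sha256_rounds(uint32_t state[8], const uint32_t W[64],
                              const uint32_t K[64])
    {
        uint32_t a = state[0], b = state[1], c = state[2], d = state[3];
        uint32_t e = state[4], f = state[5], g = state[6], h = state[7];
        uint32_t t1, t2;
        int i;

        for (i = 0; i < 64; i++) {
            t1 = h + e1(e) + Ch(e, f, g) + K[i] + W[i];
            t2 = e0(a) + Maj(a, b, c);
            h = g; g = f; f = e; e = d + t1;
            d = c; c = b; b = a; a = t1 + t2;
        }

        state[0] += a; state[1] += b; state[2] += c; state[3] += d;
        state[4] += e; state[5] += f; state[6] += g; state[7] += h;
    }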

inflate.c
     140  uch e; /* number of extra bits or operation */    [member]
     327  const ush *e, /* list of extra bits for non-simple codes */    in huft_build() [argument]
     508  r.e = (uch)(16 + j); /* bits in this table */    in huft_build()
     520  r.e = 99; /* out of values--invalid code */    in huft_build()
     523  r.e = (uch)(*p < 256 ? 16 : 15); /* 256 is end-of-block code */    in huft_build()
     529  r.e = (uch)e[*p - s]; /* non-simple--look up in lists */    in huft_build()
     598  register unsigned e; /* table entry flag/number of extra bits */    in inflate_codes() [local]
     618  if ((e = (t = tl + ((unsigned)b & ml))->e) > 16)    in inflate_codes()
     620  if (e == 99)    in inflate_codes()
     623  e -= 16;    in inflate_codes()
    [all …]

/linux-4.19.296/fs/dlm/

requestqueue.c
      36  struct rq_entry *e;    in dlm_add_requestqueue() [local]
      39  e = kmalloc(sizeof(struct rq_entry) + length, GFP_NOFS);    in dlm_add_requestqueue()
      40  if (!e) {    in dlm_add_requestqueue()
      45  e->recover_seq = ls->ls_recover_seq & 0xFFFFFFFF;    in dlm_add_requestqueue()
      46  e->nodeid = nodeid;    in dlm_add_requestqueue()
      47  memcpy(&e->request, ms, ms->m_header.h_length);    in dlm_add_requestqueue()
      50  list_add_tail(&e->list, &ls->ls_requestqueue);    in dlm_add_requestqueue()
      67  struct rq_entry *e;    in dlm_process_requestqueue() [local]
      79  e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list);    in dlm_process_requestqueue()
      82  ms = &e->request;    in dlm_process_requestqueue()
    [all …]
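
Line 39 sizes one allocation for both the bookkeeping header and the variable-length message (sizeof(struct rq_entry) + length), and line 47 copies the message into the trailing member, so a single kfree() later releases everything. A userspace sketch of that flexible-array-member idiom; field names are borrowed from the snippet, and the kernel links entries with a struct list_head rather than a next pointer:

    #include <stdlib.h>
    #include <string.h>

    struct rq_entry {
        struct rq_entry *next;
        int nodeid;
        size_t length;
        unsigned char request[];   /* flexible array member */
    };

    /* Header plus payload in a single allocation, in the style of
     * dlm_add_requestqueue(). */
    static struct rq_entry *rq_entry_new(int nodeid, const void *msg,
                                         size_t length)
    {
        struct rq_entry *e = malloc(sizeof(*e) + length);

        if (!e)
            return NULL;
        e->next = NULL;
        e->nodeid = nodeid;
        e->length = length;
        memcpy(e->request, msg, length);
        return e;
    }

    int main(void)
    {
        struct rq_entry *e = rq_entry_new(3, "hello", 6);

        free(e);   /* one free() releases header and payload together */
        return 0;
    }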

/linux-4.19.296/virt/kvm/arm/vgic/

vgic-irqfd.c
      29  static int vgic_irqfd_set_irq(struct kvm_kernel_irq_routing_entry *e,    in vgic_irqfd_set_irq() [argument]
      33  unsigned int spi_id = e->irqchip.pin + VGIC_NR_PRIVATE_IRQS;    in vgic_irqfd_set_irq()
      50  struct kvm_kernel_irq_routing_entry *e,    in kvm_set_routing_entry() [argument]
      57  e->set = vgic_irqfd_set_irq;    in kvm_set_routing_entry()
      58  e->irqchip.irqchip = ue->u.irqchip.irqchip;    in kvm_set_routing_entry()
      59  e->irqchip.pin = ue->u.irqchip.pin;    in kvm_set_routing_entry()
      60  if ((e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS) ||    in kvm_set_routing_entry()
      61  (e->irqchip.irqchip >= KVM_NR_IRQCHIPS))    in kvm_set_routing_entry()
      65  e->set = kvm_set_msi;    in kvm_set_routing_entry()
      66  e->msi.address_lo = ue->u.msi.address_lo;    in kvm_set_routing_entry()
    [all …]

/linux-4.19.296/crypto/

sha256_generic.c
      73  u32 a, b, c, d, e, f, g, h, t1, t2;    in sha256_transform() [local]
      87  e=state[4]; f=state[5]; g=state[6]; h=state[7];    in sha256_transform()
      90  t1 = h + e1(e) + Ch(e,f,g) + 0x428a2f98 + W[ 0];    in sha256_transform()
      92  t1 = g + e1(d) + Ch(d,e,f) + 0x71374491 + W[ 1];    in sha256_transform()
      94  t1 = f + e1(c) + Ch(c,d,e) + 0xb5c0fbcf + W[ 2];    in sha256_transform()
      96  t1 = e + e1(b) + Ch(b,c,d) + 0xe9b5dba5 + W[ 3];    in sha256_transform()
      97  t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;    in sha256_transform()
      99  t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;    in sha256_transform()
     101  t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;    in sha256_transform()
     103  t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;    in sha256_transform()
    [all …]

sha512_generic.c
     106  u64 a, b, c, d, e, f, g, h, t1, t2;    in sha512_transform() [local]
     113  e=state[4]; f=state[5]; g=state[6]; h=state[7];    in sha512_transform()
     131  t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i ] + W[(i & 15)];    in sha512_transform()
     133  t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[(i & 15) + 1];    in sha512_transform()
     135  t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[(i & 15) + 2];    in sha512_transform()
     137  t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[(i & 15) + 3];    in sha512_transform()
     138  t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;    in sha512_transform()
     140  t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;    in sha512_transform()
     142  t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;    in sha512_transform()
     144  t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;    in sha512_transform()
    [all …]

/linux-4.19.296/include/linux/

intel-iommu.h
     118  #define ecap_dit(e) ((e >> 41) & 0x1)    [argument]
     119  #define ecap_pasid(e) ((e >> 40) & 0x1)    [argument]
     120  #define ecap_pss(e) ((e >> 35) & 0x1f)    [argument]
     121  #define ecap_eafs(e) ((e >> 34) & 0x1)    [argument]
     122  #define ecap_nwfs(e) ((e >> 33) & 0x1)    [argument]
     123  #define ecap_srs(e) ((e >> 31) & 0x1)    [argument]
     124  #define ecap_ers(e) ((e >> 30) & 0x1)    [argument]
     125  #define ecap_prs(e) ((e >> 29) & 0x1)    [argument]
     126  #define ecap_broken_pasid(e) ((e >> 28) & 0x1)    [argument]
     127  #define ecap_dis(e) ((e >> 27) & 0x1)    [argument]
    [all …]
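
Each ecap_*() macro extracts one field of the VT-d extended capability register by shifting the field down to bit 0 and masking to its width: 0x1 for single-bit flags, 0x1f for the five-bit PSS field at bit 35. The general shift-and-mask form, runnable in userspace (FIELD_GET64 is a hypothetical helper here, not the kernel's FIELD_GET):

    #include <stdint.h>
    #include <stdio.h>

    /* field = (reg >> position) & ((1 << width) - 1), the pattern
     * behind every ecap_*() macro above. */
    #define FIELD_GET64(reg, pos, width) \
        (((uint64_t)(reg) >> (pos)) & ((1ULL << (width)) - 1))

    int main(void)
    {
        uint64_t ecap = (1ULL << 40) | (0x14ULL << 35);   /* sample value */

        /* Equivalent to ecap_pasid(ecap) and ecap_pss(ecap). */
        printf("pasid=%llu pss=0x%llx\n",
               (unsigned long long)FIELD_GET64(ecap, 40, 1),
               (unsigned long long)FIELD_GET64(ecap, 35, 5));
        return 0;
    }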

average.h
      32  static inline void ewma_##name##_init(struct ewma_##name *e) \
      42  e->internal = 0; \
      45  ewma_##name##_read(struct ewma_##name *e) \
      51  return e->internal >> (_precision); \
      53  static inline void ewma_##name##_add(struct ewma_##name *e, \
      56  unsigned long internal = READ_ONCE(e->internal); \
      65  WRITE_ONCE(e->internal, internal ? \
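
average.h is a macro template: each DECLARE_EWMA() expansion generates a typed fixed-point exponentially weighted moving average that keeps extra precision bits in e->internal and updates with shifts only. A de-macroized sketch of the arithmetic; EWMA_PRECISION and EWMA_WEIGHT are illustrative constants, and the kernel derives the shift from the macro's parameters and wraps the accesses in READ_ONCE()/WRITE_ONCE():

    #include <stdio.h>

    #define EWMA_PRECISION 10
    #define EWMA_WEIGHT    4            /* smoothing factor 1/16 */

    struct ewma {
        unsigned long internal;
    };

    static void ewma_add(struct ewma *e, unsigned long val)
    {
        unsigned long internal = e->internal;

        /* First sample seeds the average; afterwards
         * avg += (sample - avg) / 2^EWMA_WEIGHT, scaled by
         * 2^EWMA_PRECISION, mirroring the "internal ? ... :" update
         * begun at line 65 above. */
        e->internal = internal ?
            (((internal << EWMA_WEIGHT) - internal) +
             (val << EWMA_PRECISION)) >> EWMA_WEIGHT :
            (val << EWMA_PRECISION);
    }

    static unsigned long ewma_read(const struct ewma *e)
    {
        return e->internal >> EWMA_PRECISION;
    }

    int main(void)
    {
        struct ewma e = { 0 };
        unsigned long samples[] = { 100, 100, 10, 10, 10 };
        size_t i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            ewma_add(&e, samples[i]);
            printf("avg ~= %lu\n", ewma_read(&e));
        }
        return 0;
    }

A larger EWMA_WEIGHT smooths harder: each new sample moves the average by only 1/2^EWMA_WEIGHT of the difference.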

build_bug.h
      10  #define BUILD_BUG_ON_ZERO(e) (0)    [argument]
      11  #define BUILD_BUG_ON_INVALID(e) (0)    [argument]
      29  #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); }))    [argument]
      36  #define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((__force long)(e))))    [argument]
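
The duplicate definitions are the stubs used when a static checker such as sparse is running (lines 10-11) versus the real versions. Line 29's trick turns a condition into a bitfield width: a true e yields int:(-1), which no compiler accepts, while a false e leaves a sizeof that evaluates to 0 and can sit inside a larger expression. The classic application is an ARRAY_SIZE() that rejects pointers, mirroring the kernel's __must_be_array(); this sketch relies on the GCC/Clang typeof and __builtin_types_compatible_p extensions:

    #include <stdio.h>

    /* Line 29's macro: compile error when e is true, constant 0 when
     * e is false. */
    #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); }))

    /* Fails to compile when 'arr' has pointer type rather than true
     * array type, because the element count would be meaningless. */
    #define ARRAY_SIZE(arr)                                                \
        (sizeof(arr) / sizeof((arr)[0]) +                                  \
         BUILD_BUG_ON_ZERO(__builtin_types_compatible_p(typeof(arr),       \
                                                        typeof(&(arr)[0]))))

    int main(void)
    {
        int a[7];

        printf("%zu\n", ARRAY_SIZE(a));   /* prints 7 */
        /* int *p = a; ARRAY_SIZE(p);        fails to compile */
        return 0;
    }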

/linux-4.19.296/virt/kvm/

irqchip.c
      38  struct kvm_kernel_irq_routing_entry *e;    in kvm_irq_map_gsi() [local]
      44  hlist_for_each_entry(e, &irq_rt->map[gsi], link) {    in kvm_irq_map_gsi()
      45  entries[n] = *e;    in kvm_irq_map_gsi()
     120  struct kvm_kernel_irq_routing_entry *e;    in free_irq_routing_table() [local]
     123  hlist_for_each_entry_safe(e, n, &rt->map[i], link) {    in free_irq_routing_table()
     124  hlist_del(&e->link);    in free_irq_routing_table()
     125  kfree(e);    in free_irq_routing_table()
     142  struct kvm_kernel_irq_routing_entry *e,    in setup_routing_entry() [argument]
     159  e->gsi = gsi;    in setup_routing_entry()
     160  e->type = ue->type;    in setup_routing_entry()
    [all …]
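
free_irq_routing_table() (lines 120-125) deletes entries while iterating, so it uses the _safe iterator variant, which caches the successor in n before the body can kfree(e). The same discipline on a plain singly-linked list:

    #include <stdlib.h>

    struct entry {
        struct entry *next;
        int gsi;
    };

    /* Teardown in the spirit of hlist_for_each_entry_safe(): capture
     * the successor before freeing the current node, so the walk never
     * dereferences freed memory. */
    static void free_all(struct entry *head)
    {
        struct entry *e, *n;

        for (e = head; e; e = n) {
            n = e->next;   /* saved before free() invalidates e */
            free(e);
        }
    }

    int main(void)
    {
        struct entry *head = NULL, *e;
        int i;

        for (i = 0; i < 3; i++) {
            e = malloc(sizeof(*e));
            if (!e)
                return 1;
            e->gsi = i;
            e->next = head;
            head = e;
        }
        free_all(head);
        return 0;
    }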

/linux-4.19.296/drivers/edac/

ghes_edac.c
     187  struct edac_raw_error_desc *e;    in ghes_edac_report_mem_error() [local]
     208  e = &mci->error_desc;    in ghes_edac_report_mem_error()
     211  memset(e, 0, sizeof (*e));    in ghes_edac_report_mem_error()
     212  e->error_count = 1;    in ghes_edac_report_mem_error()
     213  e->grain = 1;    in ghes_edac_report_mem_error()
     214  strcpy(e->label, "unknown label");    in ghes_edac_report_mem_error()
     215  e->msg = pvt->msg;    in ghes_edac_report_mem_error()
     216  e->other_detail = pvt->other_detail;    in ghes_edac_report_mem_error()
     217  e->top_layer = -1;    in ghes_edac_report_mem_error()
     218  e->mid_layer = -1;    in ghes_edac_report_mem_error()
    [all …]

edac_mc.c
    1062  struct edac_raw_error_desc *e)    in edac_raw_mc_handle_error() [argument]
    1065  int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };    in edac_raw_mc_handle_error()
    1071  e->page_frame_number, e->offset_in_page,    in edac_raw_mc_handle_error()
    1072  e->grain, e->syndrome);    in edac_raw_mc_handle_error()
    1073  edac_ce_error(mci, e->error_count, pos, e->msg, e->location, e->label,    in edac_raw_mc_handle_error()
    1074  detail, e->other_detail, e->enable_per_layer_report,    in edac_raw_mc_handle_error()
    1075  e->page_frame_number, e->offset_in_page, e->grain);    in edac_raw_mc_handle_error()
    1079  e->page_frame_number, e->offset_in_page, e->grain);    in edac_raw_mc_handle_error()
    1081  edac_ue_error(mci, e->error_count, pos, e->msg, e->location, e->label,    in edac_raw_mc_handle_error()
    1082  detail, e->other_detail, e->enable_per_layer_report);    in edac_raw_mc_handle_error()
    [all …]

/linux-4.19.296/fs/ubifs/

recovery.c
    1261  struct size_entry *e;    in add_ino() [local]
    1265  e = rb_entry(parent, struct size_entry, rb);    in add_ino()
    1266  if (inum < e->inum)    in add_ino()
    1272  e = kzalloc(sizeof(struct size_entry), GFP_KERNEL);    in add_ino()
    1273  if (!e)    in add_ino()
    1276  e->inum = inum;    in add_ino()
    1277  e->i_size = i_size;    in add_ino()
    1278  e->d_size = d_size;    in add_ino()
    1279  e->exists = exists;    in add_ino()
    1281  rb_link_node(&e->rb, parent, p);    in add_ino()
    [all …]
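
add_ino() uses the kernel's standard rb-tree insertion walk: descend from the root remembering the parent and the child slot, bail out if the key exists, otherwise allocate and splice the node in with rb_link_node() (line 1281) followed by rb_insert_color(). The same walk on a plain, unbalanced binary search tree (a sketch; the kernel version keeps struct rb_node children and rebalances via the rb_* calls noted in the comments):

    #include <stdlib.h>

    struct size_entry {
        struct size_entry *left, *right;
        unsigned long inum;
    };

    static struct size_entry *add_ino(struct size_entry **root,
                                      unsigned long inum)
    {
        struct size_entry **p = root, *e;

        /* Descend, tracking the slot where the new node belongs. */
        while (*p) {
            e = *p;
            if (inum < e->inum)
                p = &e->left;
            else if (inum > e->inum)
                p = &e->right;
            else
                return e;              /* key already present */
        }

        e = calloc(1, sizeof(*e));     /* kzalloc(..., GFP_KERNEL) */
        if (!e)
            return NULL;
        e->inum = inum;
        *p = e;                        /* rb_link_node() + rb_insert_color()
                                          in the kernel, which also
                                          rebalance */
        return e;
    }

    int main(void)
    {
        struct size_entry *root = NULL;

        return add_ino(&root, 42) ? 0 : 1;
    }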

/linux-4.19.296/include/uapi/linux/netfilter_bridge/

ebtables.h
     195  ebt_get_target(struct ebt_entry *e)    in ebt_get_target() [argument]
     197  return (void *)e + e->target_offset;    in ebt_get_target()
     216  #define EBT_MATCH_ITERATE(e, fn, args...) \    [argument]
     223  __i < (e)->watchers_offset; \
     226  __match = (void *)(e) + __i; \
     233  if (__i != (e)->watchers_offset) \
     239  #define EBT_WATCHER_ITERATE(e, fn, args...) \    [argument]
     245  for (__i = e->watchers_offset; \
     246  __i < (e)->target_offset; \
     249  __watcher = (void *)(e) + __i; \
    [all …]
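
An ebt_entry is a variable-length record: matches, watchers, and the target are packed behind the header and located purely by byte offsets, which is why ebt_get_target() is just (void *)e + e->target_offset and the ITERATE macros advance a byte cursor through each region. A simplified standalone sketch of that offset walk; the struct layouts are reduced to the fields the traversal reads, so this is the shape of the technique, not the real uapi layout:

    #include <stddef.h>

    struct entry {
        unsigned int watchers_offset;   /* byte offset of watcher region */
        unsigned int target_offset;     /* byte offset of the target */
    };

    struct match {
        unsigned int match_size;        /* payload size of this record */
        /* payload follows immediately */
    };

    /* Walk the match region as EBT_MATCH_ITERATE() does: start right
     * after the header and advance by each record's header plus its
     * declared size until the watcher region begins. */
    static void for_each_match(struct entry *e, void (*fn)(struct match *))
    {
        unsigned int i = sizeof(struct entry);

        while (i < e->watchers_offset) {
            struct match *m = (struct match *)((char *)e + i);

            fn(m);
            i += sizeof(struct match) + m->match_size;
        }
    }

    static void count_match(struct match *m) { (void)m; }

    int main(void)
    {
        /* Entry with an empty match region: the cursor starts at the
         * watchers offset, so fn is never called. */
        struct entry e = { sizeof(struct entry), sizeof(struct entry) };

        for_each_match(&e, count_match);
        return 0;
    }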

/linux-4.19.296/include/trace/events/

kvm.h
      99  TP_PROTO(__u64 e, int pin, bool coalesced),
     100  TP_ARGS(e, pin, coalesced),
     103  __field( __u64, e )
     109  __entry->e = e;
     115  __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
     116  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
     117  (__entry->e & (1<<11)) ? "logical" : "physical",
     118  (__entry->e & (1<<15)) ? "level" : "edge",
     119  (__entry->e & (1<<16)) ? "|masked" : "",
     124  TP_PROTO(__u64 e),
    [all …]
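
The trace format unpacks a 64-bit I/O APIC redirection entry inline: destination in bits 63:56, vector in bits 7:0, delivery mode in bits 10:8, and single-bit flags for logical destination (11), level trigger (15), and mask (16). The same decode as a plain function; the bit positions are taken from the tracepoint above, and the redirection entry's remaining fields are ignored:

    #include <stdint.h>
    #include <stdio.h>

    static void decode_ioapic_entry(uint64_t e)
    {
        printf("dst=%02x vec=%u dlm=%u %s %s%s\n",
               (unsigned int)((e >> 56) & 0xff),   /* destination   */
               (unsigned int)(e & 0xff),           /* vector        */
               (unsigned int)((e >> 8) & 0x7),     /* delivery mode */
               (e & (1ULL << 11)) ? "logical" : "physical",
               (e & (1ULL << 15)) ? "level" : "edge",
               (e & (1ULL << 16)) ? "|masked" : "");
    }

    int main(void)
    {
        decode_ioapic_entry(0x0100000000008030ULL);   /* sample entry */
        return 0;
    }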

/linux-4.19.296/drivers/misc/vmw_vmci/

vmci_event.c
      53  int e;    in vmci_event_exit() [local]
      56  for (e = 0; e < VMCI_EVENT_MAX; e++) {    in vmci_event_exit()
      58  list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {    in vmci_event_exit()
      77  int e;    in event_find() [local]
      79  for (e = 0; e < VMCI_EVENT_MAX; e++) {    in event_find()
      81  list_for_each_entry(cur, &subscriber_array[e], node) {    in event_find()

/linux-4.19.296/include/net/netfilter/

nf_conntrack_ecache.h
      46  struct nf_conntrack_ecache *e;    in nf_ct_ecache_ext_add() [local]
      55  e = nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp);    in nf_ct_ecache_ext_add()
      56  if (e) {    in nf_ct_ecache_ext_add()
      57  e->ctmask = ctmask;    in nf_ct_ecache_ext_add()
      58  e->expmask = expmask;    in nf_ct_ecache_ext_add()
      60  return e;    in nf_ct_ecache_ext_add()
      91  struct nf_conntrack_ecache *e;    in nf_conntrack_event_cache() [local]
      96  e = nf_ct_ecache_find(ct);    in nf_conntrack_event_cache()
      97  if (e == NULL)    in nf_conntrack_event_cache()
     100  set_bit(event, &e->cache);    in nf_conntrack_event_cache()
    [all …]