/linux-4.19.296/fs/btrfs/ |
D | async-thread.c |
    65  btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)  in btrfs_workqueue_owner() argument
    67      return wq->fs_info;  in btrfs_workqueue_owner()
    73      return work->wq->fs_info;  in btrfs_work_owner()
    76  bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)  in btrfs_workqueue_normal_congested() argument
    84      if (wq->normal->thresh == NO_THRESHOLD)  in btrfs_workqueue_normal_congested()
    87      return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;  in btrfs_workqueue_normal_congested()
    162 __btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);
    200 static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)  in thresh_queue_hook() argument
    202     if (wq->thresh == NO_THRESHOLD)  in thresh_queue_hook()
    204     atomic_inc(&wq->pending);  in thresh_queue_hook()
    [all …]
|
D | async-thread.h |
    28  struct __btrfs_workqueue *wq;  member
    68  void btrfs_queue_work(struct btrfs_workqueue *wq,
    70  void btrfs_destroy_workqueue(struct btrfs_workqueue *wq);
    71  void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
    74  struct btrfs_fs_info *btrfs_workqueue_owner(const struct __btrfs_workqueue *wq);
    75  bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq);
    76  void btrfs_flush_workqueue(struct btrfs_workqueue *wq);
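Note: the congestion test at async-thread.c line 87 treats a thresholded queue as congested once its pending counter climbs past twice the threshold. A minimal userspace mirror of that heuristic, using C11 atomics and hypothetical names (not the btrfs types):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define NO_THRESHOLD (-1)

    /* Hypothetical stand-in for the accounting kept in
     * struct __btrfs_workqueue. */
    struct wq_stats {
        atomic_int pending;   /* work items queued but not yet run */
        int thresh;           /* target depth; NO_THRESHOLD disables */
    };

    /* Mirrors btrfs_workqueue_normal_congested(): never congested when
     * thresholding is off, otherwise congested once the backlog grows
     * past twice the threshold. */
    static bool wq_congested(struct wq_stats *wq)
    {
        if (wq->thresh == NO_THRESHOLD)
            return false;
        return atomic_load(&wq->pending) > wq->thresh * 2;
    }

The factor of two presumably gives submitters some slack before they need to back off.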
|
/linux-4.19.296/fs/autofs/ |
D | waitq.c |
    20  struct autofs_wait_queue *wq, *nwq;  in autofs_catatonic_mode() local
    31      wq = sbi->queues;  in autofs_catatonic_mode()
    33      while (wq) {  in autofs_catatonic_mode()
    34          nwq = wq->next;  in autofs_catatonic_mode()
    35          wq->status = -ENOENT; /* Magic is gone - report failure */  in autofs_catatonic_mode()
    36          kfree(wq->name.name);  in autofs_catatonic_mode()
    37          wq->name.name = NULL;  in autofs_catatonic_mode()
    38          wake_up_interruptible(&wq->queue);  in autofs_catatonic_mode()
    39          if (!--wq->wait_ctr)  in autofs_catatonic_mode()
    40              kfree(wq);  in autofs_catatonic_mode()
    [all …]
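Note: autofs_catatonic_mode() fails every pending waiter: it sets an error status, wakes the sleeper, and frees the node only when wait_ctr drops to zero, since the sleeping task otherwise still holds a reference. A sketch of that teardown pattern with hypothetical types, not the autofs structs:

    #include <linux/wait.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    /* Hypothetical waiter list, shaped like autofs_wait_queue: the
     * node is freed by whichever side drops wait_ctr to zero. */
    struct waiter {
        struct waiter *next;
        wait_queue_head_t queue;
        int status;
        int wait_ctr;
    };

    static void fail_all_waiters(struct waiter *head)
    {
        struct waiter *w = head, *next;

        while (w) {
            next = w->next;
            w->status = -ENOENT;          /* report failure to the sleeper */
            wake_up_interruptible(&w->queue);
            if (!--w->wait_ctr)           /* last reference: free it here */
                kfree(w);
            w = next;
        }
    }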
|
/linux-4.19.296/include/linux/ |
D | swait.h |
    134 static inline int swait_active(struct swait_queue_head *wq)  in swait_active() argument
    136     return !list_empty(&wq->task_list);  in swait_active()
    147 static inline bool swq_has_sleeper(struct swait_queue_head *wq)  in swq_has_sleeper() argument
    157     return swait_active(wq);  in swq_has_sleeper()
    171 #define ___swait_event(wq, condition, state, ret, cmd) \  argument
    179     long __int = prepare_to_swait_event(&wq, &__wait, state);\
    191     finish_swait(&wq, &__wait); \
    195 #define __swait_event(wq, condition) \  argument
    196     (void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \
    199 #define swait_event_exclusive(wq, condition) \  argument
    [all …]
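Note: simple wait queues (swait) deliberately support only exclusive, wake-one semantics. A minimal kernel-style sketch of the producer/consumer pairing, assuming a 4.19-era module context and using swait_event_exclusive() and swq_has_sleeper() as declared here:

    #include <linux/swait.h>

    static DECLARE_SWAIT_QUEUE_HEAD(my_swq);
    static bool my_ready;

    /* Consumer: sleeps uninterruptibly until my_ready is set. */
    static void consumer(void)
    {
        swait_event_exclusive(my_swq, my_ready);
    }

    /* Producer: publishes the condition, then wakes one sleeper.
     * swq_has_sleeper() pairs a full barrier with the emptiness
     * check, so the store to my_ready cannot slip past the test. */
    static void producer(void)
    {
        my_ready = true;
        if (swq_has_sleeper(&my_swq))
            swake_up_one(&my_swq);
    }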
|
D | workqueue.h |
    121 struct workqueue_struct *wq;  member
    130 struct workqueue_struct *wq;  member
    457 extern void destroy_workqueue(struct workqueue_struct *wq);
    461 int apply_workqueue_attrs(struct workqueue_struct *wq,
    465 extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
    467 extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
    469 extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
    471 extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
    473 extern void flush_workqueue(struct workqueue_struct *wq);
    474 extern void drain_workqueue(struct workqueue_struct *wq);
    [all …]
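Note: the declarations above cover the core workqueue lifecycle. A minimal kernel-style sketch of typical usage, with hypothetical names:

    #include <linux/workqueue.h>
    #include <linux/errno.h>

    static struct workqueue_struct *my_wq;
    static struct work_struct my_work;

    static void my_work_fn(struct work_struct *work)
    {
        /* runs in process context on a kworker thread */
    }

    static int my_init(void)
    {
        /* WQ_UNBOUND: not tied to the submitting CPU;
         * max_active 0 selects the default limit. */
        my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
        if (!my_wq)
            return -ENOMEM;
        INIT_WORK(&my_work, my_work_fn);
        queue_work(my_wq, &my_work);
        return 0;
    }

    static void my_exit(void)
    {
        flush_workqueue(my_wq);    /* wait for queued items to finish */
        destroy_workqueue(my_wq);
    }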
|
D | wait.h |
    578 #define wait_event_interruptible_hrtimeout(wq, condition, timeout) \  argument
    583     __ret = __wait_event_hrtimeout(wq, condition, timeout, \
    588 #define __wait_event_interruptible_exclusive(wq, condition) \  argument
    589     ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
    592 #define wait_event_interruptible_exclusive(wq, condition) \  argument
    597     __ret = __wait_event_interruptible_exclusive(wq, condition); \
    601 #define __wait_event_killable_exclusive(wq, condition) \  argument
    602     ___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \
    605 #define wait_event_killable_exclusive(wq, condition) \  argument
    610     __ret = __wait_event_killable_exclusive(wq, condition); \
    [all …]
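Note: the _exclusive variants (the `1` passed to ___wait_event) add the waiter with the exclusive flag, so a plain wake_up() wakes just one sleeper instead of the whole herd. A sketch, assuming a kernel context:

    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(req_wq);
    static int req_available;

    /* Each worker sleeps exclusively: one wake_up() wakes one worker. */
    static int worker_wait(void)
    {
        int ret = wait_event_interruptible_exclusive(req_wq, req_available);

        if (ret)               /* -ERESTARTSYS: interrupted by a signal */
            return ret;
        /* ... claim the request ... */
        return 0;
    }

    static void submit_request(void)
    {
        req_available = 1;
        wake_up(&req_wq);      /* wakes exactly one exclusive waiter */
    }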
|
D | freezer.h |
    251 #define wait_event_freezekillable_unsafe(wq, condition) \  argument
    255     __retval = wait_event_killable(wq, (condition)); \
    297 #define wait_event_freezekillable_unsafe(wq, condition) \  argument
    298     wait_event_killable(wq, condition)
|
D | padata.h |
    156 struct workqueue_struct *wq;  member
    169     struct workqueue_struct *wq);
|
/linux-4.19.296/lib/raid6/ |
D | neon.uc |
    62  register unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
    70      wq$$ = wp$$ = vld1q_u8(&dptr[z0][d+$$*NSIZE]);
    74          w2$$ = MASK(wq$$);
    75          w1$$ = SHLBYTE(wq$$);
    79          wq$$ = veorq_u8(w1$$, wd$$);
    82      vst1q_u8(&q[d+NSIZE*$$], wq$$);
    93  register unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
    101     wq$$ = vld1q_u8(&dptr[z0][d+$$*NSIZE]);
    102     wp$$ = veorq_u8(vld1q_u8(&p[d+$$*NSIZE]), wq$$);
    108         w2$$ = MASK(wq$$);
    [all …]
|
D | int.uc |
    88  unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
    95      wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
    99          w2$$ = MASK(wq$$);
    100         w1$$ = SHLBYTE(wq$$);
    103         wq$$ = w1$$ ^ wd$$;
    106     *(unative_t *)&q[d+NSIZE*$$] = wq$$;
    117 unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
    125     wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
    129         w2$$ = MASK(wq$$);
    130         w1$$ = SHLBYTE(wq$$);
    [all …]
|
D | vpermxor.uc |
    49  unative_t wp$$, wq$$, wd$$;
    56      wp$$ = wq$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
    64          asm(VPERMXOR(%0,%1,%2,%3):"=v"(wq$$):"v"(gf_high), "v"(gf_low), "v"(wq$$));
    65          wq$$ = vec_xor(wq$$, wd$$);
    68      *(unative_t *)&q[d+NSIZE*$$] = wq$$;
|
D | altivec.uc |
    77  unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
    85      wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
    89          w2$$ = MASK(wq$$);
    90          w1$$ = SHLBYTE(wq$$);
    93          wq$$ = vec_xor(w1$$, wd$$);
    96      *(unative_t *)&q[d+NSIZE*$$] = wq$$;
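Note: all four generators (int, neon, altivec, vpermxor) unroll the same recurrence. P is the plain XOR of the data blocks, while Q is a Horner-style evaluation over GF(2^8): SHLBYTE doubles every byte and MASK selects the bytes whose top bit was set, so the reduction polynomial 0x1d can be folded back in. A scalar, byte-at-a-time illustration in userspace C (my own sketch, not the kernel code):

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Multiply one GF(2^8) element by x (the RAID6 generator g = 2),
     * modulo the polynomial x^8 + x^4 + x^3 + x^2 + 1 (0x11d). */
    static uint8_t gf_mul_x(uint8_t b)
    {
        return (uint8_t)((b << 1) ^ ((b & 0x80) ? 0x1d : 0));
    }

    /* Compute P (parity) and Q (Reed-Solomon syndrome) for `disks`
     * data blocks of `len` bytes, Horner-style from the highest
     * block down, exactly as the unrolled .uc loops do. */
    static void gen_syndrome(int disks, size_t len,
                             uint8_t **data, uint8_t *p, uint8_t *q)
    {
        for (size_t i = 0; i < len; i++) {
            uint8_t wp = data[disks - 1][i];
            uint8_t wq = wp;

            for (int z = disks - 2; z >= 0; z--) {
                uint8_t wd = data[z][i];
                wp ^= wd;                  /* P: running XOR        */
                wq = gf_mul_x(wq) ^ wd;    /* Q: wq = wq * x + data */
            }
            p[i] = wp;                     /* P = sum of D_z        */
            q[i] = wq;                     /* Q = sum of g^z * D_z  */
        }
    }

    int main(void)
    {
        uint8_t d0[4] = {1, 2, 3, 4}, d1[4] = {5, 6, 7, 8};
        uint8_t *data[2] = {d0, d1}, p[4], q[4];

        gen_syndrome(2, 4, data, p, q);
        for (int i = 0; i < 4; i++)
            printf("p=%02x q=%02x\n", p[i], q[i]);
        return 0;
    }

The SIMD variants only change how 16 or more of these byte lanes are processed per step (vector shifts, sign-bit masks, or the POWER9 vpermxor instruction).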
|
/linux-4.19.296/drivers/edac/ |
D | wq.c |
    3   static struct workqueue_struct *wq;  variable
    7       return queue_delayed_work(wq, work, delay);  in edac_queue_work()
    13      return mod_delayed_work(wq, work, delay);  in edac_mod_work()
    22      flush_workqueue(wq);  in edac_stop_work()
    30      wq = alloc_ordered_workqueue("edac-poller", WQ_MEM_RECLAIM);  in edac_workqueue_setup()
    31      if (!wq)  in edac_workqueue_setup()
    39      flush_workqueue(wq);  in edac_workqueue_teardown()
    40      destroy_workqueue(wq);  in edac_workqueue_teardown()
    41      wq = NULL;  in edac_workqueue_teardown()
|
/linux-4.19.296/fs/jfs/ |
D | jfs_lock.h |
    35  #define __SLEEP_COND(wq, cond, lock_cmd, unlock_cmd) \  argument
    39      add_wait_queue(&wq, &__wait); \
    49      remove_wait_queue(&wq, &__wait); \
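Note: __SLEEP_COND open-codes the register/set-state/recheck/schedule loop that the wait_event family normally hides, dropping the caller's lock only while actually asleep. A sketch of the expanded pattern, assuming the lock is a spinlock (jfs passes the lock and unlock commands in as macro arguments):

    #include <linux/wait.h>
    #include <linux/sched.h>
    #include <linux/spinlock.h>

    static DECLARE_WAIT_QUEUE_HEAD(cond_wq);

    /* Open-coded equivalent of __SLEEP_COND(cond_wq, *cond, ...).
     * Called and returns with `lock` held; `lock` guards `cond`. */
    static void sleep_on_cond(spinlock_t *lock, int *cond)
    {
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&cond_wq, &wait);
        for (;;) {
            set_current_state(TASK_UNINTERRUPTIBLE);
            if (*cond)
                break;
            spin_unlock(lock);    /* sleep without the lock */
            schedule();
            spin_lock(lock);
        }
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&cond_wq, &wait);
    }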
|
/linux-4.19.296/fs/ |
D | eventpoll.c |
    198 wait_queue_head_t wq;  member
    570 static void ep_poll_safewake(wait_queue_head_t *wq)  in ep_poll_safewake() argument
    575     ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);  in ep_poll_safewake()
    582 static void ep_poll_safewake(wait_queue_head_t *wq)  in ep_poll_safewake() argument
    584     wake_up_poll(wq, EPOLLIN);  in ep_poll_safewake()
    698     spin_lock_irq(&ep->wq.lock);  in ep_scan_ready_list()
    701     spin_unlock_irq(&ep->wq.lock);  in ep_scan_ready_list()
    708     spin_lock_irq(&ep->wq.lock);  in ep_scan_ready_list()
    745     if (waitqueue_active(&ep->wq))  in ep_scan_ready_list()
    746         wake_up_locked(&ep->wq);  in ep_scan_ready_list()
    [all …]
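Note: lines 745-746 show the waitqueue_active() fast path: skip the wakeup machinery entirely when nobody is sleeping. It needs no extra memory barrier here only because both the check and the sleeper run under ep->wq.lock. A sketch of the wake side:

    #include <linux/wait.h>

    /* Wake-side pattern from ep_scan_ready_list(): publish the event
     * and test for sleepers under the waitqueue's own lock, then use
     * the _locked wakeup since the lock is already held. */
    static void wake_if_needed(wait_queue_head_t *wq)
    {
        spin_lock_irq(&wq->lock);
        /* ... publish the event under the lock ... */
        if (waitqueue_active(wq))
            wake_up_locked(wq);
        spin_unlock_irq(&wq->lock);
    }

Without a shared lock (or an explicit smp_mb()) between setting the condition and calling waitqueue_active(), this optimization can miss a waiter that is just going to sleep.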
|
D | userfaultfd.c |
    89  wait_queue_entry_t wq;  member
    107 static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,  in userfaultfd_wake_function() argument
    115     uwq = container_of(wq, struct userfaultfd_wait_queue, wq);  in userfaultfd_wake_function()
    128     ret = wake_up_state(wq->private, mode);  in userfaultfd_wake_function()
    141     list_del_init(&wq->entry);  in userfaultfd_wake_function()
    460     init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);  in handle_userfault()
    461     uwq.wq.private = current;  in handle_userfault()
    478     __add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);  in handle_userfault()
    566     if (!list_empty_careful(&uwq.wq.entry)) {  in handle_userfault()
    572         list_del(&uwq.wq.entry);  in handle_userfault()
    [all …]
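Note: handle_userfault() embeds its wait_queue_entry_t inside a larger struct and installs a custom wake function; the callback recovers the outer struct with container_of() and decides for itself whether to wake the task. A sketch of that pattern with hypothetical names and a made-up match rule:

    #include <linux/wait.h>
    #include <linux/sched.h>
    #include <linux/kernel.h>

    /* Outer object carrying extra state next to the waitqueue entry,
     * like struct userfaultfd_wait_queue does. */
    struct my_waiter {
        unsigned long key_wanted;     /* hypothetical match criterion */
        wait_queue_entry_t wq;
    };

    /* Custom wake function: only wake the task if the key matches. */
    static int my_wake_fn(wait_queue_entry_t *wq, unsigned mode,
                          int sync, void *key)
    {
        struct my_waiter *w = container_of(wq, struct my_waiter, wq);

        if ((unsigned long)key != w->key_wanted)
            return 0;                         /* not ours; keep scanning */
        list_del_init(&wq->entry);            /* dequeue ourselves */
        return wake_up_state(wq->private, mode);
    }

    static void register_waiter(wait_queue_head_t *head, struct my_waiter *w)
    {
        init_waitqueue_func_entry(&w->wq, my_wake_fn);
        w->wq.private = current;              /* task to wake later */
        add_wait_queue(head, &w->wq);
    }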
|
/linux-4.19.296/drivers/hid/ |
D | hid-elo.c |
    35  static struct workqueue_struct *wq;  variable
    183     queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL);  in elo_work()
    259     queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL);  in elo_probe()
    297     wq = create_singlethread_workqueue("elousb");  in elo_driver_init()
    298     if (!wq)  in elo_driver_init()
    303     destroy_workqueue(wq);  in elo_driver_init()
    312     destroy_workqueue(wq);  in elo_driver_exit()
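Note: elo_work() re-queues itself on the driver's single-threaded queue, producing a periodic poll of the device. A sketch of that self-rearming delayed-work pattern, with a hypothetical one-second interval:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    static struct workqueue_struct *poll_wq;
    static struct delayed_work poll_work;

    static void poll_fn(struct work_struct *work)
    {
        /* ... read the device ... */

        /* re-arm: run again one second (HZ jiffies) from now */
        queue_delayed_work(poll_wq, &poll_work, HZ);
    }

    static int poll_start(void)
    {
        poll_wq = create_singlethread_workqueue("mypoll");
        if (!poll_wq)
            return -ENOMEM;
        INIT_DELAYED_WORK(&poll_work, poll_fn);
        queue_delayed_work(poll_wq, &poll_work, HZ);
        return 0;
    }

    static void poll_stop(void)
    {
        /* safe against self-requeueing work: cancels and waits */
        cancel_delayed_work_sync(&poll_work);
        destroy_workqueue(poll_wq);
    }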
|
/linux-4.19.296/drivers/i2c/busses/ |
D | i2c-taos-evm.c |
    38  static DECLARE_WAIT_QUEUE_HEAD(wq);
    112     wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE,  in taos_smbus_xfer()
    169     wake_up_interruptible(&wq);  in taos_interrupt()
    174     wake_up_interruptible(&wq);  in taos_interrupt()
    181     wake_up_interruptible(&wq);  in taos_interrupt()
    234     wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE,  in taos_connect()
    256     wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE,  in taos_connect()
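Note: this driver is the classic IRQ/sleeper handshake: the submitter sleeps until the interrupt handler advances a state machine back to idle and wakes it, with a timeout as a safety net. A sketch for a hypothetical device:

    #include <linux/wait.h>
    #include <linux/interrupt.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    static DECLARE_WAIT_QUEUE_HEAD(xfer_wq);
    static int xfer_state;            /* written by the IRQ handler */
    #define STATE_IDLE 0
    #define STATE_BUSY 1

    static irqreturn_t dev_irq(int irq, void *dev_id)
    {
        /* ... read status, consume data ... */
        xfer_state = STATE_IDLE;
        wake_up_interruptible(&xfer_wq);
        return IRQ_HANDLED;
    }

    static int do_xfer(void)
    {
        long ret;

        xfer_state = STATE_BUSY;
        /* ... kick off the transfer ... */

        ret = wait_event_interruptible_timeout(xfer_wq,
                                               xfer_state == STATE_IDLE,
                                               msecs_to_jiffies(150));
        if (ret == 0)
            return -ETIMEDOUT;        /* the IRQ never came */
        if (ret < 0)
            return ret;               /* -ERESTARTSYS: signal */
        return 0;                     /* >0: woke in time */
    }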
|
/linux-4.19.296/drivers/iio/adc/ |
D | berlin2-adc.c |
    75  wait_queue_head_t wq;  member
    126     ret = wait_event_interruptible_timeout(priv->wq, priv->data_available,  in berlin2_adc_read()
    177     ret = wait_event_interruptible_timeout(priv->wq, priv->data_available,  in berlin2_adc_tsen_read()
    253     wake_up_interruptible(&priv->wq);  in berlin2_adc_irq()
    273     wake_up_interruptible(&priv->wq);  in berlin2_adc_tsen_irq()
    323     init_waitqueue_head(&priv->wq);  in berlin2_adc_probe()
|
/linux-4.19.296/drivers/char/tpm/ |
D | tpm_ibmvtpm.h |
    34  wait_queue_head_t wq;  member
    46  wait_queue_head_t wq;  member
|
D | tpm_vtpm_proxy.c |
    38  wait_queue_head_t wq;  member
    84      sig = wait_event_interruptible(proxy_dev->wq,  in vtpm_proxy_fops_read()
    163     wake_up_interruptible(&proxy_dev->wq);  in vtpm_proxy_fops_write()
    181     poll_wait(filp, &proxy_dev->wq, wait);  in vtpm_proxy_fops_poll()
    227     wake_up_interruptible(&proxy_dev->wq);  in vtpm_proxy_fops_undo_open()
    366     wake_up_interruptible(&proxy_dev->wq);  in vtpm_proxy_tpm_op_send()
    504     init_waitqueue_head(&proxy_dev->wq);  in vtpm_proxy_create_proxy_dev()
|
D | tpm_ibmvtpm.c |
    122     sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);  in tpm_ibmvtpm_recv()
    233     sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);  in tpm_ibmvtpm_send()
    562     wake_up_interruptible(&ibmvtpm->wq);  in ibmvtpm_crq_process()
    591     wake_up_interruptible(&ibmvtpm->crq_queue.wq);  in ibmvtpm_interrupt()
    639     init_waitqueue_head(&crq_q->wq);  in tpm_ibmvtpm_probe()
    672     init_waitqueue_head(&ibmvtpm->wq);  in tpm_ibmvtpm_probe()
    692     if (!wait_event_timeout(ibmvtpm->crq_queue.wq,  in tpm_ibmvtpm_probe()
|
/linux-4.19.296/drivers/misc/ |
D | aspeed-lpc-snoop.c |
    68  wait_queue_head_t wq;  member
    96      ret = wait_event_interruptible(chan->wq,  in snoop_file_read()
    113     poll_wait(file, &chan->wq, pt);  in snoop_file_poll()
    132     wake_up_interruptible(&chan->wq);  in put_fifo_with_discard()
    199     init_waitqueue_head(&lpc_snoop->chan[channel].wq);  in aspeed_lpc_enable_snoop()
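Note: snoop_file_read() and snoop_file_poll() are the two halves of a pollable character device: read blocks on the waitqueue until the FIFO has data, while poll merely registers the queue via poll_wait() and reports readiness. A condensed sketch using a static kfifo (hypothetical device, locking omitted):

    #include <linux/wait.h>
    #include <linux/poll.h>
    #include <linux/kfifo.h>
    #include <linux/fs.h>
    #include <linux/types.h>

    static DECLARE_WAIT_QUEUE_HEAD(fifo_wq);
    static DEFINE_KFIFO(fifo, u8, 256);

    static ssize_t my_read(struct file *file, char __user *buf,
                           size_t count, loff_t *ppos)
    {
        unsigned int copied;
        int ret;

        /* block until the producer put something in the FIFO */
        ret = wait_event_interruptible(fifo_wq, !kfifo_is_empty(&fifo));
        if (ret)
            return ret;
        ret = kfifo_to_user(&fifo, buf, count, &copied);
        return ret ? ret : copied;
    }

    static __poll_t my_poll(struct file *file, poll_table *pt)
    {
        poll_wait(file, &fifo_wq, pt);   /* register; does not sleep */
        return kfifo_is_empty(&fifo) ? 0 : EPOLLIN | EPOLLRDNORM;
    }

The producer side (here, put_fifo_with_discard()) pushes into the FIFO and calls wake_up_interruptible() on the same queue.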
|
/linux-4.19.296/fs/nfs/blocklayout/ |
D | rpc_pipefs.c |
    62  DECLARE_WAITQUEUE(wq, current);  in bl_resolve_deviceid()
    87      add_wait_queue(&nn->bl_wq, &wq);  in bl_resolve_deviceid()
    90      remove_wait_queue(&nn->bl_wq, &wq);  in bl_resolve_deviceid()
    96      remove_wait_queue(&nn->bl_wq, &wq);  in bl_resolve_deviceid()
|
/linux-4.19.296/include/trace/events/ |
D | btrfs.h |
    1329        __field( const void *, wq )
    1338        __entry->wq = work->wq;
    1347        __entry->work, __entry->normal_work, __entry->wq,
    1403    TP_PROTO(const struct __btrfs_workqueue *wq,
    1406    TP_ARGS(wq, name, high),
    1409        __field( const void *, wq )
    1414    TP_fast_assign_btrfs(btrfs_workqueue_owner(wq),
    1415        __entry->wq = wq;
    1423        __entry->wq)
    1428    TP_PROTO(const struct __btrfs_workqueue *wq,
    [all …]
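Note: these tracepoints stash the workqueue pointer in the ring-buffer record via __field()/__entry. A skeletal TRACE_EVENT in the same shape (hypothetical event; the TRACE_SYSTEM and define_trace boilerplate a real trace header needs is omitted):

    #include <linux/tracepoint.h>

    TRACE_EVENT(my_workqueue_alloc,

        TP_PROTO(const void *wq, const char *name, int high),

        TP_ARGS(wq, name, high),

        /* layout of the per-event ring-buffer record */
        TP_STRUCT__entry(
            __field(const void *, wq)
            __string(name, name)
            __field(int, high)
        ),

        /* runs at the trace site: copy arguments into the record */
        TP_fast_assign(
            __entry->wq = wq;
            __assign_str(name, name);
            __entry->high = high;
        ),

        /* how the record is rendered in the trace output */
        TP_printk("name=%s%s wq=%p", __get_str(name),
                  __entry->high ? "-high" : "", __entry->wq)
    );

The btrfs variant differs mainly in using TP_fast_assign_btrfs(), which also records the owning fs_info obtained through btrfs_workqueue_owner().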
|