Identifier search results for "queued" in linux-4.19.296:

/linux-4.19.296/virt/kvm/async_pf.c
    156  vcpu->async_pf.queued = 0;                       in kvm_clear_async_pf_completion_queue()
    175  vcpu->async_pf.queued--;                         in kvm_check_async_pf_completion()
    185  if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)  in kvm_setup_async_pf()
    217  vcpu->async_pf.queued++;                         in kvm_setup_async_pf()
    245  vcpu->async_pf.queued++;                         in kvm_async_pf_wakeup_all()
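
These call sites cap the number of outstanding async page faults per vCPU: kvm_setup_async_pf() bails out once the counter reaches ASYNC_PF_PER_VCPU, each handled completion decrements it, and tearing down the completion queue resets it to zero. Below is a minimal userspace sketch of that bounded-counter idiom; the struct, function bodies, and the cap value of 64 are illustrative stand-ins, not KVM's actual definitions.

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative cap; the real ASYNC_PF_PER_VCPU lives in kvm_host.h. */
#define ASYNC_PF_PER_VCPU 64

struct vcpu_async_pf {
    unsigned int queued;   /* faults handed off but not yet completed */
};

/* Refuse new work past the cap, as kvm_setup_async_pf() does. */
static bool setup_async_pf(struct vcpu_async_pf *apf)
{
    if (apf->queued >= ASYNC_PF_PER_VCPU)
        return false;      /* caller falls back to the synchronous path */
    apf->queued++;
    return true;
}

/* One completion consumed, as in kvm_check_async_pf_completion(). */
static void check_async_pf_completion(struct vcpu_async_pf *apf)
{
    apf->queued--;
}

/* Whole queue torn down, as in kvm_clear_async_pf_completion_queue(). */
static void clear_async_pf_completion_queue(struct vcpu_async_pf *apf)
{
    apf->queued = 0;
}

int main(void)
{
    struct vcpu_async_pf apf = { 0 };

    for (int i = 0; i < 70; i++)
        setup_async_pf(&apf);
    printf("queued after 70 attempts: %u\n", apf.queued);  /* capped at 64 */

    check_async_pf_completion(&apf);
    printf("after one completion:     %u\n", apf.queued);  /* 63 */

    clear_async_pf_completion_queue(&apf);
    printf("after clearing the queue: %u\n", apf.queued);  /* 0 */
    return 0;
}
```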
/linux-4.19.296/fs/xfs/xfs_mru_cache.c
    102  unsigned int queued; /* work has been queued */  (member)
    204  if (!mru->queued) {                              in _xfs_mru_cache_list_insert()
    205  mru->queued = 1;                                 in _xfs_mru_cache_list_insert()
    280  mru->queued = next;                              in _xfs_mru_cache_reap()
    281  if ((mru->queued > 0)) {                         in _xfs_mru_cache_reap()
    388  if (mru->queued) {                               in xfs_mru_cache_flush()
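
Here `queued` is a scheduled-work latch: the first list insert arms the delayed reap work, the reaper re-arms itself only while entries remain, and flush has something to wait for only when the latch is set. A compilable sketch of that latch pattern follows, with the workqueue machinery reduced to a plain function call and all names merely echoing the XFS ones.

```c
#include <stdio.h>

/* Toy stand-in for the MRU cache; the real code drives a delayed workqueue. */
struct mru_cache {
    unsigned int queued;   /* nonzero while reap work is scheduled */
    unsigned int entries;  /* elements awaiting expiry (illustrative) */
};

static void schedule_reap(struct mru_cache *mru)
{
    (void)mru;   /* the real code calls queue_delayed_work() here */
    printf("reap work scheduled\n");
}

/* First insertion arms the reaper; later insertions see queued set and skip. */
static void mru_cache_list_insert(struct mru_cache *mru)
{
    mru->entries++;
    if (!mru->queued) {
        mru->queued = 1;
        schedule_reap(mru);
    }
}

/* The reaper re-arms itself only while entries remain, mirroring how
 * _xfs_mru_cache_reap() recomputes mru->queued from the next expiry. */
static void mru_cache_reap(struct mru_cache *mru)
{
    if (mru->entries)
        mru->entries--;
    mru->queued = (mru->entries > 0);
    if (mru->queued)
        schedule_reap(mru);
}

int main(void)
{
    struct mru_cache mru = { 0, 0 };
    mru_cache_list_insert(&mru);   /* arms the reaper */
    mru_cache_list_insert(&mru);   /* no-op: already armed */
    mru_cache_reap(&mru);          /* one entry left: re-arms */
    mru_cache_reap(&mru);          /* empty: stays idle */
    printf("queued=%u entries=%u\n", mru.queued, mru.entries);
    return 0;
}
```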
/linux-4.19.296/block/blk-flush.c
    171  bool queued = false, kicked;                     in blk_flush_complete_seq() (local)
    194  queued = blk_flush_queue_rq(rq, true);           in blk_flush_complete_seq()
    218  return kicked | queued;                          in blk_flush_complete_seq()
    225  bool queued = false;                             in flush_end_io() (local)
    271  queued |= blk_flush_complete_seq(rq, fq, seq, error);  in flush_end_io()
    285  if (queued || fq->flush_queue_delayed) {         in flush_end_io()
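
blk-flush tracks whether each step of the flush state machine actually (re)queued a request, ORs those booleans together, and only runs the queue when something was queued or deliberately delayed. Below is a toy model of that accumulate-then-kick shape; the request handling is faked, and only the control flow mirrors flush_end_io().

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for advancing one request through a flush step;
 * returns true when the step (re)queued work, just as the kernel's
 * blk_flush_complete_seq() reports via "kicked | queued". */
static bool flush_complete_seq(int rq, int *pending)
{
    if (rq % 2 == 0) {       /* pretend even requests need a requeue */
        (*pending)++;
        return true;
    }
    return false;
}

/* Mirrors the shape of flush_end_io(): accumulate per-request results with
 * |= and run the queue only if anything was actually queued. */
static void flush_end_io(const int *rqs, int n)
{
    bool queued = false;
    int pending = 0;

    for (int i = 0; i < n; i++)
        queued |= flush_complete_seq(rqs[i], &pending);

    if (queued)
        printf("kick the request queue: %d request(s) pending\n", pending);
    else
        printf("nothing queued, no kick needed\n");
}

int main(void)
{
    int rqs[] = { 1, 2, 3, 4 };
    flush_end_io(rqs, 4);
    return 0;
}
```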
/linux-4.19.296/block/blk-throttle.c
     80  struct list_head queued[2]; /* throtl_qnode [READ/WRITE] */  (member)
    408  struct list_head *queued)                        in throtl_qnode_add_bio() (argument)
    412  list_add_tail(&qn->node, queued);                in throtl_qnode_add_bio()
    421  static struct bio *throtl_peek_queued(struct list_head *queued)  in throtl_peek_queued() (argument)
    423  struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);  in throtl_peek_queued()
    426  if (list_empty(queued))                          in throtl_peek_queued()
    448  static struct bio *throtl_pop_queued(struct list_head *queued,  in throtl_pop_queued() (argument)
    451  struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);  in throtl_pop_queued()
    454  if (list_empty(queued))                          in throtl_pop_queued()
    467  list_move_tail(&qn->node, queued);               in throtl_pop_queued()
    [all …]
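
The throttling code keeps per-direction lists of qnodes (one qnode per bio source) and serves them round-robin: peek looks at the head without dequeuing, and pop takes one bio and rotates a still-nonempty qnode back to the tail, which is the list_move_tail() visible above. The sketch below reimplements just enough of an intrusive list to show that rotation; struct qnode carries a bio count where the kernel carries a bio list, and everything else is simplified.

```c
#include <stddef.h>
#include <stdio.h>

/* Minimal intrusive doubly linked list in the style of the kernel's list.h. */
struct list_head { struct list_head *prev, *next; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_first_entry(head, type, member) \
    container_of((head)->next, type, member)

static void list_init(struct list_head *h) { h->prev = h->next = h; }
static int  list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
    n->prev = h->prev; n->next = h;
    h->prev->next = n; h->prev = n;
}

static void list_del(struct list_head *n)
{
    n->prev->next = n->next; n->next->prev = n->prev;
    list_init(n);
}

/* A qnode groups bios from one source; "bios" is just a count here, where
 * the real struct throtl_qnode holds an actual bio list. */
struct qnode {
    struct list_head node;
    const char *owner;
    int bios;
};

/* Peek at the head qnode without dequeuing, like throtl_peek_queued(). */
static struct qnode *peek_queued(struct list_head *queued)
{
    if (list_empty(queued))
        return NULL;
    return list_first_entry(queued, struct qnode, node);
}

/* Take one bio from the head qnode; if it still has bios, rotate it to the
 * tail (list_move_tail in the original) so sources are served round-robin. */
static struct qnode *pop_queued(struct list_head *queued)
{
    struct qnode *qn = peek_queued(queued);
    if (!qn)
        return NULL;
    qn->bios--;
    list_del(&qn->node);
    if (qn->bios > 0)
        list_add_tail(&qn->node, queued);   /* rotate for fairness */
    return qn;
}

int main(void)
{
    struct list_head queued = LIST_HEAD_INIT(queued);
    struct qnode a = { .owner = "cgroup-a", .bios = 2 };
    struct qnode b = { .owner = "cgroup-b", .bios = 1 };
    list_add_tail(&a.node, &queued);
    list_add_tail(&b.node, &queued);

    struct qnode *qn;
    while ((qn = pop_queued(&queued)))          /* prints a, b, a */
        printf("dispatched one bio from %s\n", qn->owner);
    return 0;
}
```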
/linux-4.19.296/block/bfq-cgroup.c
    108  if (blkg_rwstat_total(&stats->queued))           in bfqg_stats_set_start_empty_time()
    150  blkg_rwstat_total(&stats->queued));              in bfqg_stats_update_avg_queue_size()
    158  blkg_rwstat_add(&bfqg->stats.queued, op, 1);     in bfqg_stats_update_io_add()
    166  blkg_rwstat_add(&bfqg->stats.queued, op, -1);    in bfqg_stats_update_io_remove()
    371  blkg_rwstat_exit(&stats->queued);                in bfqg_stats_exit()
    388  blkg_rwstat_init(&stats->queued, gfp) ||         in bfqg_stats_init()
   1101  .private = offsetof(struct bfq_group, stats.queued),
   1144  .private = offsetof(struct bfq_group, stats.queued),
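
bfq's cgroup stats treat `queued` as a two-direction (read/write) counter pair: +1 when an I/O enters the group, -1 when it leaves, with the read+write total feeding the empty-time and average-queue-size bookkeeping. A stripped-down model of that rwstat shape; the real blkg_rwstat is per-CPU and atomic, and this is deliberately neither.

```c
#include <stdio.h>

/* Direction indices, in the spirit of the kernel's READ/WRITE. */
enum { R = 0, W = 1 };

/* Toy stand-in for struct blkg_rwstat: one counter per direction. */
struct rwstat {
    long cnt[2];
};

static void rwstat_add(struct rwstat *s, int dir, long val)
{
    s->cnt[dir] += val;
}

static long rwstat_total(const struct rwstat *s)
{
    return s->cnt[R] + s->cnt[W];
}

struct group_stats {
    struct rwstat queued;   /* requests currently queued, by direction */
};

/* +1 on enqueue, -1 on dequeue, as in bfqg_stats_update_io_add()/_remove(). */
static void io_add(struct group_stats *st, int dir)    { rwstat_add(&st->queued, dir, 1); }
static void io_remove(struct group_stats *st, int dir) { rwstat_add(&st->queued, dir, -1); }

int main(void)
{
    struct group_stats st = { { { 0, 0 } } };
    io_add(&st, R);
    io_add(&st, W);
    io_add(&st, W);
    io_remove(&st, W);
    /* a total of zero is what bfqg_stats_set_start_empty_time() tests for */
    printf("queued: R=%ld W=%ld total=%ld\n",
           st.queued.cnt[R], st.queued.cnt[W], rwstat_total(&st.queued));
    return 0;
}
```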
/linux-4.19.296/block/bfq-iosched.h
    258  int queued[2];                                   (member)
    511  int queued;                                      (member)
    778  struct blkg_rwstat queued;                       (member)
/linux-4.19.296/block/cfq-iosched.c
    129  int queued[2];                                   (member)
    194  struct blkg_rwstat queued;                       (member)
    544  if (blkg_rwstat_total(&stats->queued))           in cfqg_stats_set_start_empty_time()
    588  blkg_rwstat_total(&stats->queued));              in cfqg_stats_update_avg_queue_size()
    677  blkg_rwstat_add(&cfqg->stats.queued, op, 1);     in cfqg_stats_update_io_add()
    694  blkg_rwstat_add(&cfqg->stats.queued, op, -1);    in cfqg_stats_update_io_remove()
   1536  blkg_rwstat_exit(&stats->queued);                in cfqg_stats_exit()
   1554  blkg_rwstat_init(&stats->queued, gfp) ||         in cfqg_stats_init()
   2064  .private = offsetof(struct cfq_group, stats.queued),
   2105  .private = offsetof(struct cfq_group, stats.queued),
    [all …]
/linux-4.19.296/block/blk-mq.c
    395  data->hctx->queued++;                            in blk_mq_get_request()
    970  static inline unsigned int queued_to_index(unsigned int queued)  in queued_to_index() (argument)
    972  if (!queued)                                     in queued_to_index()
    975  return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);  in queued_to_index()
   1147  int errors, queued;                              in blk_mq_dispatch_rq_list() (local)
   1158  errors = queued = 0;                             in blk_mq_dispatch_rq_list()
   1215  queued++;                                        in blk_mq_dispatch_rq_list()
   1218  hctx->dispatched[queued_to_index(queued)]++;     in blk_mq_dispatch_rq_list()
   1283  return (queued + errors) != 0;                   in blk_mq_dispatch_rq_list()
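
queued_to_index() is fully visible in the matches above: it folds a dispatch batch size into a small histogram index, with bucket 0 reserved for empty batches and ilog2() compressing larger batches into power-of-two buckets, clamped to the last slot. A standalone version follows, assuming BLK_MQ_MAX_DISPATCH_ORDER is 7 (the value I would expect in this tree's include/linux/blk-mq.h) and with a hand-rolled ilog2.

```c
#include <stdio.h>

/* Assumed histogram size; hedged in the text above. */
#define BLK_MQ_MAX_DISPATCH_ORDER 7

/* Integer log2, standing in for the kernel's ilog2(). */
static unsigned int ilog2u(unsigned int v)
{
    unsigned int r = 0;
    while (v >>= 1)
        r++;
    return r;
}

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

/* Mirrors queued_to_index(): bucket 0 means "dispatched nothing", and each
 * further bucket covers a power-of-two range of batch sizes. */
static unsigned int queued_to_index(unsigned int queued)
{
    if (!queued)
        return 0;
    return min_u(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2u(queued) + 1);
}

int main(void)
{
    unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER] = { 0 };
    unsigned int batches[] = { 0, 1, 2, 3, 8, 200 };

    for (size_t i = 0; i < sizeof(batches) / sizeof(batches[0]); i++)
        dispatched[queued_to_index(batches[i])]++;

    /* batch 0 -> bucket 0, 1 -> 1, 2..3 -> 2, 8 -> 4, 200 -> capped at 6 */
    for (unsigned int i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
        printf("bucket %u: %lu\n", i, dispatched[i]);
    return 0;
}
```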
/linux-4.19.296/block/bfq-iosched.c
    421  if (bfqd->queued != 0) {                         in bfq_schedule_dispatch()
   1689  bfqq->queued[rq_is_sync(rq)]++;                  in bfq_add_request()
   1690  bfqd->queued++;                                  in bfq_add_request()
   1807  bfqq->queued[sync]--;                            in bfq_remove_request()
   1808  bfqd->queued--;                                  in bfq_remove_request()
   4596  bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&  in bfq_rq_enqueued()
   4804  if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)  in bfq_update_hw_tag()
   5269  else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)  in bfq_idle_slice_timer_body()
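
bfq keeps a per-queue count split by direction, indexed directly by the boolean result of rq_is_sync(rq), plus a device-wide aggregate that gates dispatch scheduling. A compilable illustration of that bool-as-array-index idiom; all *_sketch names are mine, and the request flags are faked.

```c
#include <stdbool.h>
#include <stdio.h>

/* Per-queue counters indexed by a 0/1 predicate, plus a device-wide total,
 * mirroring bfqq->queued[rq_is_sync(rq)] and bfqd->queued. */
struct bfq_queue_sketch { int queued[2]; };   /* [0]=async, [1]=sync */
struct bfq_data_sketch  { int queued; };      /* total across all queues */

static bool rq_is_sync_sketch(int rq_flags) { return rq_flags & 1; }

static void add_request(struct bfq_queue_sketch *bfqq,
                        struct bfq_data_sketch *bfqd, int rq_flags)
{
    bfqq->queued[rq_is_sync_sketch(rq_flags)]++;   /* bool indexes the array */
    bfqd->queued++;
}

static void remove_request(struct bfq_queue_sketch *bfqq,
                           struct bfq_data_sketch *bfqd, int rq_flags)
{
    bfqq->queued[rq_is_sync_sketch(rq_flags)]--;
    bfqd->queued--;
}

int main(void)
{
    struct bfq_queue_sketch bfqq = { { 0, 0 } };
    struct bfq_data_sketch  bfqd = { 0 };

    add_request(&bfqq, &bfqd, 1);   /* sync */
    add_request(&bfqq, &bfqd, 0);   /* async */
    add_request(&bfqq, &bfqd, 1);   /* sync */
    remove_request(&bfqq, &bfqd, 1);

    /* the "fully idle" test from bfq_idle_slice_timer_body() */
    if (bfqq.queued[0] == 0 && bfqq.queued[1] == 0)
        printf("queue idle\n");
    else
        printf("async=%d sync=%d total=%d\n",
               bfqq.queued[0], bfqq.queued[1], bfqd.queued);
    return 0;
}
```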
/linux-4.19.296/block/blk-mq-debugfs.c
    595  seq_printf(m, "%lu\n", hctx->queued);            in hctx_queued_show()
    604  hctx->queued = 0;                                in hctx_queued_write()
/linux-4.19.296/include/drm/drm_flip_work.h
     77  struct list_head queued;                         (member)
/linux-4.19.296/fs/ocfs2/cluster/heartbeat.c
    837  int queued = 0;                                  in o2hb_shutdown_slot() (local)
    855  queued = 1;                                      in o2hb_shutdown_slot()
    860  if (queued)                                      in o2hb_shutdown_slot()
    921  int queued = 0;                                  in o2hb_check_slot() (local)
   1015  queued = 1;                                      in o2hb_check_slot()
   1067  queued = 1;                                      in o2hb_check_slot()
   1083  if (queued)                                      in o2hb_check_slot()
/linux-4.19.296/include/linux/blk-mq.h
     50  unsigned long queued;                            (member)
/linux-4.19.296/include/linux/kvm_host.h
    285  u32 queued;                                      (member)
/linux-4.19.296/fs/aio.c
   1817  bool queued;                                     (member)
   1828  if (unlikely(pt->queued)) {                      in aio_poll_queue_proc()
   1833  pt->queued = true;                               in aio_poll_queue_proc()
   1865  apt.queued = false;                              in aio_poll()
   1874  if (likely(apt.queued)) {                        in aio_poll()
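
In aio's poll path, `queued` guards against a file that registers more than one wait queue: the first queue_proc callback captures the wait queue and sets the flag, and any further callback records an error, which aio_poll() later checks via apt.queued. A sketch of that one-shot capture follows; the error value and names are illustrative, where the kernel uses -EINVAL and real waitqueue heads.

```c
#include <stdbool.h>
#include <stdio.h>

/* Sketch of aio's poll bookkeeping: the queue_proc callback may run once per
 * wait queue a driver polls on, but aio can track only one, so a second
 * invocation is flagged as an error. */
struct poll_table_sketch {
    bool queued;            /* a wait queue has already been captured */
    int  error;
    const char *waitqueue;
};

/* Called back by "poll" once per wait queue, like aio_poll_queue_proc(). */
static void poll_queue_proc(struct poll_table_sketch *pt, const char *wq)
{
    if (pt->queued) {       /* second wait queue: unsupported, remember error */
        pt->error = -1;
        return;
    }
    pt->queued = true;
    pt->waitqueue = wq;
}

int main(void)
{
    struct poll_table_sketch apt = { .queued = false, .error = 0 };

    poll_queue_proc(&apt, "socket-wq");
    poll_queue_proc(&apt, "second-wq");   /* rejected */

    if (apt.queued && !apt.error)
        printf("armed on %s\n", apt.waitqueue);
    else
        printf("poll not armed (error=%d, like EINVAL in aio_poll)\n", apt.error);
    return 0;
}
```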
/linux-4.19.296/drivers/media/common/videobuf2/videobuf2-core.c
   2186  unsigned int queued:1;                           (member)
   2316  fileio->bufs[i].queued = 1;                      in __vb2_init_fileio()
   2436  buf->queued = 0;                                 in __vb2_perform_fileio()
   2507  buf->queued = 1;                                 in __vb2_perform_fileio()
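
The vb2 fileio emulation packs its per-buffer state into one-bit bitfields, `queued:1` among them, flipping the bit as buffers move between the application and the driver during emulated read()/write(). A small demonstration of that packing; the second flag and the read-cycle choreography are invented for illustration.

```c
#include <stdio.h>

/* One-bit flags packed into per-buffer state, in the style of the vb2
 * fileio emulation's "unsigned int queued:1" member. */
struct fileio_buf {
    unsigned int pos;
    unsigned int queued : 1;    /* buffer is owned by the driver */
    unsigned int dequeued : 1;  /* illustrative second flag sharing the word */
};

int main(void)
{
    struct fileio_buf bufs[3] = { 0 };

    /* initial queueing of every buffer, as in __vb2_init_fileio() */
    for (int i = 0; i < 3; i++)
        bufs[i].queued = 1;

    /* one read() cycle: dequeue to copy data out, then requeue */
    bufs[0].queued = 0;
    bufs[0].dequeued = 1;
    /* ... data would be copied to the caller here ... */
    bufs[0].queued = 1;
    bufs[0].dequeued = 0;

    printf("sizeof(struct fileio_buf) = %zu bytes\n", sizeof(struct fileio_buf));
    printf("buf0 queued=%u\n", (unsigned)bufs[0].queued);
    return 0;
}
```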
/linux-4.19.296/include/linux/spi/spi.h
    526  bool queued;                                     (member)
/linux-4.19.296/lib/Kconfig.debug
   1389  Workqueue used to implicitly guarantee that work items queued
/linux-4.19.296/MAINTAINERS
    758  P: Andres Salomon <dilinger@queued.net>