/linux-4.19.296/block/
D | blk-flush.c
    99  static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
   103          if (blk_rq_sectors(rq))
   107          if (rq->cmd_flags & REQ_PREFLUSH)
   110              (rq->cmd_flags & REQ_FUA))
   116  static unsigned int blk_flush_cur_seq(struct request *rq)
   118          return 1 << ffz(rq->flush.seq);
   121  static void blk_flush_restore_request(struct request *rq)
   128          rq->bio = rq->biotail;
   131          rq->rq_flags &= ~RQF_FLUSH_SEQ;
   132          rq->end_io = rq->flush.saved_end_io;
   [all …]
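blk_flush_cur_seq() reads the current step of a flush sequence out of a bitmask: each completed step sets its bit in rq->flush.seq, so the lowest clear bit names the step now in progress. A minimal userspace sketch of the `1 << ffz(seq)` idiom (the step names are illustrative and ffz32() stands in for the kernel's ffz()):

    #include <stdio.h>

    /* lowest clear bit of x, as the kernel's ffz() computes */
    static unsigned int ffz32(unsigned int x)
    {
            return __builtin_ctz(~x);
    }

    enum { SEQ_PREFLUSH = 1, SEQ_DATA = 2, SEQ_POSTFLUSH = 4, SEQ_DONE = 8 };

    int main(void)
    {
            unsigned int seq = 0;                   /* no step completed yet */

            while (!(seq & SEQ_DONE)) {
                    unsigned int cur = 1u << ffz32(seq);
                    printf("current step: 0x%x\n", cur);
                    seq |= cur;                     /* completing it advances the sequence */
            }
            return 0;
    }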
D | blk-mq.c
    44  static int blk_mq_poll_stats_bkt(const struct request *rq)
    48          ddir = rq_data_dir(rq);
    49          bytes = blk_rq_bytes(rq);
    93                                  struct request *rq, void *priv,
   103          if (rq->part == mi->part)
   119                                  struct request *rq, void *priv,
   124          if (rq->part == mi->part)
   125                  mi->inflight[rq_data_dir(rq)]++;
   281          struct request *rq = tags->static_rqs[tag];
   285          rq->tag = -1;
   [all …]
D | deadline-iosched.c
    54  deadline_rb_root(struct deadline_data *dd, struct request *rq)
    56          return &dd->sort_list[rq_data_dir(rq)];
    63  deadline_latter_request(struct request *rq)
    65          struct rb_node *node = rb_next(&rq->rb_node);
    74  deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
    76          struct rb_root *root = deadline_rb_root(dd, rq);
    78          elv_rb_add(root, rq);
    82  deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
    84          const int data_dir = rq_data_dir(rq);
    86          if (dd->next_rq[data_dir] == rq)
    [all …]
D | elevator.c
    52  #define rq_hash_key(rq)         (blk_rq_pos(rq) + blk_rq_sectors(rq))
    58  static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
    60          struct request_queue *q = rq->q;
    64                  return e->type->ops.mq.allow_merge(q, rq, bio);
    66                  return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);
    74  bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
    76          if (!blk_rq_merge_ok(rq, bio))
    79          if (!elv_iosched_allow_bio_merge(rq, bio))
   255  static inline void __elv_rqhash_del(struct request *rq)
   257          hash_del(&rq->hash);
   [all …]
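rq_hash_key() keys the elevator's merge hash by the sector just past the request's end (start position plus size in sectors), which is exactly where a bio must begin to be back-merged; a candidate request is found by probing the hash with the bio's start sector. A small sketch of that lookup rule, with a toy request type standing in for struct request:

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_rq { unsigned long pos, sectors; };

    /* end sector of the request: where a back-merge candidate must begin */
    static unsigned long rq_hash_key(const struct toy_rq *rq)
    {
            return rq->pos + rq->sectors;
    }

    static bool back_merge_candidate(const struct toy_rq *rq, unsigned long bio_sector)
    {
            return rq_hash_key(rq) == bio_sector;
    }

    int main(void)
    {
            struct toy_rq rq = { .pos = 100, .sectors = 8 };

            printf("bio at 108 merges: %d\n", back_merge_candidate(&rq, 108)); /* 1 */
            printf("bio at 116 merges: %d\n", back_merge_candidate(&rq, 116)); /* 0 */
            return 0;
    }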
D | mq-deadline.c
    67  deadline_rb_root(struct deadline_data *dd, struct request *rq)
    69          return &dd->sort_list[rq_data_dir(rq)];
    76  deadline_latter_request(struct request *rq)
    78          struct rb_node *node = rb_next(&rq->rb_node);
    87  deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
    89          struct rb_root *root = deadline_rb_root(dd, rq);
    91          elv_rb_add(root, rq);
    95  deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
    97          const int data_dir = rq_data_dir(rq);
    99          if (dd->next_rq[data_dir] == rq)
    [all …]
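Both deadline variants (legacy deadline-iosched.c above and mq-deadline.c here) keep one sector-sorted tree per data direction in dd->sort_list[] and cache a dispatch cursor in dd->next_rq[]; before a request is removed, the cursor is stepped to its in-order successor (the "latter" request) so dispatch never follows a dangling pointer. A rough userspace sketch, with a sorted singly linked list standing in for the rbtree:

    #include <stdio.h>

    /* toy request: sector-sorted per direction, READ = 0, WRITE = 1 */
    struct toy_rq { unsigned long sector; int dir; struct toy_rq *next; };

    struct toy_dd {
            struct toy_rq *sort_list[2];    /* heads of the per-direction sorted lists */
            struct toy_rq *next_rq[2];      /* cached dispatch cursor per direction */
    };

    /* insert in sector order, the role elv_rb_add() plays for the rbtree */
    static void dd_add(struct toy_dd *dd, struct toy_rq *rq)
    {
            struct toy_rq **pp = &dd->sort_list[rq->dir];

            while (*pp && (*pp)->sector < rq->sector)
                    pp = &(*pp)->next;
            rq->next = *pp;
            *pp = rq;
    }

    /* before unlinking rq, step the cursor past it, as deadline_del_rq_rb() does */
    static void dd_del(struct toy_dd *dd, struct toy_rq *rq)
    {
            struct toy_rq **pp = &dd->sort_list[rq->dir];

            if (dd->next_rq[rq->dir] == rq)
                    dd->next_rq[rq->dir] = rq->next;        /* the "latter" request */
            while (*pp && *pp != rq)
                    pp = &(*pp)->next;
            if (*pp)
                    *pp = rq->next;
    }

    int main(void)
    {
            struct toy_rq a = { 100, 0, NULL }, b = { 200, 0, NULL };
            struct toy_dd dd = { { NULL, NULL }, { NULL, NULL } };

            dd_add(&dd, &b);
            dd_add(&dd, &a);
            dd.next_rq[0] = &a;
            dd_del(&dd, &a);                /* cursor moves on to b */
            printf("next sector: %lu\n", dd.next_rq[0]->sector);    /* 200 */
            return 0;
    }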
D | blk-map.c
    18  int blk_rq_append_bio(struct request *rq, struct bio **bio)
    22          blk_queue_bounce(rq->q, bio);
    24          if (!rq->bio) {
    25                  blk_rq_bio_prep(rq->q, rq, *bio);
    27                  if (!ll_back_merge_fn(rq->q, rq, *bio)) {
    35                  rq->biotail->bi_next = *bio;
    36                  rq->biotail = *bio;
    37                  rq->__data_len += (*bio)->bi_iter.bi_size;
    58  static int __blk_rq_map_user_iov(struct request *rq,
    62          struct request_queue *q = rq->q;
    [all …]
D | blk-mq-sched.c
    34  void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
    36          struct request_queue *q = rq->q;
    50          rq->elv.icq = icq;
    95          struct request *rq;
   104                  rq = e->type->ops.mq.dispatch_request(hctx);
   105                  if (!rq) {
   115                  list_add(&rq->queuelist, &rq_list);
   142          struct request *rq;
   150                  rq = blk_mq_dequeue_from_ctx(hctx, ctx);
   151                  if (!rq) {
   [all …]
D | blk-core.c
   186  void blk_rq_init(struct request_queue *q, struct request *rq)
   188          memset(rq, 0, sizeof(*rq));
   190          INIT_LIST_HEAD(&rq->queuelist);
   191          INIT_LIST_HEAD(&rq->timeout_list);
   192          rq->cpu = -1;
   193          rq->q = q;
   194          rq->__sector = (sector_t) -1;
   195          INIT_HLIST_NODE(&rq->hash);
   196          RB_CLEAR_NODE(&rq->rb_node);
   197          rq->tag = -1;
   [all …]
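blk_rq_init() zeroes the whole request and then overwrites only the fields whose "empty" value is not zero: list heads point at themselves, and tags, CPU, and sector use -1 as a "not assigned" sentinel. The same zero-then-fix-up pattern in plain C (toy types, not the kernel's):

    #include <string.h>
    #include <stdio.h>

    struct toy_list { struct toy_list *prev, *next; };

    struct toy_rq {
            struct toy_list queuelist;
            int cpu;                        /* -1: no CPU chosen yet */
            int tag;                        /* -1: no driver tag assigned */
            unsigned long long sector;      /* (sector_t)-1: no position yet */
    };

    static void toy_rq_init(struct toy_rq *rq)
    {
            memset(rq, 0, sizeof(*rq));             /* zero is right for most fields */
            rq->queuelist.prev = &rq->queuelist;    /* an empty list points at itself */
            rq->queuelist.next = &rq->queuelist;
            rq->cpu = -1;                           /* non-zero sentinels fixed up after */
            rq->tag = -1;
            rq->sector = (unsigned long long)-1;
    }

    int main(void)
    {
            struct toy_rq rq;

            toy_rq_init(&rq);
            printf("tag=%d empty=%d\n", rq.tag, rq.queuelist.next == &rq.queuelist);
            return 0;
    }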
D | blk-exec.c
    19  static void blk_end_sync_rq(struct request *rq, blk_status_t error)
    21          struct completion *waiting = rq->end_io_data;
    23          rq->end_io_data = NULL;
    48                             struct request *rq, int at_head,
    54          WARN_ON(!blk_rq_is_passthrough(rq));
    56          rq->rq_disk = bd_disk;
    57          rq->end_io = done;
    64          blk_mq_sched_insert_request(rq, at_head, true, false);
    71          rq->rq_flags |= RQF_QUIET;
    72          __blk_end_request_all(rq, BLK_STS_IOERR);
    [all …]
D | bsg.c
    72  static int bsg_scsi_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
    75          struct scsi_request *sreq = scsi_req(rq);
    91  static int bsg_scsi_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
    93          struct scsi_request *sreq = scsi_req(rq);
   117          if (rq->next_rq) {
   119                  hdr->din_resid = scsi_req(rq->next_rq)->resid_len;
   120          } else if (rq_data_dir(rq) == READ) {
   129  static void bsg_scsi_free_rq(struct request *rq)
   131          scsi_req_free_cmd(scsi_req(rq));
   144          struct request *rq, *next_rq = NULL;
   [all …]
D | blk-mq.h
    39  bool blk_mq_get_driver_tag(struct request *rq);
    59  void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
    61  void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
    66  blk_status_t blk_mq_request_issue_directly(struct request *rq);
    97  static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
    99          return READ_ONCE(rq->state);
   176                                             struct request *rq)
   178          blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
   179          rq->tag = -1;
   181          if (rq->rq_flags & RQF_MQ_INFLIGHT) {
   [all …]
D | blk-mq-sched.h
    11  void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio);
    13  void blk_mq_sched_request_inserted(struct request *rq);
    17  bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
    21  void blk_mq_sched_insert_request(struct request *rq, bool at_head,
    42  blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
    48                  return e->type->ops.mq.allow_merge(q, rq, bio);
    53  static inline void blk_mq_sched_completed_request(struct request *rq)
    55          struct elevator_queue *e = rq->q->elevator;
    58                  e->type->ops.mq.completed_request(rq);
    61  static inline void blk_mq_sched_started_request(struct request *rq)
    [all …]
D | noop-iosched.c
    15  static void noop_merged_requests(struct request_queue *q, struct request *rq,
    24          struct request *rq;
    26          rq = list_first_entry_or_null(&nd->queue, struct request, queuelist);
    27          if (rq) {
    28                  list_del_init(&rq->queuelist);
    29                  elv_dispatch_sort(q, rq);
    35  static void noop_add_request(struct request_queue *q, struct request *rq)
    39          list_add_tail(&rq->queuelist, &nd->queue);
    43  noop_former_request(struct request_queue *q, struct request *rq)
    47          if (rq->queuelist.prev == &nd->queue)
    [all …]
D | blk.h
   141  void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
   204  static inline int blk_mark_rq_complete(struct request *rq)
   206          return test_and_set_bit(0, &rq->__deadline);
   209  static inline void blk_clear_rq_complete(struct request *rq)
   211          clear_bit(0, &rq->__deadline);
   214  static inline bool blk_rq_is_complete(struct request *rq)
   216          return test_bit(0, &rq->__deadline);
   222  #define ELV_ON_HASH(rq)         ((rq)->rq_flags & RQF_HASHED)
   224  void blk_insert_flush(struct request *rq);
   226  static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
   [all …]
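These helpers pack a "completed" flag into bit 0 of rq->__deadline, and test_and_set_bit() makes marking completion an atomic claim: whichever path sets the bit first wins, so only one of, say, a timeout handler and a normal completion gets to end the request. A hedged sketch of the same claim with C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* bit 0 of the deadline word doubles as the "complete" flag */
    #define RQ_COMPLETE 1UL

    static atomic_ulong deadline;

    /* true only if someone already set the bit, like test_and_set_bit() */
    static bool mark_rq_complete(void)
    {
            return atomic_fetch_or(&deadline, RQ_COMPLETE) & RQ_COMPLETE;
    }

    int main(void)
    {
            /* normal completion and a timeout both race to claim the request */
            printf("first claim, already complete? %d\n", mark_rq_complete());  /* 0 */
            printf("second claim, already complete? %d\n", mark_rq_complete()); /* 1 */
            return 0;
    }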
D | scsi_ioctl.c
   230  static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
   233          struct scsi_request *req = scsi_req(rq);
   245          rq->timeout = msecs_to_jiffies(hdr->timeout);
   246          if (!rq->timeout)
   247                  rq->timeout = q->sg_timeout;
   248          if (!rq->timeout)
   249                  rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
   250          if (rq->timeout < BLK_MIN_SG_TIMEOUT)
   251                  rq->timeout = BLK_MIN_SG_TIMEOUT;
   256  static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
   [all …]
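blk_fill_sghdr_rq() resolves the SG_IO timeout through a fallback chain: take the user-supplied value, fall back to the per-queue default, then to the global default, and finally clamp to a floor. The same chain as a standalone function (the constants here are illustrative milliseconds, not the kernel's jiffies values):

    #include <stdio.h>

    #define BLK_DEFAULT_SG_TIMEOUT  (60 * 1000)     /* illustrative, in ms */
    #define BLK_MIN_SG_TIMEOUT      (7 * 1000)

    static unsigned int resolve_timeout(unsigned int user_ms, unsigned int queue_ms)
    {
            unsigned int t = user_ms;               /* user-supplied value first */

            if (!t)
                    t = queue_ms;                   /* then the per-queue default */
            if (!t)
                    t = BLK_DEFAULT_SG_TIMEOUT;     /* then the global default */
            if (t < BLK_MIN_SG_TIMEOUT)
                    t = BLK_MIN_SG_TIMEOUT;         /* never below the floor */
            return t;
    }

    int main(void)
    {
            printf("%u\n", resolve_timeout(0, 0));          /* 60000 */
            printf("%u\n", resolve_timeout(1000, 0));       /* clamped to 7000 */
            return 0;
    }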
D | blk-merge.c
   303  void blk_recalc_rq_segments(struct request *rq)
   306                                          &rq->q->queue_flags);
   308          rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
   433  int blk_rq_map_sg(struct request_queue *q, struct request *rq,
   439          if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
   440                  nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
   441          else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
   442                  nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
   443          else if (rq->bio)
   444                  nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
   [all …]
D | blk-softirq.c
    33          struct request *rq;
    35                  rq = list_entry(local_list.next, struct request, ipi_list);
    36                  list_del_init(&rq->ipi_list);
    37                  rq->q->softirq_done_fn(rq);
    44          struct request *rq = data;
    50          list_add_tail(&rq->ipi_list, list);
    52          if (list->next == &rq->ipi_list)
    61  static int raise_blk_irq(int cpu, struct request *rq)
    64          call_single_data_t *data = &rq->csd;
    67          data->info = rq;
    [all …]
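The `list->next == &rq->ipi_list` test after a tail-add is a cheap "was the list empty?" check: the new entry sits first only if nothing was queued before it, and only then does the completion softirq need to be raised; otherwise one is already pending. A sketch of the same test on a toy circular doubly linked list:

    #include <stdio.h>

    struct toy_list { struct toy_list *prev, *next; };

    static void list_init(struct toy_list *l) { l->prev = l->next = l; }

    static void list_add_tail(struct toy_list *entry, struct toy_list *head)
    {
            entry->prev = head->prev;
            entry->next = head;
            head->prev->next = entry;
            head->prev = entry;
    }

    int main(void)
    {
            struct toy_list pending, a, b;

            list_init(&pending);
            list_add_tail(&a, &pending);
            /* a is the first entry, so the softirq must be kicked */
            printf("raise softirq: %d\n", pending.next == &a);      /* 1 */

            list_add_tail(&b, &pending);
            /* b queued behind a: a softirq is already pending, nothing to do */
            printf("raise softirq: %d\n", pending.next == &b);      /* 0 */
            return 0;
    }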
D | blk-tag.c
   262  void blk_queue_end_tag(struct request_queue *q, struct request *rq)
   265          unsigned tag = rq->tag; /* negative tags invalid */
   271          list_del_init(&rq->queuelist);
   272          rq->rq_flags &= ~RQF_QUEUED;
   273          rq->tag = -1;
   274          rq->internal_tag = -1;
   309  int blk_queue_start_tag(struct request_queue *q, struct request *rq)
   317          if (unlikely((rq->rq_flags & RQF_QUEUED))) {
   320                          __func__, rq,
   321                          rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
   [all …]
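blk_queue_start_tag()/blk_queue_end_tag() hand out small integer tags and take them back, with -1 meaning "untagged"; tagging an already-tagged request (the RQF_QUEUED check) is a bug. A toy bitmap allocator showing the same contract:

    #include <stdio.h>

    #define MAX_TAGS 32
    static unsigned int tag_map;    /* bit n set = tag n in use */

    /* hand out the lowest free tag, or -1 when the space is exhausted */
    static int start_tag(void)
    {
            for (int t = 0; t < MAX_TAGS; t++) {
                    if (!(tag_map & (1u << t))) {
                            tag_map |= 1u << t;
                            return t;
                    }
            }
            return -1;
    }

    /* release a tag and reset the owner's copy to -1, as blk_queue_end_tag() does */
    static void end_tag(int *tag)
    {
            if (*tag >= 0)
                    tag_map &= ~(1u << *tag);
            *tag = -1;
    }

    int main(void)
    {
            int tag = start_tag();

            printf("got tag %d\n", tag);    /* 0 */
            end_tag(&tag);
            printf("now %d\n", tag);        /* -1 */
            return 0;
    }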
/linux-4.19.296/drivers/s390/char/
D | raw3270.c
   139          struct raw3270_request *rq;
   142          rq = kzalloc(sizeof(struct raw3270_request), GFP_KERNEL | GFP_DMA);
   143          if (!rq)
   148                  rq->buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
   149                  if (!rq->buffer) {
   150                          kfree(rq);
   154                  rq->size = size;
   155          INIT_LIST_HEAD(&rq->list);
   160          rq->ccw.cda = __pa(rq->buffer);
   161          rq->ccw.flags = CCW_FLAG_SLI;
   [all …]
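raw3270_request_alloc() is the standard two-step allocation with unwinding: if the second allocation (the CCW data buffer) fails, the first (the request itself) is freed before returning, so the caller never sees a half-built object. The shape of it in plain C (toy types; the kernel version also asks for DMA-capable memory with GFP_DMA):

    #include <stdlib.h>

    struct toy_req {
            char  *buffer;  /* I/O buffer, only present if size > 0 */
            size_t size;
    };

    static struct toy_req *req_alloc(size_t size)
    {
            struct toy_req *rq = calloc(1, sizeof(*rq));    /* kzalloc() analogue */

            if (!rq)
                    return NULL;
            if (size > 0) {
                    rq->buffer = malloc(size);
                    if (!rq->buffer) {
                            free(rq);       /* unwind the first allocation */
                            return NULL;
                    }
                    rq->size = size;
            }
            return rq;
    }

    static void req_free(struct toy_req *rq)
    {
            if (rq) {
                    free(rq->buffer);
                    free(rq);
            }
    }

    int main(void)
    {
            struct toy_req *rq = req_alloc(256);

            req_free(rq);
            return 0;
    }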
D | fs3270.c
    48  fs3270_wake_up(struct raw3270_request *rq, void *data)
    64  fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq)
    70          rq->callback = fs3270_wake_up;
    71          rq->callback_data = &fp->wait;
    81                  rc = raw3270_start(view, rq);
    84                  wait_event(fp->wait, raw3270_request_final(rq));
    94  fs3270_reset_callback(struct raw3270_request *rq, void *data)
    98          fp = (struct fs3270 *) rq->view;
    99          raw3270_request_reset(rq);
   104  fs3270_restore_callback(struct raw3270_request *rq, void *data)
   [all …]
/linux-4.19.296/drivers/char/
D | raw.c
   211          struct raw_config_request rq;
   217          if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
   220          return bind_set(rq.raw_minor, rq.block_major, rq.block_minor);
   223          if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
   226          err = bind_get(rq.raw_minor, &dev);
   230          rq.block_major = MAJOR(dev);
   231          rq.block_minor = MINOR(dev);
   233          if (copy_to_user((void __user *)arg, &rq, sizeof(rq)))
   253          struct raw32_config_request rq;
   259          if (copy_from_user(&rq, user_req, sizeof(rq)))
   [all …]
/linux-4.19.296/include/linux/
D | blkdev.h
   291  static inline bool blk_rq_is_scsi(struct request *rq)
   293          return blk_op_is_scsi(req_op(rq));
   296  static inline bool blk_rq_is_private(struct request *rq)
   298          return blk_op_is_private(req_op(rq));
   301  static inline bool blk_rq_is_passthrough(struct request *rq)
   303          return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
   476          void (*initialize_rq_fn)(struct request *rq);
   749  #define blk_noretry_request(rq) \
   750          ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
   765  static inline bool blk_account_rq(struct request *rq)
   [all …]
D | t10-pi.h
    40  static inline u32 t10_pi_ref_tag(struct request *rq)
    42          unsigned int shift = ilog2(queue_logical_block_size(rq->q));
    45          if (rq->q->integrity.interval_exp)
    46                  shift = rq->q->integrity.interval_exp;
    48          return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff;
    57  extern void t10_pi_prepare(struct request *rq, u8 protection_type);
    58  extern void t10_pi_complete(struct request *rq, u8 protection_type,
    61  static inline void t10_pi_complete(struct request *rq, u8 protection_type,
    65  static inline void t10_pi_prepare(struct request *rq, u8 protection_type)
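t10_pi_ref_tag() converts a request's position, counted in 512-byte sectors, into a reference tag counted in integrity intervals: shift right by (log2(interval size) - SECTOR_SHIFT), where the interval is the logical block size unless the integrity profile supplies its own exponent, then truncate to 32 bits. The bare arithmetic as a standalone function (SECTOR_SHIFT is 9; the example values are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9  /* 512-byte sectors */

    /* pos is in 512-byte sectors; interval_shift is log2 of the PI interval size */
    static uint32_t ref_tag(uint64_t pos, unsigned int interval_shift)
    {
            /* the cast truncates to 32 bits, like the kernel's & 0xffffffff */
            return (uint32_t)(pos >> (interval_shift - SECTOR_SHIFT));
    }

    int main(void)
    {
            /* 4096-byte intervals (shift 12): 8 sectors per interval */
            printf("%u\n", ref_tag(16, 12));        /* sector 16 -> interval 2 */
            /* 512-byte intervals (shift 9): the tag equals the sector number */
            printf("%u\n", ref_tag(16, 9));         /* 16 */
            return 0;
    }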
D | blk-mq.h
    96          struct request *rq;
   167          void (*initialize_rq_fn)(struct request *rq);
   182          void (*show_rq)(struct seq_file *m, struct request *rq);
   221  void blk_mq_free_request(struct request *rq);
   247  u32 blk_mq_unique_tag(struct request *rq);
   260  int blk_mq_request_started(struct request *rq);
   261  void blk_mq_start_request(struct request *rq);
   262  void blk_mq_end_request(struct request *rq, blk_status_t error);
   263  void __blk_mq_end_request(struct request *rq, blk_status_t error);
   265  void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
   [all …]
/linux-4.19.296/include/trace/events/
D | block.h
    76          TP_PROTO(struct request_queue *q, struct request *rq),
    78          TP_ARGS(q, rq),
    89                  __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
    90                  __entry->sector = blk_rq_trace_sector(rq);
    91                  __entry->nr_sector = blk_rq_trace_nr_sectors(rq);
    93                  blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
   118          TP_PROTO(struct request *rq, int error, unsigned int nr_bytes),
   120          TP_ARGS(rq, error, nr_bytes),
   132                  __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
   133                  __entry->sector = blk_rq_pos(rq);
   [all …]