
Searched refs: mq (Results 1 – 16 of 16), sorted by relevance

/linux-4.19.296/drivers/sh/maple/
maple.c
122 void (*callback) (struct mapleq *mq), in maple_getcond_callback()
140 struct mapleq *mq; in maple_release_device() local
143 mq = mdev->mq; in maple_release_device()
144 kmem_cache_free(maple_queue_cache, mq->recvbuf); in maple_release_device()
145 kfree(mq); in maple_release_device()
172 mdev->mq->command = command; in maple_add_packet()
173 mdev->mq->length = length; in maple_add_packet()
176 mdev->mq->sendbuf = sendbuf; in maple_add_packet()
179 list_add_tail(&mdev->mq->list, &maple_waitq); in maple_add_packet()
188 struct mapleq *mq; in maple_allocq() local
[all …]
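
The maple.c hits above trace the lifetime of a struct mapleq: maple_add_packet() fills in command, length, and sendbuf, then queues the element on maple_waitq, while maple_release_device() frees the receive buffer and the element itself. A minimal userspace sketch of that fill-then-enqueue pattern (simplified types, a plain tail pointer instead of the kernel's list_head; names here are illustrative, not the kernel API):

    #include <stdlib.h>

    struct mapleq_s {                  /* simplified stand-in for struct mapleq */
        unsigned command;
        size_t length;
        void *sendbuf;
        struct mapleq_s *next;
    };

    static struct mapleq_s *waitq_head, *waitq_tail;  /* stand-in for maple_waitq */

    /* Fill a packet and append it to the wait queue, as maple_add_packet() does. */
    static int add_packet(struct mapleq_s *mq, unsigned command,
                          void *sendbuf, size_t length)
    {
        mq->command = command;
        mq->length = length;
        mq->sendbuf = sendbuf;
        mq->next = NULL;
        if (waitq_tail)
            waitq_tail->next = mq;
        else
            waitq_head = mq;
        waitq_tail = mq;
        return 0;
    }

    /* Tear a packet down, mirroring maple_release_device(); the kernel
     * additionally returns recvbuf to a kmem_cache. */
    static void release_packet(struct mapleq_s *mq)
    {
        free(mq->sendbuf);
        free(mq);
    }
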
/linux-4.19.296/block/
Makefile
9 blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
10 blk-mq-sysfs.o blk-mq-cpumap.o blk-mq-sched.o ioctl.o \
24 obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o
32 obj-$(CONFIG_BLK_MQ_PCI) += blk-mq-pci.o
33 obj-$(CONFIG_BLK_MQ_VIRTIO) += blk-mq-virtio.o
34 obj-$(CONFIG_BLK_MQ_RDMA) += blk-mq-rdma.o
37 obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o
38 obj-$(CONFIG_BLK_DEBUG_FS_ZONED)+= blk-mq-debugfs-zoned.o
blk-mq-sched.h
47 if (e && e->type->ops.mq.allow_merge) in blk_mq_sched_allow_merge()
48 return e->type->ops.mq.allow_merge(q, rq, bio); in blk_mq_sched_allow_merge()
57 if (e && e->type->ops.mq.completed_request) in blk_mq_sched_completed_request()
58 e->type->ops.mq.completed_request(rq); in blk_mq_sched_completed_request()
66 if (e && e->type->ops.mq.started_request) in blk_mq_sched_started_request()
67 e->type->ops.mq.started_request(rq); in blk_mq_sched_started_request()
75 if (e && e->type->ops.mq.requeue_request) in blk_mq_sched_requeue_request()
76 e->type->ops.mq.requeue_request(rq); in blk_mq_sched_requeue_request()
83 if (e && e->type->ops.mq.has_work) in blk_mq_sched_has_work()
84 return e->type->ops.mq.has_work(hctx); in blk_mq_sched_has_work()
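
Every helper in blk-mq-sched.h follows the same convention: check that an elevator is attached and that the specific ops.mq hook is non-NULL before calling through it, with a sensible default otherwise. The same guard recurs in the blk-ioc.c and blk-mq.c hits below. A self-contained sketch of that optional-callback pattern (struct names are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stddef.h>

    struct request;                          /* opaque for the sketch */

    struct sched_ops {                       /* stand-in for elevator_mq_ops */
        bool (*has_work)(void *hctx);        /* any hook may be NULL */
        void (*completed_request)(struct request *rq);
    };

    struct elevator {                        /* stand-in for elevator_queue */
        const struct sched_ops *ops;
    };

    /* Guarded call: dispatch only if an elevator is set and the hook exists. */
    static bool sched_has_work(struct elevator *e, void *hctx)
    {
        if (e && e->ops->has_work)
            return e->ops->has_work(hctx);
        return false;                        /* default when no hook is provided */
    }

    static void sched_completed(struct elevator *e, struct request *rq)
    {
        if (e && e->ops->completed_request)
            e->ops->completed_request(rq);   /* silently a no-op otherwise */
    }
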
blk-mq-sched.c
97 if (e->type->ops.mq.has_work && in blk_mq_do_dispatch_sched()
98 !e->type->ops.mq.has_work(hctx)) in blk_mq_do_dispatch_sched()
104 rq = e->type->ops.mq.dispatch_request(hctx); in blk_mq_do_dispatch_sched()
175 const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request; in blk_mq_sched_dispatch_requests()
326 if (e && e->type->ops.mq.bio_merge) { in __blk_mq_sched_bio_merge()
328 return e->type->ops.mq.bio_merge(hctx, bio); in __blk_mq_sched_bio_merge()
392 if (e && e->type->ops.mq.insert_requests) { in blk_mq_sched_insert_request()
396 e->type->ops.mq.insert_requests(hctx, &list, at_head); in blk_mq_sched_insert_request()
415 if (e && e->type->ops.mq.insert_requests) in blk_mq_sched_insert_requests()
416 e->type->ops.mq.insert_requests(hctx, list, false); in blk_mq_sched_insert_requests()
[all …]
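
blk_mq_do_dispatch_sched() shows the consumer side: a loop that keeps pulling requests from the scheduler while has_work() reports pending work. A rough standalone sketch of that pull loop (the real function also checks dispatch budget and queue state):

    #include <stdbool.h>
    #include <stddef.h>

    struct request;

    struct dispatch_ops {
        bool (*has_work)(void *hctx);                    /* optional */
        struct request *(*dispatch_request)(void *hctx); /* NULL when empty */
    };

    /* Drain the scheduler: bail out early if has_work() says the internal
     * queues are empty, otherwise pull until dispatch_request() returns NULL. */
    static void do_dispatch_sched(const struct dispatch_ops *ops, void *hctx,
                                  void (*issue)(struct request *rq))
    {
        struct request *rq;

        if (ops->has_work && !ops->has_work(hctx))
            return;

        while ((rq = ops->dispatch_request(hctx)) != NULL)
            issue(rq);
    }
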
elevator.c
63 if (e->uses_mq && e->type->ops.mq.allow_merge) in elv_iosched_allow_bio_merge()
64 return e->type->ops.mq.allow_merge(q, rq, bio); in elv_iosched_allow_bio_merge()
99 static struct elevator_type *elevator_find(const char *name, bool mq) in elevator_find() argument
104 if (elevator_match(e, name) && (mq == e->uses_mq)) in elevator_find()
246 if (e->uses_mq && e->type->ops.mq.exit_sched) in elevator_exit()
459 if (e->uses_mq && e->type->ops.mq.request_merge) in elv_merge()
460 return e->type->ops.mq.request_merge(q, req, bio); in elv_merge()
513 if (e->uses_mq && e->type->ops.mq.request_merged) in elv_merged_request()
514 e->type->ops.mq.request_merged(q, rq, type); in elv_merged_request()
530 if (e->uses_mq && e->type->ops.mq.requests_merged) in elv_merge_requests()
[all …]
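
elevator_find() in the elevator.c hits matches a registered scheduler both by name and by its mq flag, so legacy and blk-mq schedulers can share a name without colliding. A hedged sketch of that lookup over a registration list (illustrative types; the kernel walks the global elv_list under a spinlock):

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    struct elevator_type_s {           /* stand-in for struct elevator_type */
        const char *name;
        bool uses_mq;
        struct elevator_type_s *next;
    };

    /* Match both the name and the mq flag, as elevator_find(name, mq) does. */
    static struct elevator_type_s *
    find_elevator(struct elevator_type_s *head, const char *name, bool mq)
    {
        for (struct elevator_type_s *e = head; e; e = e->next)
            if (strcmp(e->name, name) == 0 && e->uses_mq == mq)
                return e;
        return NULL;
    }
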
blk-ioc.c
51 if (et->uses_mq && et->ops.mq.exit_icq) in ioc_exit_icq()
52 et->ops.mq.exit_icq(icq); in ioc_exit_icq()
425 if (et->uses_mq && et->ops.mq.init_icq) in ioc_create_icq()
426 et->ops.mq.init_icq(icq); in ioc_create_icq()
blk-mq.c
367 if (!op_is_flush(op) && e->type->ops.mq.limit_depth && in blk_mq_get_request()
369 e->type->ops.mq.limit_depth(op, data); in blk_mq_get_request()
387 if (e && e->type->ops.mq.prepare_request) { in blk_mq_get_request()
391 e->type->ops.mq.prepare_request(rq, bio); in blk_mq_get_request()
494 if (e && e->type->ops.mq.finish_request) in blk_mq_free_request()
495 e->type->ops.mq.finish_request(rq); in blk_mq_free_request()
2903 if (q->elevator && q->elevator->type->ops.mq.depth_updated) in blk_mq_update_nr_requests()
2904 q->elevator->type->ops.mq.depth_updated(hctx); in blk_mq_update_nr_requests()
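
The blk-mq.c hits wire the scheduler into the request lifecycle: limit_depth may shrink the tag depth before allocation (the real code skips it for flush requests, per the op_is_flush() test above), prepare_request runs once the request exists, and finish_request mirrors it on free. A compact sketch of that hook sequence (illustrative names, not the kernel API):

    #include <stdlib.h>

    struct request { void *sched_data; };

    struct lifecycle_ops {                    /* every hook is optional */
        void (*limit_depth)(unsigned op, void *alloc_data);
        void (*prepare_request)(struct request *rq);
        void (*finish_request)(struct request *rq);
    };

    static struct request *get_request(const struct lifecycle_ops *ops,
                                       unsigned op, void *alloc_data)
    {
        struct request *rq;

        if (ops->limit_depth)
            ops->limit_depth(op, alloc_data); /* may reduce allocation depth */

        rq = calloc(1, sizeof(*rq));          /* stands in for tag allocation */
        if (rq && ops->prepare_request)
            ops->prepare_request(rq);
        return rq;
    }

    static void free_request(const struct lifecycle_ops *ops, struct request *rq)
    {
        if (ops->finish_request)
            ops->finish_request(rq);          /* mirror of prepare_request */
        free(rq);
    }
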
mq-deadline.c
777 .ops.mq = {
kyber-iosched.c
955 .ops.mq = {
bfq-iosched.c
5812 .ops.mq = {
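
mq-deadline, Kyber, and BFQ all plug into the block layer the same way: a static struct elevator_type whose .ops.mq member is filled with designated initializers. A sketch of that registration shape (abridged, illustrative types; hooks left out of the initializer stay NULL and are skipped by the guarded callers shown earlier):

    #include <stdbool.h>
    #include <stddef.h>

    struct request;

    struct mq_ops_s {                 /* abridged stand-in for elevator_mq_ops */
        struct request *(*dispatch_request)(void *hctx);
        bool (*has_work)(void *hctx);
    };

    struct elevator_type_s {
        struct { struct mq_ops_s mq; } ops;
        const char *elevator_name;
    };

    static struct request *noop_dispatch(void *hctx) { (void)hctx; return NULL; }
    static bool noop_has_work(void *hctx) { (void)hctx; return false; }

    /* Same ".ops.mq = { ... }" shape as the three hits above. */
    static struct elevator_type_s my_sched = {
        .ops.mq = {
            .dispatch_request = noop_dispatch,
            .has_work         = noop_has_work,
        },
        .elevator_name = "my-sched",  /* hypothetical scheduler name */
    };
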
/linux-4.19.296/drivers/isdn/capi/
capilib.c
48 struct capilib_msgidqueue *mq; in mq_enqueue() local
49 if ((mq = np->msgidfree) == NULL) in mq_enqueue()
51 np->msgidfree = mq->next; in mq_enqueue()
52 mq->msgid = msgid; in mq_enqueue()
53 mq->next = NULL; in mq_enqueue()
55 np->msgidlast->next = mq; in mq_enqueue()
56 np->msgidlast = mq; in mq_enqueue()
58 np->msgidqueue = mq; in mq_enqueue()
68 struct capilib_msgidqueue *mq = *pp; in mq_dequeue() local
69 *pp = mq->next; in mq_dequeue()
[all …]
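
capilib keeps pending CAPI message ids in a FIFO backed by a free list: mq_enqueue() pops a node from np->msgidfree, stores the id, and appends it at np->msgidlast; mq_dequeue() unlinks a node and returns it to the free list. A standalone sketch of the enqueue half (illustrative names):

    #include <stdbool.h>
    #include <stddef.h>

    struct msgid_node {
        unsigned msgid;
        struct msgid_node *next;
    };

    struct msgid_queue {
        struct msgid_node *free_list;  /* like np->msgidfree */
        struct msgid_node *head;       /* like np->msgidqueue */
        struct msgid_node *tail;       /* like np->msgidlast */
    };

    /* Take a node from the free list and append it at the FIFO tail. */
    static bool enqueue_msgid(struct msgid_queue *q, unsigned msgid)
    {
        struct msgid_node *n = q->free_list;

        if (!n)
            return false;              /* pool exhausted, as in the original */
        q->free_list = n->next;
        n->msgid = msgid;
        n->next = NULL;
        if (q->tail)
            q->tail->next = n;
        else
            q->head = n;
        q->tail = n;
        return true;
    }
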
/linux-4.19.296/drivers/misc/sgi-xp/
xpc_uv.c
109 xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name) in xpc_get_gru_mq_irq_uv() argument
111 int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); in xpc_get_gru_mq_irq_uv()
114 mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset, in xpc_get_gru_mq_irq_uv()
116 if (mq->irq < 0) in xpc_get_gru_mq_irq_uv()
117 return mq->irq; in xpc_get_gru_mq_irq_uv()
119 mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset); in xpc_get_gru_mq_irq_uv()
123 mq->irq = SGI_XPC_ACTIVATE; in xpc_get_gru_mq_irq_uv()
125 mq->irq = SGI_XPC_NOTIFY; in xpc_get_gru_mq_irq_uv()
129 mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq; in xpc_get_gru_mq_irq_uv()
130 uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value); in xpc_get_gru_mq_irq_uv()
[all …]
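
The detail worth noting in xpc_get_gru_mq_irq_uv() is how the MMR value is composed before being written back: the target CPU's physical id occupies the upper 32 bits and the interrupt vector the lower bits. A sketch of just that bit packing (the uv_* MMR accessors are hardware-specific and deliberately left out):

    #include <stdint.h>

    /* Pack the interrupt target as the snippet above does:
     * upper 32 bits = physical CPU id, lower bits = IRQ vector. */
    static uint64_t pack_mmr_value(uint32_t cpu_physical_id, uint32_t irq)
    {
        return ((uint64_t)cpu_physical_id << 32) | irq;
    }
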
/linux-4.19.296/drivers/misc/sgi-gru/
grukservices.c
146 #define HSTATUS(mq, h) ((mq) + offsetof(struct message_queue, hstatus[h])) argument
560 struct message_queue *mq = p; in gru_create_message_queue() local
564 memset(mq, 0, bytes); in gru_create_message_queue()
565 mq->start = &mq->data; in gru_create_message_queue()
566 mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES; in gru_create_message_queue()
567 mq->next = &mq->data; in gru_create_message_queue()
568 mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES; in gru_create_message_queue()
569 mq->qlines = qlines; in gru_create_message_queue()
570 mq->hstatus[0] = 0; in gru_create_message_queue()
571 mq->hstatus[1] = 1; in gru_create_message_queue()
[all …]
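
gru_create_message_queue() lays the queue out in cache-line units inside one contiguous allocation: start and next point at the data area, start2 marks the second half, and limit sits two lines before the end. A sketch of that layout arithmetic over a flat buffer (the constant is illustrative; the real line size is GRU_CACHE_LINE_BYTES):

    #include <string.h>

    #define CACHE_LINE_BYTES 64        /* illustrative stand-in */

    struct msg_queue {
        char *start, *start2, *next, *limit;
        int qlines;
        int hstatus[2];
        char data[];                   /* qlines cache lines follow */
    };

    /* Carve a flat buffer into the same layout the snippet above builds. */
    static void init_queue(struct msg_queue *mq, size_t bytes, int qlines)
    {
        memset(mq, 0, bytes);
        mq->start  = mq->data;
        mq->start2 = mq->data + (qlines / 2 - 1) * CACHE_LINE_BYTES;
        mq->next   = mq->data;
        mq->limit  = mq->data + (qlines - 2) * CACHE_LINE_BYTES;
        mq->qlines = qlines;
        mq->hstatus[0] = 0;
        mq->hstatus[1] = 1;
    }
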
grukservices.h
45 void *mq; /* message queue vaddress */ member
/linux-4.19.296/include/linux/
maple.h
70 struct mapleq *mq; member
71 void (*callback) (struct mapleq * mq);
90 void (*callback) (struct mapleq * mq),
elevator.h
143 struct elevator_mq_ops mq; member