/linux-4.19.296/drivers/thunderbolt/ |
D | nhi.c |
    22  #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")  argument
    41  static int ring_interrupt_index(const struct tb_ring *ring)  in ring_interrupt_index() argument
    43  int bit = ring->hop;  in ring_interrupt_index()
    44  if (!ring->is_tx)  in ring_interrupt_index()
    45  bit += ring->nhi->hop_count;  in ring_interrupt_index()
    54  static void ring_interrupt_active(struct tb_ring *ring, bool active)  in ring_interrupt_active() argument
    57  ring_interrupt_index(ring) / 32 * 4;  in ring_interrupt_active()
    58  int bit = ring_interrupt_index(ring) & 31;  in ring_interrupt_active()
    62  if (ring->irq > 0) {  in ring_interrupt_active()
    67  if (ring->is_tx)  in ring_interrupt_active()
    [all …]
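
The nhi.c hits compute a per-ring interrupt bit index: TX rings use the hop number directly, RX rings are offset by the controller's hop count, and the mask register is then addressed at index / 32 * 4 with the bit at index & 31. A minimal standalone sketch of that arithmetic, with a hypothetical hop_count value rather than the real driver state:

    #include <stdbool.h>
    #include <stdio.h>

    /* Standalone analog of ring_interrupt_index()/ring_interrupt_active():
     * TX rings use bit == hop, RX rings use bit == hop + hop_count. */
    static int demo_interrupt_index(int hop, bool is_tx, int hop_count)
    {
        int bit = hop;

        if (!is_tx)
            bit += hop_count;
        return bit;
    }

    int main(void)
    {
        const int hop_count = 12;           /* hypothetical controller hop count */
        int index = demo_interrupt_index(3, false, hop_count);

        /* 32 interrupt bits per 32-bit register, 4 bytes per register */
        int reg_offset = index / 32 * 4;
        int bit = index & 31;

        printf("index=%d reg_offset=%#x bit=%d\n", index, (unsigned int)reg_offset, bit);
        return 0;
    }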
|
D | ctl.c |
    325  static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,  in tb_ctl_tx_callback() argument
    406  static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,  in tb_ctl_rx_callback() argument
|
/linux-4.19.296/include/linux/ |
D | skb_array.h |
    30  struct ptr_ring ring;  member
    38  return __ptr_ring_full(&a->ring);  in __skb_array_full()
    43  return ptr_ring_full(&a->ring);  in skb_array_full()
    48  return ptr_ring_produce(&a->ring, skb);  in skb_array_produce()
    53  return ptr_ring_produce_irq(&a->ring, skb);  in skb_array_produce_irq()
    58  return ptr_ring_produce_bh(&a->ring, skb);  in skb_array_produce_bh()
    63  return ptr_ring_produce_any(&a->ring, skb);  in skb_array_produce_any()
    72  return __ptr_ring_empty(&a->ring);  in __skb_array_empty()
    77  return __ptr_ring_peek(&a->ring);  in __skb_array_peek()
    82  return ptr_ring_empty(&a->ring);  in skb_array_empty()
    [all …]
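
skb_array is a thin, skb-typed facade over the generic ptr_ring: every helper above just forwards to the matching ptr_ring call on the embedded ring member. A userspace sketch of that wrapper pattern, using a simplified pointer ring of an assumed power-of-two size and invented names, not the kernel ptr_ring API:

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified pointer ring: free-running producer/consumer indices
     * masked by a power-of-two capacity. */
    struct ptr_ring_demo {
        void *queue[8];
        unsigned int producer, consumer;
    };

    static int ptr_ring_demo_produce(struct ptr_ring_demo *r, void *p)
    {
        if (r->producer - r->consumer == 8)
            return -1;                      /* full */
        r->queue[r->producer++ & 7] = p;
        return 0;
    }

    static void *ptr_ring_demo_consume(struct ptr_ring_demo *r)
    {
        if (r->producer == r->consumer)
            return NULL;                    /* empty */
        return r->queue[r->consumer++ & 7];
    }

    struct pkt { int id; };

    /* skb_array-style facade: typed produce/consume over the generic ring */
    struct pkt_array { struct ptr_ring_demo ring; };

    static int pkt_array_produce(struct pkt_array *a, struct pkt *p)
    {
        return ptr_ring_demo_produce(&a->ring, p);
    }

    static struct pkt *pkt_array_consume(struct pkt_array *a)
    {
        return ptr_ring_demo_consume(&a->ring);
    }

    int main(void)
    {
        struct pkt_array a = { { { NULL }, 0, 0 } };
        struct pkt p = { 42 };

        pkt_array_produce(&a, &p);
        printf("consumed id=%d\n", pkt_array_consume(&a)->id);
        return 0;
    }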
|
D | thunderbolt.h |
    552  void tb_ring_start(struct tb_ring *ring);
    553  void tb_ring_stop(struct tb_ring *ring);
    554  void tb_ring_free(struct tb_ring *ring);
    556  int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);
    574  static inline int tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame)  in tb_ring_rx() argument
    576  WARN_ON(ring->is_tx);  in tb_ring_rx()
    577  return __tb_ring_enqueue(ring, frame);  in tb_ring_rx()
    595  static inline int tb_ring_tx(struct tb_ring *ring, struct ring_frame *frame)  in tb_ring_tx() argument
    597  WARN_ON(!ring->is_tx);  in tb_ring_tx()
    598  return __tb_ring_enqueue(ring, frame);  in tb_ring_tx()
    [all …]
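
tb_ring_rx() and tb_ring_tx() are direction-checked wrappers over a single __tb_ring_enqueue(); each warns if it is handed a ring of the wrong direction. A small standalone analog of that pattern, with assert() standing in for WARN_ON() and invented demo_* names:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct demo_ring { bool is_tx; int queued; };
    struct demo_frame { int len; };

    /* Both directions funnel into one enqueue routine. */
    static int demo_enqueue(struct demo_ring *ring, struct demo_frame *frame)
    {
        (void)frame;                    /* payload handling elided */
        ring->queued++;
        return 0;
    }

    static int demo_ring_rx(struct demo_ring *ring, struct demo_frame *frame)
    {
        assert(!ring->is_tx);           /* the kernel header uses WARN_ON() */
        return demo_enqueue(ring, frame);
    }

    static int demo_ring_tx(struct demo_ring *ring, struct demo_frame *frame)
    {
        assert(ring->is_tx);
        return demo_enqueue(ring, frame);
    }

    int main(void)
    {
        struct demo_ring rx = { .is_tx = false }, tx = { .is_tx = true };
        struct demo_frame f = { 64 };

        demo_ring_rx(&rx, &f);
        demo_ring_tx(&tx, &f);
        printf("queued: rx=%d tx=%d\n", rx.queued, tx.queued);
        return 0;
    }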
|
D | if_tap.h | 73 struct ptr_ring ring; member
|
D | libata.h | 691 struct ata_ering_entry ring[ATA_ERING_SIZE]; member
|
/linux-4.19.296/virt/kvm/ |
D | coalesced_mmio.c |
    45  struct kvm_coalesced_mmio_ring *ring;  in coalesced_mmio_has_room() local
    54  ring = dev->kvm->coalesced_mmio_ring;  in coalesced_mmio_has_room()
    55  avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;  in coalesced_mmio_has_room()
    69  struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;  in coalesced_mmio_write() local
    77  insert = READ_ONCE(ring->last);  in coalesced_mmio_write()
    86  ring->coalesced_mmio[insert].phys_addr = addr;  in coalesced_mmio_write()
    87  ring->coalesced_mmio[insert].len = len;  in coalesced_mmio_write()
    88  memcpy(ring->coalesced_mmio[insert].data, val, len);  in coalesced_mmio_write()
    90  ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;  in coalesced_mmio_write()
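
coalesced_mmio_write() appends an entry at the producer index (last), and coalesced_mmio_has_room() treats the ring as full while (first - last - 1) % MAX is zero, i.e. one slot is always kept free so full and empty are distinguishable. A standalone sketch of that room check and insert, with a hypothetical 8-entry ring standing in for KVM_COALESCED_MMIO_MAX:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define DEMO_RING_MAX 8                 /* hypothetical size */

    struct demo_entry { uint64_t phys_addr; uint32_t len; uint8_t data[8]; };

    struct demo_ring {
        uint32_t first;                     /* consumer index */
        uint32_t last;                      /* producer index */
        struct demo_entry slot[DEMO_RING_MAX];
    };

    static int demo_has_room(const struct demo_ring *r)
    {
        /* same arithmetic as coalesced_mmio_has_room() */
        uint32_t avail = (r->first - r->last - 1) % DEMO_RING_MAX;
        return avail != 0;
    }

    static int demo_write(struct demo_ring *r, uint64_t addr, const void *val, uint32_t len)
    {
        uint32_t insert = r->last;

        if (!demo_has_room(r) || len > sizeof(r->slot[0].data))
            return -1;

        r->slot[insert].phys_addr = addr;
        r->slot[insert].len = len;
        memcpy(r->slot[insert].data, val, len);
        r->last = (insert + 1) % DEMO_RING_MAX;
        return 0;
    }

    int main(void)
    {
        struct demo_ring r = { 0 };
        uint32_t v = 0xabcd;

        demo_write(&r, 0x1000, &v, sizeof(v));
        printf("first=%u last=%u\n", r.first, r.last);
        return 0;
    }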
|
/linux-4.19.296/drivers/xen/ |
D | pvcalls-front.c |
    41  struct xen_pvcalls_front_ring ring;  member
    74  struct pvcalls_data_intf *ring;  member
    136  *req_id = bedata->ring.req_prod_pvt & (RING_SIZE(&bedata->ring) - 1);  in get_request()
    137  if (RING_FULL(&bedata->ring) ||  in get_request()
    145  struct pvcalls_data_intf *intf = map->active.ring;  in pvcalls_front_write_todo()
    162  struct pvcalls_data_intf *intf = map->active.ring;  in pvcalls_front_read_todo()
    193  while (RING_HAS_UNCONSUMED_RESPONSES(&bedata->ring)) {  in pvcalls_front_event_handler()
    194  rsp = RING_GET_RESPONSE(&bedata->ring, bedata->ring.rsp_cons);  in pvcalls_front_event_handler()
    225  bedata->ring.rsp_cons++;  in pvcalls_front_event_handler()
    228  RING_FINAL_CHECK_FOR_RESPONSES(&bedata->ring, more);  in pvcalls_front_event_handler()
    [all …]
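
get_request() picks the next request slot by masking the free-running private producer index with the power-of-two ring size, refusing when the ring is full. A standalone sketch of that index arithmetic, with invented demo_* names and a hypothetical ring size instead of the Xen RING_* macros:

    #include <stdio.h>

    #define DEMO_RING_SIZE 16               /* hypothetical; must be a power of two */

    struct demo_front_ring {
        unsigned int req_prod_pvt;          /* private producer index, free-running */
        unsigned int rsp_cons;              /* response consumer index, free-running */
    };

    static int demo_ring_full(const struct demo_front_ring *r)
    {
        /* full once the producer has run a whole ring ahead of consumed responses */
        return r->req_prod_pvt - r->rsp_cons >= DEMO_RING_SIZE;
    }

    static int demo_get_request(struct demo_front_ring *r, unsigned int *req_id)
    {
        if (demo_ring_full(r))
            return -1;
        *req_id = r->req_prod_pvt & (DEMO_RING_SIZE - 1);
        r->req_prod_pvt++;
        return 0;
    }

    int main(void)
    {
        struct demo_front_ring r = { 0, 0 };
        unsigned int id;

        while (demo_get_request(&r, &id) == 0)
            ;                               /* fills all 16 slots, then reports full */
        printf("stopped with req_prod_pvt=%u (ring full)\n", r.req_prod_pvt);
        return 0;
    }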
|
D | pvcalls-back.c |
    50  struct xen_pvcalls_back_ring ring;  member
    69  struct pvcalls_data_intf *ring;  member
    107  struct pvcalls_data_intf *intf = map->ring;  in pvcalls_conn_back_read()
    178  struct pvcalls_data_intf *intf = map->ring;  in pvcalls_conn_back_write()
    287  rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);  in pvcalls_back_socket()
    346  map->ring = page;  in pvcalls_new_active_socket()
    347  map->ring_order = map->ring->ring_order;  in pvcalls_new_active_socket()
    355  ret = xenbus_map_ring_valloc(fedata->dev, map->ring->ref,  in pvcalls_new_active_socket()
    432  rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);  in pvcalls_back_connect()
    457  xenbus_unmap_ring_vfree(dev, (void *)map->ring);  in pvcalls_back_release_active()
    [all …]
|
D | evtchn.c |
    68  evtchn_port_t *ring;  member
    90  static void evtchn_free_ring(evtchn_port_t *ring)  in evtchn_free_ring() argument
    92  kvfree(ring);  in evtchn_free_ring()
    104  return u->ring + evtchn_ring_offset(u, idx);  in evtchn_ring_entry()
    250  copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))  in evtchn_read()
    328  old_ring = u->ring;  in evtchn_resize_ring()
    348  memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));  in evtchn_resize_ring()
    350  u->ring_size * sizeof(*u->ring));  in evtchn_resize_ring()
    352  u->ring = new_ring;  in evtchn_resize_ring()
    682  evtchn_free_ring(u->ring);  in evtchn_release()
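
The interesting detail in evtchn_resize_ring() is the pair of memcpy() calls: the old contents are copied into the doubled ring twice, back to back, so the free-running consumer/producer indices stay valid once the index mask grows. A standalone sketch of that trick:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef unsigned int port_t;

    /* Double the ring and copy the old contents twice, back to back, so an
     * index that was masked with (old_size - 1) still finds the same entry
     * when masked with (2 * old_size - 1). */
    static port_t *resize_ring(const port_t *old_ring, size_t old_size)
    {
        port_t *new_ring = malloc(2 * old_size * sizeof(*new_ring));

        if (!new_ring)
            return NULL;
        memcpy(new_ring, old_ring, old_size * sizeof(*old_ring));
        memcpy(new_ring + old_size, old_ring, old_size * sizeof(*old_ring));
        return new_ring;
    }

    int main(void)
    {
        port_t old_ring[4] = { 10, 11, 12, 13 };
        size_t cons = 6;                    /* free-running index, old mask is 3 */
        port_t *new_ring = resize_ring(old_ring, 4);

        /* the entry at cons is the same before and after the resize */
        printf("old: %u, new: %u\n", old_ring[cons & 3], new_ring[cons & 7]);
        free(new_ring);
        return 0;
    }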
|
D | xen-scsiback.c |
    93  struct vscsiif_back_ring ring;  member
    333  ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt);  in scsiback_send_response()
    334  info->ring.rsp_prod_pvt++;  in scsiback_send_response()
    352  RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify);  in scsiback_send_response()
    589  xenbus_unmap_ring_vfree(info->dev, info->ring.sring);  in scsiback_disconnect()
    649  static struct vscsibk_pend *scsiback_get_pend_req(struct vscsiif_back_ring *ring,  in scsiback_get_pend_req() argument
    676  struct vscsiif_back_ring *ring,  in prepare_pending_reqs() argument
    709  pending_req = scsiback_get_pend_req(ring, v2p);  in prepare_pending_reqs()
    727  struct vscsiif_back_ring *ring = &info->ring;  in scsiback_do_cmd_fn() local
    734  rc = ring->req_cons;  in scsiback_do_cmd_fn()
    [all …]
|
/linux-4.19.296/include/uapi/linux/ |
D | virtio_ring.h |
    80  __virtio16 ring[];  member
    94  struct vring_used_elem ring[];  member
    140  #define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
    141  #define vring_avail_event(vr) (*(__virtio16 *)&(vr)->used->ring[(vr)->num])
    149  vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16)  in vring_init()
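
vring_init() lays the three split-ring areas out in one buffer: the descriptor table, then the avail ring (whose extra trailing slot doubles as used_event, per the vring_used_event() hit above), then the used ring aligned up to the requested alignment. A standalone sketch of that layout computation, with a hypothetical queue size and alignment:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the split-ring element types. */
    struct vring_desc_demo      { uint64_t addr; uint32_t len; uint16_t flags, next; };
    struct vring_used_elem_demo { uint32_t id, len; };

    int main(void)
    {
        unsigned int num = 256;             /* queue size, power of two */
        uintptr_t align = 4096;             /* legacy used-ring alignment */
        uintptr_t base = 0;                 /* pretend the queue starts at offset 0 */

        uintptr_t desc  = base;
        uintptr_t avail = desc + num * sizeof(struct vring_desc_demo);
        /* avail area: flags + idx + ring[num] + used_event, all 16-bit */
        uintptr_t avail_end = avail + (3 + num) * sizeof(uint16_t);
        /* used area starts at the next 'align' boundary, as in vring_init() */
        uintptr_t used = (avail_end + align - 1) & ~(align - 1);
        /* used area: flags + idx + ring[num] + avail_event */
        uintptr_t used_end = used + 2 * sizeof(uint16_t)
                           + num * sizeof(struct vring_used_elem_demo)
                           + sizeof(uint16_t);

        printf("desc@%#lx avail@%#lx used@%#lx total=%#lx\n",
               (unsigned long)desc, (unsigned long)avail,
               (unsigned long)used, (unsigned long)used_end);
        return 0;
    }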
|
/linux-4.19.296/include/xen/interface/io/ |
D | console.h | 15 #define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1)) argument
|
D | ring.h |
    63  (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
    64  sizeof(((struct _s##_sring *)0)->ring[0])))
    69  (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
    116  union __name##_sring_entry ring[1]; /* variable-length */ \
    202  (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
    205  (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
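
The ring-size macros above work out how many request/response slots fit after the sring header, rounded down to a power of two so the "idx & (RING_SIZE - 1)" masking in the later hits works. A standalone sketch of that calculation, with hypothetical header padding and entry sizes:

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified stand-in for a Xen shared-ring header followed by entries. */
    struct demo_sring {
        unsigned int req_prod, req_event;
        unsigned int rsp_prod, rsp_event;
        unsigned char pad[48];              /* hypothetical padding */
        unsigned char ring[1];              /* variable length, entries follow */
    };

    static unsigned int round_down_pow2(unsigned int x)
    {
        unsigned int r = 1;

        while (r * 2 <= x)
            r *= 2;
        return r;
    }

    int main(void)
    {
        size_t page = 4096;
        size_t entry = 112;                 /* hypothetical union of request/response */
        unsigned int slots =
            round_down_pow2((page - offsetof(struct demo_sring, ring)) / entry);

        printf("slots per page = %u, index mask = %#x\n", slots, slots - 1);
        return 0;
    }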
|
/linux-4.19.296/fs/ |
D | aio.c |
    464  struct aio_ring *ring;  in aio_setup_ring() local
    545  ring = kmap_atomic(ctx->ring_pages[0]);  in aio_setup_ring()
    546  ring->nr = nr_events; /* user copy */  in aio_setup_ring()
    547  ring->id = ~0U;  in aio_setup_ring()
    548  ring->head = ring->tail = 0;  in aio_setup_ring()
    549  ring->magic = AIO_RING_MAGIC;  in aio_setup_ring()
    550  ring->compat_features = AIO_RING_COMPAT_FEATURES;  in aio_setup_ring()
    551  ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;  in aio_setup_ring()
    552  ring->header_length = sizeof(struct aio_ring);  in aio_setup_ring()
    553  kunmap_atomic(ring);  in aio_setup_ring()
    [all …]
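
aio_setup_ring() maps the first ring page and fills in the header: magic and feature fields, the number of event slots, and the head/tail indices that completions are later pushed through. A simplified stand-in for that header (not the kernel's struct aio_ring, and with a hypothetical nr_events):

    #include <stdio.h>

    struct demo_aio_ring {
        unsigned int id;
        unsigned int nr;                    /* number of io_event slots */
        unsigned int head, tail;            /* consumer / producer positions */
        unsigned int magic;
        unsigned int compat_features;
        unsigned int incompat_features;
        unsigned int header_length;
    };

    int main(void)
    {
        struct demo_aio_ring ring = {
            .id = ~0U,                      /* patched to the real ring id later */
            .nr = 128,                      /* hypothetical nr_events */
            .head = 0,
            .tail = 0,
            .header_length = sizeof(struct demo_aio_ring),
        };

        /* head and tail both stay below nr, so pending events wrap like this */
        printf("events pending: %u\n", (ring.tail + ring.nr - ring.head) % ring.nr);
        return 0;
    }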
|
/linux-4.19.296/include/uapi/linux/genwqe/ |
D | genwqe_card.h |
    53  #define IO_EXTENDED_DIAG_MAP(ring) (0x00000500 | ((ring) << 3))  argument
    55  #define GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace) (((ring) << 8) | (trace))  argument
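
Both macros are plain bit packing: a register address per diag ring at a stride of 8 bytes from base 0x500, and a selector word combining ring and trace id. Copied verbatim into a quick standalone check:

    #include <stdio.h>

    #define IO_EXTENDED_DIAG_MAP(ring)                 (0x00000500 | ((ring) << 3))
    #define GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace) (((ring) << 8) | (trace))

    int main(void)
    {
        for (int ring = 0; ring < 8; ring++)
            printf("ring %d: map=%#x selector(trace=2)=%#x\n", ring,
                   IO_EXTENDED_DIAG_MAP(ring),
                   GENWQE_EXTENDED_DIAG_SELECTOR(ring, 2));
        return 0;
    }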
|
/linux-4.19.296/drivers/misc/genwqe/ |
D | card_utils.c |
    858  int entries = 0, ring, traps, traces, trace_entries;  in genwqe_ffdc_buff_size() local
    890  for (ring = 0; ring < 8; ring++) {  in genwqe_ffdc_buff_size()
    891  addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);  in genwqe_ffdc_buff_size()
    912  int i, traps, traces, trace, trace_entries, trace_entry, ring;  in genwqe_ffdc_buff_read() local
    954  for (ring = 0; ring < 8; ring++) { /* 0 is fls, 1 is fds,  in genwqe_ffdc_buff_read()
    956  addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);  in genwqe_ffdc_buff_read()
    971  GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace);  in genwqe_ffdc_buff_read()
|
/linux-4.19.296/drivers/misc/mic/vop/ |
D | vop_debugfs.c |
    176  j, avail->ring[j]);  in vop_vdev_info_show()
    187  used->ring[j].id),  in vop_vdev_info_show()
    189  used->ring[j].len));  in vop_vdev_info_show()
|
/linux-4.19.296/include/uapi/drm/ |
D | amdgpu_drm.h |
    403  __u32 ring;  member
    421  __u32 ring;  member
    576  __u32 ring;  member
    582  __u32 ring;  member
|
/linux-4.19.296/include/net/ |
D | page_pool.h | 98 struct ptr_ring ring; member
|
/linux-4.19.296/drivers/virtio/ |
D | virtio_ring.c |
    400  vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);  in virtqueue_add()
    723  i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);  in virtqueue_get_buf_ctx()
    724  *len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);  in virtqueue_get_buf_ctx()
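
virtqueue_add() publishes a descriptor-chain head in avail->ring[] and virtqueue_get_buf_ctx() later reads the id/len pair back out of used->ring[]. A standalone analog of that handshake, with the endianness helpers (cpu_to_virtio16() and friends) left out, no memory barriers, and a hypothetical queue size:

    #include <stdint.h>
    #include <stdio.h>

    #define QSZ 8                           /* queue size, power of two */

    struct demo_used_elem { uint32_t id, len; };
    struct demo_avail { uint16_t flags, idx, ring[QSZ]; };
    struct demo_used  { uint16_t flags, idx; struct demo_used_elem ring[QSZ]; };

    int main(void)
    {
        struct demo_avail avail = { 0 };
        struct demo_used used = { 0 };
        uint16_t last_used = 0;

        /* driver side: expose descriptor head 5 to the device */
        uint16_t head = 5;
        avail.ring[avail.idx & (QSZ - 1)] = head;
        avail.idx++;                        /* visible to the device after a barrier */

        /* device side (simulated): consume it and report 512 bytes written */
        uint16_t slot = used.idx & (QSZ - 1);
        used.ring[slot].id = head;
        used.ring[slot].len = 512;
        used.idx++;

        /* driver side again: harvest the completion */
        struct demo_used_elem *e = &used.ring[last_used & (QSZ - 1)];
        printf("completed head=%u len=%u\n", e->id, e->len);
        return 0;
    }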
|
/linux-4.19.296/drivers/hid/ |
D | wacom_wac.c |
    1432  int ring = data[285] & 0x7F;  in wacom_intuos_pro2_bt_pad() local
    1437  ring = 71 - ring;  in wacom_intuos_pro2_bt_pad()
    1438  ring += 3*72/16;  in wacom_intuos_pro2_bt_pad()
    1439  if (ring > 71)  in wacom_intuos_pro2_bt_pad()
    1440  ring -= 72;  in wacom_intuos_pro2_bt_pad()
    1444  input_report_abs(pad_input, ABS_WHEEL, ringstatus ? ring : 0);  in wacom_intuos_pro2_bt_pad()
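
The pad handler turns the raw 0-71 touch-ring position into the reported value by reversing the direction, rotating the zero point by 3/16 of a turn, and wrapping back into range. The same arithmetic pulled out into a standalone function:

    #include <stdio.h>

    static int remap_ring(int raw)
    {
        int ring = raw & 0x7F;              /* raw 7-bit position, 0..71 */

        ring = 71 - ring;                   /* reverse rotation direction */
        ring += 3 * 72 / 16;                /* rotate zero point by 3/16 of a turn */
        if (ring > 71)
            ring -= 72;                     /* wrap back into 0..71 */
        return ring;
    }

    int main(void)
    {
        for (int raw = 0; raw < 72; raw += 18)
            printf("raw=%2d -> reported=%2d\n", raw, remap_ring(raw));
        return 0;
    }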
|
/linux-4.19.296/drivers/iio/accel/ |
D | Kconfig | 382 accelerometers. These devices use a hardware ring buffer.
|