/*
 * Xen Event Channels (internal header)
 *
 * Copyright (C) 2013 Citrix Systems R&D Ltd.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2 or later.  See the file COPYING for more details.
 */
#ifndef __EVENTS_INTERNAL_H__
#define __EVENTS_INTERNAL_H__
#include <linux/rcupdate.h>

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info {
	struct list_head list;
	struct list_head eoi_list;
	struct rcu_work rwork;
	short refcnt;
	short spurious_cnt;
	short type;		/* type: IRQT_* */
	u8 mask_reason;		/* Why is event channel masked */
#define EVT_MASK_REASON_EXPLICIT	0x01
#define EVT_MASK_REASON_TEMPORARY	0x02
#define EVT_MASK_REASON_EOI_PENDING	0x04
	u8 is_active;		/* Is event just being handled? */
	unsigned irq;
	unsigned int evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */
	unsigned short eoi_cpu;	/* EOI must happen on this cpu */
	unsigned int irq_epoch;	/* If eoi_cpu valid: irq_epoch of event */
	u64 eoi_time;		/* Time in jiffies when to EOI. */
	raw_spinlock_t lock;

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
			uint16_t domid;
		} pirq;
	} u;
};

#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)
#define PIRQ_MSI_GROUP	(1 << 2)
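
/*
 * Example (illustrative sketch, not part of this header): the index
 * union is interpreted according to @type, and the PIRQ flags above
 * live in u.pirq.flags.  pirq_needs_eoi_example() is a hypothetical
 * name; the real helpers live in events_base.c.
 *
 *	static bool pirq_needs_eoi_example(const struct irq_info *info)
 *	{
 *		if (info->type != IRQT_PIRQ)
 *			return false;
 *		return info->u.pirq.flags & PIRQ_NEEDS_EOI;
 *	}
 */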

struct evtchn_loop_ctrl;

struct evtchn_ops {
	unsigned (*max_channels)(void);
	unsigned (*nr_channels)(void);

	int (*setup)(struct irq_info *info);
	void (*remove)(evtchn_port_t port, unsigned int cpu);
	void (*bind_to_cpu)(struct irq_info *info, unsigned cpu);

	void (*clear_pending)(unsigned port);
	void (*set_pending)(unsigned port);
	bool (*is_pending)(unsigned port);
	void (*mask)(unsigned port);
	void (*unmask)(unsigned port);

	void (*handle_events)(unsigned cpu, struct evtchn_loop_ctrl *ctrl);
	void (*resume)(void);

	int (*percpu_init)(unsigned int cpu);
	int (*percpu_deinit)(unsigned int cpu);
};
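
/*
 * Example (illustrative sketch, not part of this header): an event
 * channel ABI backend fills in one of these tables and publishes it
 * through the evtchn_ops pointer below.  The names here are modelled
 * on the 2-level backend but are stand-ins; the real tables live in
 * events_2l.c and events_fifo.c.
 *
 *	static const struct evtchn_ops evtchn_ops_2l = {
 *		.max_channels	= evtchn_2l_max_channels,
 *		.nr_channels	= evtchn_2l_max_channels,
 *		.clear_pending	= evtchn_2l_clear_pending,
 *		.set_pending	= evtchn_2l_set_pending,
 *		.is_pending	= evtchn_2l_is_pending,
 *		.mask		= evtchn_2l_mask,
 *		.unmask		= evtchn_2l_unmask,
 *		.handle_events	= evtchn_2l_handle_events,
 *		.resume		= evtchn_2l_resume,
 *	};
 */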

extern const struct evtchn_ops *evtchn_ops;

extern int **evtchn_to_irq;
int get_evtchn_to_irq(unsigned int evtchn);
void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl);

struct irq_info *info_for_irq(unsigned irq);
unsigned cpu_from_irq(unsigned irq);
unsigned cpu_from_evtchn(unsigned int evtchn);
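
/*
 * Example (illustrative sketch, not part of this header): evtchn_to_irq
 * is a two-level table indexed by event channel, so a lookup could look
 * roughly like the following.  EVTCHN_ROW()/EVTCHN_COL() are stand-ins
 * for the row/column split used by events_base.c.
 *
 *	int get_evtchn_to_irq_example(unsigned int evtchn)
 *	{
 *		if (evtchn >= xen_evtchn_max_channels())
 *			return -1;
 *		if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
 *			return -1;
 *		return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)];
 *	}
 */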

static inline unsigned xen_evtchn_max_channels(void)
{
	return evtchn_ops->max_channels();
}

/*
 * Do any ABI specific setup for a bound event channel before it can
 * be unmasked and used.
 */
static inline int xen_evtchn_port_setup(struct irq_info *info)
{
	if (evtchn_ops->setup)
		return evtchn_ops->setup(info);
	return 0;
}
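
/*
 * Example (illustrative sketch, not part of this header): a typical
 * bind path performs ABI setup before the port is ever unmasked.
 * bind_port_example() is a hypothetical caller; the real ones are the
 * bind_*_to_irq() helpers in events_base.c.
 *
 *	static int bind_port_example(struct irq_info *info)
 *	{
 *		int ret = xen_evtchn_port_setup(info);
 *
 *		if (ret < 0)
 *			return ret;
 *		unmask_evtchn(info->evtchn);
 *		return 0;
 *	}
 */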

static inline void xen_evtchn_port_remove(evtchn_port_t evtchn,
					  unsigned int cpu)
{
	if (evtchn_ops->remove)
		evtchn_ops->remove(evtchn, cpu);
}

static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info,
					       unsigned cpu)
{
	evtchn_ops->bind_to_cpu(info, cpu);
}

static inline void clear_evtchn(unsigned port)
{
	evtchn_ops->clear_pending(port);
}

static inline void set_evtchn(unsigned port)
{
	evtchn_ops->set_pending(port);
}

static inline bool test_evtchn(unsigned port)
{
	return evtchn_ops->is_pending(port);
}

static inline void mask_evtchn(unsigned port)
{
	evtchn_ops->mask(port);
}

static inline void unmask_evtchn(unsigned port)
{
	evtchn_ops->unmask(port);
}
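
/*
 * Example (illustrative sketch, not part of this header): callers that
 * mask for a specific reason track it in irq_info->mask_reason so the
 * channel is only unmasked once every reason has been cleared.
 * do_mask_example()/do_unmask_example() are hypothetical names; the
 * real bookkeeping lives in events_base.c.
 *
 *	static void do_mask_example(struct irq_info *info, u8 reason)
 *	{
 *		unsigned long flags;
 *
 *		raw_spin_lock_irqsave(&info->lock, flags);
 *		if (!info->mask_reason)
 *			mask_evtchn(info->evtchn);
 *		info->mask_reason |= reason;
 *		raw_spin_unlock_irqrestore(&info->lock, flags);
 *	}
 *
 *	static void do_unmask_example(struct irq_info *info, u8 reason)
 *	{
 *		unsigned long flags;
 *
 *		raw_spin_lock_irqsave(&info->lock, flags);
 *		info->mask_reason &= ~reason;
 *		if (!info->mask_reason)
 *			unmask_evtchn(info->evtchn);
 *		raw_spin_unlock_irqrestore(&info->lock, flags);
 *	}
 */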

static inline void xen_evtchn_handle_events(unsigned cpu,
					    struct evtchn_loop_ctrl *ctrl)
{
	evtchn_ops->handle_events(cpu, ctrl);
}

static inline void xen_evtchn_resume(void)
{
	if (evtchn_ops->resume)
		evtchn_ops->resume();
}

void xen_evtchn_2l_init(void);
int xen_evtchn_fifo_init(void);
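
/*
 * Example (illustrative sketch, not part of this header): the ABI is
 * chosen once at init time, typically by trying the FIFO ABI first and
 * falling back to the 2-level ABI.  select_evtchn_abi_example() is a
 * hypothetical name; the real selection happens in xen_init_IRQ().
 *
 *	static void select_evtchn_abi_example(void)
 *	{
 *		if (xen_evtchn_fifo_init() < 0)
 *			xen_evtchn_2l_init();
 *	}
 */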

#endif /* #ifndef __EVENTS_INTERNAL_H__ */