1 /*
2  * Xen event channels
3  *
4  * Xen models interrupts with abstract event channels.  Because each
5  * domain gets 1024 event channels, but NR_IRQS is not that large, we
6  * must dynamically map irqs<->event channels.  The event channels
7  * interface with the rest of the kernel by defining a xen interrupt
8  * chip.  When an event is received, it is mapped to an irq and sent
9  * through the normal interrupt processing path.
10  *
11  * There are four kinds of events which can be mapped to an event
12  * channel:
13  *
14  * 1. Inter-domain notifications.  This includes all the virtual
15  *    device events, since they're driven by front-ends in another domain
16  *    (typically dom0).
17  * 2. VIRQs, typically used for timers.  These are per-cpu events.
18  * 3. IPIs.
19  * 4. PIRQs - Hardware interrupts.
20  *
21  * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
22  */
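
/*
 * Typical driver usage (an illustrative sketch only, not code from this
 * file; my_frontend_isr and my_dev are hypothetical): a frontend that has
 * been given an event channel port by its backend binds the port to a
 * Linux irq plus handler, kicks the remote end through the irq, and later
 * tears the binding down again:
 *
 *	static irqreturn_t my_frontend_isr(int irq, void *dev_id)
 *	{
 *		// process the shared ring here
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = bind_evtchn_to_irqhandler(evtchn, my_frontend_isr, 0,
 *					"my-frontend", my_dev);
 *	if (err < 0)
 *		return err;
 *	my_dev->irq = err;			// irq number on success
 *	notify_remote_via_irq(my_dev->irq);	// poke the other end
 *	...
 *	unbind_from_irqhandler(my_dev->irq, my_dev);
 */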
23 
24 #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
25 
26 #include <linux/linkage.h>
27 #include <linux/interrupt.h>
28 #include <linux/irq.h>
29 #include <linux/moduleparam.h>
30 #include <linux/string.h>
31 #include <linux/bootmem.h>
32 #include <linux/slab.h>
33 #include <linux/irqnr.h>
34 #include <linux/pci.h>
35 #include <linux/spinlock.h>
36 #include <linux/cpuhotplug.h>
37 #include <linux/atomic.h>
38 #include <linux/ktime.h>
39 
40 #ifdef CONFIG_X86
41 #include <asm/desc.h>
42 #include <asm/ptrace.h>
43 #include <asm/irq.h>
44 #include <asm/io_apic.h>
45 #include <asm/i8259.h>
46 #include <asm/xen/pci.h>
47 #endif
48 #include <asm/sync_bitops.h>
49 #include <asm/xen/hypercall.h>
50 #include <asm/xen/hypervisor.h>
51 #include <xen/page.h>
52 
53 #include <xen/xen.h>
54 #include <xen/hvm.h>
55 #include <xen/xen-ops.h>
56 #include <xen/events.h>
57 #include <xen/interface/xen.h>
58 #include <xen/interface/event_channel.h>
59 #include <xen/interface/hvm/hvm_op.h>
60 #include <xen/interface/hvm/params.h>
61 #include <xen/interface/physdev.h>
62 #include <xen/interface/sched.h>
63 #include <xen/interface/vcpu.h>
64 #include <asm/hw_irq.h>
65 
66 #include "events_internal.h"
67 
68 #undef MODULE_PARAM_PREFIX
69 #define MODULE_PARAM_PREFIX "xen."
70 
71 static uint __read_mostly event_loop_timeout = 2;
72 module_param(event_loop_timeout, uint, 0644);
73 
74 static uint __read_mostly event_eoi_delay = 10;
75 module_param(event_eoi_delay, uint, 0644);
76 
77 const struct evtchn_ops *evtchn_ops;
78 
79 /*
80  * This lock protects updates to the following mapping and reference-count
81  * arrays. The lock does not need to be acquired to read the mapping tables.
82  */
83 static DEFINE_MUTEX(irq_mapping_update_lock);
84 
85 /*
86  * Lock hierarchy:
87  *
88  * irq_mapping_update_lock
89  *   IRQ-desc lock
90  *     percpu eoi_list_lock
91  *       irq_info->lock
92  */
93 
94 static LIST_HEAD(xen_irq_list_head);
95 
96 /* IRQ <-> VIRQ mapping. */
97 static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
98 
99 /* IRQ <-> IPI mapping */
100 static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
101 
102 int **evtchn_to_irq;
103 #ifdef CONFIG_X86
104 static unsigned long *pirq_eoi_map;
105 #endif
106 static bool (*pirq_needs_eoi)(unsigned irq);
107 
108 #define EVTCHN_ROW(e)  (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
109 #define EVTCHN_COL(e)  (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
110 #define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))
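
/*
 * Worked example (assuming 4 KiB pages and 4-byte ints, purely as an
 * illustration): each row holds PAGE_SIZE / sizeof(int) = 1024 entries, so
 * event channel 3000 lands at EVTCHN_ROW(3000) = 2, EVTCHN_COL(3000) = 952.
 * Rows are allocated on demand in set_evtchn_to_irq() below.
 */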
111 
112 /* Xen will never allocate port zero for any purpose. */
113 #define VALID_EVTCHN(chn)	((chn) != 0)
114 
115 static struct irq_info *legacy_info_ptrs[NR_IRQS_LEGACY];
116 
117 static struct irq_chip xen_dynamic_chip;
118 static struct irq_chip xen_lateeoi_chip;
119 static struct irq_chip xen_percpu_chip;
120 static struct irq_chip xen_pirq_chip;
121 static void enable_dynirq(struct irq_data *data);
122 static void disable_dynirq(struct irq_data *data);
123 
124 static DEFINE_PER_CPU(unsigned int, irq_epoch);
125 
126 static void clear_evtchn_to_irq_row(int *evtchn_row)
127 {
128 	unsigned col;
129 
130 	for (col = 0; col < EVTCHN_PER_ROW; col++)
131 		WRITE_ONCE(evtchn_row[col], -1);
132 }
133 
134 static void clear_evtchn_to_irq_all(void)
135 {
136 	unsigned row;
137 
138 	for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
139 		if (evtchn_to_irq[row] == NULL)
140 			continue;
141 		clear_evtchn_to_irq_row(evtchn_to_irq[row]);
142 	}
143 }
144 
145 static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
146 {
147 	unsigned row;
148 	unsigned col;
149 	int *evtchn_row;
150 
151 	if (evtchn >= xen_evtchn_max_channels())
152 		return -EINVAL;
153 
154 	row = EVTCHN_ROW(evtchn);
155 	col = EVTCHN_COL(evtchn);
156 
157 	if (evtchn_to_irq[row] == NULL) {
158 		/* Unallocated irq entries return -1 anyway */
159 		if (irq == -1)
160 			return 0;
161 
162 		evtchn_row = (int *) __get_free_pages(GFP_KERNEL, 0);
163 		if (evtchn_row == NULL)
164 			return -ENOMEM;
165 
166 		clear_evtchn_to_irq_row(evtchn_row);
167 
168 		/*
169 		 * We've prepared an empty row for the mapping. If a different
170 		 * thread was faster inserting it, we can drop ours.
171 		 */
172 		if (cmpxchg(&evtchn_to_irq[row], NULL, evtchn_row) != NULL)
173 			free_page((unsigned long) evtchn_row);
174 	}
175 
176 	WRITE_ONCE(evtchn_to_irq[row][col], irq);
177 	return 0;
178 }
179 
180 int get_evtchn_to_irq(unsigned evtchn)
181 {
182 	if (evtchn >= xen_evtchn_max_channels())
183 		return -1;
184 	if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
185 		return -1;
186 	return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
187 }
188 
189 /* Get info for IRQ */
190 struct irq_info *info_for_irq(unsigned irq)
191 {
192 	if (irq < nr_legacy_irqs())
193 		return legacy_info_ptrs[irq];
194 	else
195 		return irq_get_chip_data(irq);
196 }
197 
198 static void set_info_for_irq(unsigned int irq, struct irq_info *info)
199 {
200 	if (irq < nr_legacy_irqs())
201 		legacy_info_ptrs[irq] = info;
202 	else
203 		irq_set_chip_data(irq, info);
204 }
205 
206 static void delayed_free_irq(struct work_struct *work)
207 {
208 	struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
209 					     rwork);
210 	unsigned int irq = info->irq;
211 
212 	/* Remove the info pointer only now, with no potential users left. */
213 	set_info_for_irq(irq, NULL);
214 
215 	kfree(info);
216 
217 	/* Legacy IRQ descriptors are managed by the arch. */
218 	if (irq >= nr_legacy_irqs())
219 		irq_free_desc(irq);
220 }
221 
222 /* Constructors for packed IRQ information. */
223 static int xen_irq_info_common_setup(struct irq_info *info,
224 				     unsigned irq,
225 				     enum xen_irq_type type,
226 				     unsigned evtchn,
227 				     unsigned short cpu)
228 {
229 	int ret;
230 
231 	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
232 
233 	info->type = type;
234 	info->irq = irq;
235 	info->evtchn = evtchn;
236 	info->cpu = cpu;
237 	info->mask_reason = EVT_MASK_REASON_EXPLICIT;
238 	raw_spin_lock_init(&info->lock);
239 
240 	ret = set_evtchn_to_irq(evtchn, irq);
241 	if (ret < 0)
242 		return ret;
243 
244 	irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
245 
246 	return xen_evtchn_port_setup(info);
247 }
248 
249 static int xen_irq_info_evtchn_setup(unsigned irq,
250 				     unsigned evtchn)
251 {
252 	struct irq_info *info = info_for_irq(irq);
253 
254 	return xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
255 }
256 
257 static int xen_irq_info_ipi_setup(unsigned cpu,
258 				  unsigned irq,
259 				  unsigned evtchn,
260 				  enum ipi_vector ipi)
261 {
262 	struct irq_info *info = info_for_irq(irq);
263 
264 	info->u.ipi = ipi;
265 
266 	per_cpu(ipi_to_irq, cpu)[ipi] = irq;
267 
268 	return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
269 }
270 
271 static int xen_irq_info_virq_setup(unsigned cpu,
272 				   unsigned irq,
273 				   unsigned evtchn,
274 				   unsigned virq)
275 {
276 	struct irq_info *info = info_for_irq(irq);
277 
278 	info->u.virq = virq;
279 
280 	per_cpu(virq_to_irq, cpu)[virq] = irq;
281 
282 	return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
283 }
284 
285 static int xen_irq_info_pirq_setup(unsigned irq,
286 				   unsigned evtchn,
287 				   unsigned pirq,
288 				   unsigned gsi,
289 				   uint16_t domid,
290 				   unsigned char flags)
291 {
292 	struct irq_info *info = info_for_irq(irq);
293 
294 	info->u.pirq.pirq = pirq;
295 	info->u.pirq.gsi = gsi;
296 	info->u.pirq.domid = domid;
297 	info->u.pirq.flags = flags;
298 
299 	return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
300 }
301 
302 static void xen_irq_info_cleanup(struct irq_info *info)
303 {
304 	set_evtchn_to_irq(info->evtchn, -1);
305 	xen_evtchn_port_remove(info->evtchn, info->cpu);
306 	info->evtchn = 0;
307 }
308 
309 /*
310  * Accessors for packed IRQ information.
311  */
312 unsigned int evtchn_from_irq(unsigned irq)
313 {
314 	const struct irq_info *info = NULL;
315 
316 	if (likely(irq < nr_irqs))
317 		info = info_for_irq(irq);
318 	if (!info)
319 		return 0;
320 
321 	return info->evtchn;
322 }
323 
324 unsigned irq_from_evtchn(unsigned int evtchn)
325 {
326 	return get_evtchn_to_irq(evtchn);
327 }
328 EXPORT_SYMBOL_GPL(irq_from_evtchn);
329 
330 int irq_from_virq(unsigned int cpu, unsigned int virq)
331 {
332 	return per_cpu(virq_to_irq, cpu)[virq];
333 }
334 
335 static enum ipi_vector ipi_from_irq(unsigned irq)
336 {
337 	struct irq_info *info = info_for_irq(irq);
338 
339 	BUG_ON(info == NULL);
340 	BUG_ON(info->type != IRQT_IPI);
341 
342 	return info->u.ipi;
343 }
344 
345 static unsigned virq_from_irq(unsigned irq)
346 {
347 	struct irq_info *info = info_for_irq(irq);
348 
349 	BUG_ON(info == NULL);
350 	BUG_ON(info->type != IRQT_VIRQ);
351 
352 	return info->u.virq;
353 }
354 
355 static unsigned pirq_from_irq(unsigned irq)
356 {
357 	struct irq_info *info = info_for_irq(irq);
358 
359 	BUG_ON(info == NULL);
360 	BUG_ON(info->type != IRQT_PIRQ);
361 
362 	return info->u.pirq.pirq;
363 }
364 
365 static enum xen_irq_type type_from_irq(unsigned irq)
366 {
367 	return info_for_irq(irq)->type;
368 }
369 
370 unsigned cpu_from_irq(unsigned irq)
371 {
372 	return info_for_irq(irq)->cpu;
373 }
374 
375 unsigned int cpu_from_evtchn(unsigned int evtchn)
376 {
377 	int irq = get_evtchn_to_irq(evtchn);
378 	unsigned ret = 0;
379 
380 	if (irq != -1)
381 		ret = cpu_from_irq(irq);
382 
383 	return ret;
384 }
385 
386 static void do_mask(struct irq_info *info, u8 reason)
387 {
388 	unsigned long flags;
389 
390 	raw_spin_lock_irqsave(&info->lock, flags);
391 
392 	if (!info->mask_reason)
393 		mask_evtchn(info->evtchn);
394 
395 	info->mask_reason |= reason;
396 
397 	raw_spin_unlock_irqrestore(&info->lock, flags);
398 }
399 
400 static void do_unmask(struct irq_info *info, u8 reason)
401 {
402 	unsigned long flags;
403 
404 	raw_spin_lock_irqsave(&info->lock, flags);
405 
406 	info->mask_reason &= ~reason;
407 
408 	if (!info->mask_reason)
409 		unmask_evtchn(info->evtchn);
410 
411 	raw_spin_unlock_irqrestore(&info->lock, flags);
412 }
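
/*
 * The mask reasons behave like independent mask requests: the event channel
 * stays masked for as long as any reason bit is set. An illustrative
 * sequence (not taken from a real caller):
 *
 *	do_mask(info, EVT_MASK_REASON_EXPLICIT);	// masked
 *	do_mask(info, EVT_MASK_REASON_TEMPORARY);	// still masked
 *	do_unmask(info, EVT_MASK_REASON_TEMPORARY);	// still masked, EXPLICIT set
 *	do_unmask(info, EVT_MASK_REASON_EXPLICIT);	// unmasked now
 */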
413 
414 #ifdef CONFIG_X86
415 static bool pirq_check_eoi_map(unsigned irq)
416 {
417 	return test_bit(pirq_from_irq(irq), pirq_eoi_map);
418 }
419 #endif
420 
421 static bool pirq_needs_eoi_flag(unsigned irq)
422 {
423 	struct irq_info *info = info_for_irq(irq);
424 	BUG_ON(info->type != IRQT_PIRQ);
425 
426 	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
427 }
428 
429 static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
430 {
431 	int irq = get_evtchn_to_irq(chn);
432 	struct irq_info *info = info_for_irq(irq);
433 
434 	BUG_ON(irq == -1);
435 #ifdef CONFIG_SMP
436 	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
437 #endif
438 	xen_evtchn_port_bind_to_cpu(info, cpu);
439 
440 	info->cpu = cpu;
441 }
442 
443 /**
444  * notify_remote_via_irq - send event to remote end of event channel via irq
445  * @irq: irq of event channel to send event to
446  *
447  * Unlike notify_remote_via_evtchn(), this is safe to use across
448  * save/restore. Notifications on a broken connection are silently
449  * dropped.
450  */
451 void notify_remote_via_irq(int irq)
452 {
453 	int evtchn = evtchn_from_irq(irq);
454 
455 	if (VALID_EVTCHN(evtchn))
456 		notify_remote_via_evtchn(evtchn);
457 }
458 EXPORT_SYMBOL_GPL(notify_remote_via_irq);
459 
460 struct lateeoi_work {
461 	struct delayed_work delayed;
462 	spinlock_t eoi_list_lock;
463 	struct list_head eoi_list;
464 };
465 
466 static DEFINE_PER_CPU(struct lateeoi_work, lateeoi);
467 
468 static void lateeoi_list_del(struct irq_info *info)
469 {
470 	struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
471 	unsigned long flags;
472 
473 	spin_lock_irqsave(&eoi->eoi_list_lock, flags);
474 	list_del_init(&info->eoi_list);
475 	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
476 }
477 
478 static void lateeoi_list_add(struct irq_info *info)
479 {
480 	struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
481 	struct irq_info *elem;
482 	u64 now = get_jiffies_64();
483 	unsigned long delay;
484 	unsigned long flags;
485 
486 	if (now < info->eoi_time)
487 		delay = info->eoi_time - now;
488 	else
489 		delay = 1;
490 
491 	spin_lock_irqsave(&eoi->eoi_list_lock, flags);
492 
493 	if (list_empty(&eoi->eoi_list)) {
494 		list_add(&info->eoi_list, &eoi->eoi_list);
495 		mod_delayed_work_on(info->eoi_cpu, system_wq,
496 				    &eoi->delayed, delay);
497 	} else {
498 		list_for_each_entry_reverse(elem, &eoi->eoi_list, eoi_list) {
499 			if (elem->eoi_time <= info->eoi_time)
500 				break;
501 		}
502 		list_add(&info->eoi_list, &elem->eoi_list);
503 	}
504 
505 	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
506 }
507 
508 static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
509 {
510 	evtchn_port_t evtchn;
511 	unsigned int cpu;
512 	unsigned int delay = 0;
513 
514 	evtchn = info->evtchn;
515 	if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list))
516 		return;
517 
518 	if (spurious) {
519 		if ((1 << info->spurious_cnt) < (HZ << 2))
520 			info->spurious_cnt++;
521 		if (info->spurious_cnt > 1) {
522 			delay = 1 << (info->spurious_cnt - 2);
523 			if (delay > HZ)
524 				delay = HZ;
525 			if (!info->eoi_time)
526 				info->eoi_cpu = smp_processor_id();
527 			info->eoi_time = get_jiffies_64() + delay;
528 		}
529 	} else {
530 		info->spurious_cnt = 0;
531 	}
532 
533 	cpu = info->eoi_cpu;
534 	if (info->eoi_time &&
535 	    (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) {
536 		lateeoi_list_add(info);
537 		return;
538 	}
539 
540 	info->eoi_time = 0;
541 
542 	/* is_active hasn't been reset yet, do it now. */
543 	smp_store_release(&info->is_active, 0);
544 	do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
545 }
546 
547 static void xen_irq_lateeoi_worker(struct work_struct *work)
548 {
549 	struct lateeoi_work *eoi;
550 	struct irq_info *info;
551 	u64 now = get_jiffies_64();
552 	unsigned long flags;
553 
554 	eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);
555 
556 	rcu_read_lock();
557 
558 	while (true) {
559 		spin_lock_irqsave(&eoi->eoi_list_lock, flags);
560 
561 		info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
562 						eoi_list);
563 
564 		if (info == NULL)
565 			break;
566 
567 		if (now < info->eoi_time) {
568 			mod_delayed_work_on(info->eoi_cpu, system_wq,
569 					    &eoi->delayed,
570 					    info->eoi_time - now);
571 			break;
572 		}
573 
574 		list_del_init(&info->eoi_list);
575 
576 		spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
577 
578 		info->eoi_time = 0;
579 
580 		xen_irq_lateeoi_locked(info, false);
581 	}
582 
583 	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
584 
585 	rcu_read_unlock();
586 }
587 
588 static void xen_cpu_init_eoi(unsigned int cpu)
589 {
590 	struct lateeoi_work *eoi = &per_cpu(lateeoi, cpu);
591 
592 	INIT_DELAYED_WORK(&eoi->delayed, xen_irq_lateeoi_worker);
593 	spin_lock_init(&eoi->eoi_list_lock);
594 	INIT_LIST_HEAD(&eoi->eoi_list);
595 }
596 
597 void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
598 {
599 	struct irq_info *info;
600 
601 	rcu_read_lock();
602 
603 	info = info_for_irq(irq);
604 
605 	if (info)
606 		xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
607 
608 	rcu_read_unlock();
609 }
610 EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
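
/*
 * Sketch of how a backend is expected to use the lateeoi model (the names
 * my_backend_isr, my_process_ring and my_dev are hypothetical): the EOI is
 * only issued once the request has really been processed, so the
 * spurious/delay logic above can throttle a misbehaving other end:
 *
 *	static irqreturn_t my_backend_isr(int irq, void *dev_id)
 *	{
 *		bool did_work = my_process_ring(dev_id);
 *
 *		xen_irq_lateeoi(irq, did_work ? 0 : XEN_EOI_FLAG_SPURIOUS);
 *		return IRQ_HANDLED;
 *	}
 *
 *	irq = bind_interdomain_evtchn_to_irqhandler_lateeoi(domid, port,
 *			my_backend_isr, 0, "my-backend", my_dev);
 */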
611 
612 static void xen_irq_init(unsigned irq)
613 {
614 	struct irq_info *info;
615 
616 #ifdef CONFIG_SMP
617 	/* By default all event channels notify CPU#0. */
618 	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0));
619 #endif
620 
621 	info = kzalloc(sizeof(*info), GFP_KERNEL);
622 	if (info == NULL)
623 		panic("Unable to allocate metadata for IRQ%d\n", irq);
624 
625 	info->type = IRQT_UNBOUND;
626 	info->refcnt = -1;
627 	INIT_RCU_WORK(&info->rwork, delayed_free_irq);
628 
629 	set_info_for_irq(irq, info);
630 
631 	INIT_LIST_HEAD(&info->eoi_list);
632 	list_add_tail(&info->list, &xen_irq_list_head);
633 }
634 
635 static int __must_check xen_allocate_irqs_dynamic(int nvec)
636 {
637 	int i, irq = irq_alloc_descs(-1, 0, nvec, -1);
638 
639 	if (irq >= 0) {
640 		for (i = 0; i < nvec; i++)
641 			xen_irq_init(irq + i);
642 	}
643 
644 	return irq;
645 }
646 
647 static inline int __must_check xen_allocate_irq_dynamic(void)
648 {
649 
650 	return xen_allocate_irqs_dynamic(1);
651 }
652 
653 static int __must_check xen_allocate_irq_gsi(unsigned gsi)
654 {
655 	int irq;
656 
657 	/*
658 	 * A PV guest has no concept of a GSI (since it has no ACPI
659 	 * nor access to/knowledge of the physical APICs). Therefore
660 	 * all IRQs are dynamically allocated from the entire IRQ
661 	 * space.
662 	 */
663 	if (xen_pv_domain() && !xen_initial_domain())
664 		return xen_allocate_irq_dynamic();
665 
666 	/* Legacy IRQ descriptors are already allocated by the arch. */
667 	if (gsi < nr_legacy_irqs())
668 		irq = gsi;
669 	else
670 		irq = irq_alloc_desc_at(gsi, -1);
671 
672 	xen_irq_init(irq);
673 
674 	return irq;
675 }
676 
677 static void xen_free_irq(unsigned irq)
678 {
679 	struct irq_info *info = info_for_irq(irq);
680 
681 	if (WARN_ON(!info))
682 		return;
683 
684 	if (!list_empty(&info->eoi_list))
685 		lateeoi_list_del(info);
686 
687 	list_del(&info->list);
688 
689 	WARN_ON(info->refcnt > 0);
690 
691 	queue_rcu_work(system_wq, &info->rwork);
692 }
693 
694 static void xen_evtchn_close(unsigned int port)
695 {
696 	struct evtchn_close close;
697 
698 	close.port = port;
699 	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
700 		BUG();
701 }
702 
703 static void event_handler_exit(struct irq_info *info)
704 {
705 	smp_store_release(&info->is_active, 0);
706 	clear_evtchn(info->evtchn);
707 }
708 
709 static void pirq_query_unmask(int irq)
710 {
711 	struct physdev_irq_status_query irq_status;
712 	struct irq_info *info = info_for_irq(irq);
713 
714 	BUG_ON(info->type != IRQT_PIRQ);
715 
716 	irq_status.irq = pirq_from_irq(irq);
717 	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
718 		irq_status.flags = 0;
719 
720 	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
721 	if (irq_status.flags & XENIRQSTAT_needs_eoi)
722 		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
723 }
724 
725 static void eoi_pirq(struct irq_data *data)
726 {
727 	struct irq_info *info = info_for_irq(data->irq);
728 	int evtchn = info ? info->evtchn : 0;
729 	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
730 	int rc = 0;
731 
732 	if (!VALID_EVTCHN(evtchn))
733 		return;
734 
735 	if (unlikely(irqd_is_setaffinity_pending(data)) &&
736 	    likely(!irqd_irq_disabled(data))) {
737 		do_mask(info, EVT_MASK_REASON_TEMPORARY);
738 
739 		event_handler_exit(info);
740 
741 		irq_move_masked_irq(data);
742 
743 		do_unmask(info, EVT_MASK_REASON_TEMPORARY);
744 	} else
745 		event_handler_exit(info);
746 
747 	if (pirq_needs_eoi(data->irq)) {
748 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
749 		WARN_ON(rc);
750 	}
751 }
752 
753 static void mask_ack_pirq(struct irq_data *data)
754 {
755 	disable_dynirq(data);
756 	eoi_pirq(data);
757 }
758 
759 static unsigned int __startup_pirq(unsigned int irq)
760 {
761 	struct evtchn_bind_pirq bind_pirq;
762 	struct irq_info *info = info_for_irq(irq);
763 	int evtchn = evtchn_from_irq(irq);
764 	int rc;
765 
766 	BUG_ON(info->type != IRQT_PIRQ);
767 
768 	if (VALID_EVTCHN(evtchn))
769 		goto out;
770 
771 	bind_pirq.pirq = pirq_from_irq(irq);
772 	/* NB. We are happy to share unless we are probing. */
773 	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
774 					BIND_PIRQ__WILL_SHARE : 0;
775 	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
776 	if (rc != 0) {
777 		pr_warn("Failed to obtain physical IRQ %d\n", irq);
778 		return 0;
779 	}
780 	evtchn = bind_pirq.port;
781 
782 	pirq_query_unmask(irq);
783 
784 	rc = set_evtchn_to_irq(evtchn, irq);
785 	if (rc)
786 		goto err;
787 
788 	info->evtchn = evtchn;
789 	bind_evtchn_to_cpu(evtchn, 0);
790 
791 	rc = xen_evtchn_port_setup(info);
792 	if (rc)
793 		goto err;
794 
795 out:
796 	do_unmask(info, EVT_MASK_REASON_EXPLICIT);
797 
798 	eoi_pirq(irq_get_irq_data(irq));
799 
800 	return 0;
801 
802 err:
803 	pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
804 	xen_evtchn_close(evtchn);
805 	return 0;
806 }
807 
808 static unsigned int startup_pirq(struct irq_data *data)
809 {
810 	return __startup_pirq(data->irq);
811 }
812 
813 static void shutdown_pirq(struct irq_data *data)
814 {
815 	unsigned int irq = data->irq;
816 	struct irq_info *info = info_for_irq(irq);
817 	unsigned evtchn = evtchn_from_irq(irq);
818 
819 	BUG_ON(info->type != IRQT_PIRQ);
820 
821 	if (!VALID_EVTCHN(evtchn))
822 		return;
823 
824 	do_mask(info, EVT_MASK_REASON_EXPLICIT);
825 	xen_evtchn_close(evtchn);
826 	xen_irq_info_cleanup(info);
827 }
828 
829 static void enable_pirq(struct irq_data *data)
830 {
831 	enable_dynirq(data);
832 }
833 
834 static void disable_pirq(struct irq_data *data)
835 {
836 	disable_dynirq(data);
837 }
838 
839 int xen_irq_from_gsi(unsigned gsi)
840 {
841 	struct irq_info *info;
842 
843 	list_for_each_entry(info, &xen_irq_list_head, list) {
844 		if (info->type != IRQT_PIRQ)
845 			continue;
846 
847 		if (info->u.pirq.gsi == gsi)
848 			return info->irq;
849 	}
850 
851 	return -1;
852 }
853 EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
854 
855 static void __unbind_from_irq(unsigned int irq)
856 {
857 	int evtchn = evtchn_from_irq(irq);
858 	struct irq_info *info = info_for_irq(irq);
859 
860 	if (info->refcnt > 0) {
861 		info->refcnt--;
862 		if (info->refcnt != 0)
863 			return;
864 	}
865 
866 	if (VALID_EVTCHN(evtchn)) {
867 		unsigned int cpu = cpu_from_irq(irq);
868 
869 		xen_evtchn_close(evtchn);
870 
871 		switch (type_from_irq(irq)) {
872 		case IRQT_VIRQ:
873 			per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
874 			break;
875 		case IRQT_IPI:
876 			per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
877 			break;
878 		default:
879 			break;
880 		}
881 
882 		xen_irq_info_cleanup(info);
883 	}
884 
885 	xen_free_irq(irq);
886 }
887 
888 /*
889  * Do not make any assumptions regarding the relationship between the
890  * IRQ number returned here and the Xen pirq argument.
891  *
892  * Note: We don't assign an event channel until the irq has actually been
893  * started up.  Return an existing irq if we've already got one for the gsi.
894  *
895  * Shareable implies level triggered, not shareable implies edge
896  * triggered here.
897  */
898 int xen_bind_pirq_gsi_to_irq(unsigned gsi,
899 			     unsigned pirq, int shareable, char *name)
900 {
901 	int irq = -1;
902 	struct physdev_irq irq_op;
903 	int ret;
904 
905 	mutex_lock(&irq_mapping_update_lock);
906 
907 	irq = xen_irq_from_gsi(gsi);
908 	if (irq != -1) {
909 		pr_info("%s: returning irq %d for gsi %u\n",
910 			__func__, irq, gsi);
911 		goto out;
912 	}
913 
914 	irq = xen_allocate_irq_gsi(gsi);
915 	if (irq < 0)
916 		goto out;
917 
918 	irq_op.irq = irq;
919 	irq_op.vector = 0;
920 
921 	/* Only the privileged domain can do this. For non-priv, the pcifront
922 	 * driver provides a PCI bus that does the call to do exactly
923 	 * this in the priv domain. */
924 	if (xen_initial_domain() &&
925 	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
926 		xen_free_irq(irq);
927 		irq = -ENOSPC;
928 		goto out;
929 	}
930 
931 	ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
932 			       shareable ? PIRQ_SHAREABLE : 0);
933 	if (ret < 0) {
934 		__unbind_from_irq(irq);
935 		irq = ret;
936 		goto out;
937 	}
938 
939 	pirq_query_unmask(irq);
940 	/* We try to use the handler with the appropriate semantic for the
941 	 * type of interrupt: if the interrupt is an edge triggered
942 	 * interrupt we use handle_edge_irq.
943 	 *
944 	 * On the other hand if the interrupt is level triggered we use
945 	 * handle_fasteoi_irq like the native code does for this kind of
946 	 * interrupts.
947 	 *
948 	 * Depending on the Xen version, pirq_needs_eoi might return true
949 	 * not only for level triggered interrupts but for edge triggered
950 	 * interrupts too. In any case Xen always honors the eoi mechanism,
951 	 * not injecting any more pirqs of the same kind if the first one
952 	 * hasn't received an eoi yet. Therefore using the fasteoi handler
953 	 * is the right choice either way.
954 	 */
955 	if (shareable)
956 		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
957 				handle_fasteoi_irq, name);
958 	else
959 		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
960 				handle_edge_irq, name);
961 
962 out:
963 	mutex_unlock(&irq_mapping_update_lock);
964 
965 	return irq;
966 }
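
/*
 * Illustrative dom0 usage (a sketch only; gsi, pirq, my_isr and my_dev are
 * hypothetical): map a legacy GSI to a Xen pirq and then request it like
 * any other irq; the event channel itself is only bound when the irq is
 * started up:
 *
 *	irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, 1, "ioapic-level");	// 1 = shareable
 *	if (irq >= 0)
 *		ret = request_irq(irq, my_isr, IRQF_SHARED, "mydev", my_dev);
 */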
967 
968 #ifdef CONFIG_PCI_MSI
969 int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
970 {
971 	int rc;
972 	struct physdev_get_free_pirq op_get_free_pirq;
973 
974 	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
975 	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
976 
977 	WARN_ONCE(rc == -ENOSYS,
978 		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");
979 
980 	return rc ? -1 : op_get_free_pirq.pirq;
981 }
982 
983 int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
984 			     int pirq, int nvec, const char *name, domid_t domid)
985 {
986 	int i, irq, ret;
987 
988 	mutex_lock(&irq_mapping_update_lock);
989 
990 	irq = xen_allocate_irqs_dynamic(nvec);
991 	if (irq < 0)
992 		goto out;
993 
994 	for (i = 0; i < nvec; i++) {
995 		irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);
996 
997 		ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
998 					      i == 0 ? 0 : PIRQ_MSI_GROUP);
999 		if (ret < 0)
1000 			goto error_irq;
1001 	}
1002 
1003 	ret = irq_set_msi_desc(irq, msidesc);
1004 	if (ret < 0)
1005 		goto error_irq;
1006 out:
1007 	mutex_unlock(&irq_mapping_update_lock);
1008 	return irq;
1009 error_irq:
1010 	while (nvec--)
1011 		__unbind_from_irq(irq + nvec);
1012 	mutex_unlock(&irq_mapping_update_lock);
1013 	return ret;
1014 }
1015 #endif
1016 
1017 int xen_destroy_irq(int irq)
1018 {
1019 	struct physdev_unmap_pirq unmap_irq;
1020 	struct irq_info *info = info_for_irq(irq);
1021 	int rc = -ENOENT;
1022 
1023 	mutex_lock(&irq_mapping_update_lock);
1024 
1025 	/*
1026 	 * Only the first vector of an MSI group has its PIRQ unmapped here;
1027 	 * the remaining vectors in the group are flagged PIRQ_MSI_GROUP and
1028 	 * skip the unmap.
1029 	 */
1030 	if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) {
1031 		unmap_irq.pirq = info->u.pirq.pirq;
1032 		unmap_irq.domid = info->u.pirq.domid;
1033 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
1034 		/* If another domain quits without making the pci_disable_msix
1035 		 * call, the Xen hypervisor takes care of freeing the PIRQs
1036 		 * (free_domain_pirqs).
1037 		 */
1038 		if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
1039 			pr_info("domain %d does not have %d anymore\n",
1040 				info->u.pirq.domid, info->u.pirq.pirq);
1041 		else if (rc) {
1042 			pr_warn("unmap irq failed %d\n", rc);
1043 			goto out;
1044 		}
1045 	}
1046 
1047 	xen_free_irq(irq);
1048 
1049 out:
1050 	mutex_unlock(&irq_mapping_update_lock);
1051 	return rc;
1052 }
1053 
1054 int xen_irq_from_pirq(unsigned pirq)
1055 {
1056 	int irq;
1057 
1058 	struct irq_info *info;
1059 
1060 	mutex_lock(&irq_mapping_update_lock);
1061 
1062 	list_for_each_entry(info, &xen_irq_list_head, list) {
1063 		if (info->type != IRQT_PIRQ)
1064 			continue;
1065 		irq = info->irq;
1066 		if (info->u.pirq.pirq == pirq)
1067 			goto out;
1068 	}
1069 	irq = -1;
1070 out:
1071 	mutex_unlock(&irq_mapping_update_lock);
1072 
1073 	return irq;
1074 }
1075 
1076 
1077 int xen_pirq_from_irq(unsigned irq)
1078 {
1079 	return pirq_from_irq(irq);
1080 }
1081 EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
1082 
1083 static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip)
1084 {
1085 	int irq;
1086 	int ret;
1087 
1088 	if (evtchn >= xen_evtchn_max_channels())
1089 		return -ENOMEM;
1090 
1091 	mutex_lock(&irq_mapping_update_lock);
1092 
1093 	irq = get_evtchn_to_irq(evtchn);
1094 
1095 	if (irq == -1) {
1096 		irq = xen_allocate_irq_dynamic();
1097 		if (irq < 0)
1098 			goto out;
1099 
1100 		irq_set_chip_and_handler_name(irq, chip,
1101 					      handle_edge_irq, "event");
1102 
1103 		ret = xen_irq_info_evtchn_setup(irq, evtchn);
1104 		if (ret < 0) {
1105 			__unbind_from_irq(irq);
1106 			irq = ret;
1107 			goto out;
1108 		}
1109 		/* New interdomain events are bound to VCPU 0. */
1110 		bind_evtchn_to_cpu(evtchn, 0);
1111 	} else {
1112 		struct irq_info *info = info_for_irq(irq);
1113 		WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
1114 	}
1115 
1116 out:
1117 	mutex_unlock(&irq_mapping_update_lock);
1118 
1119 	return irq;
1120 }
1121 
1122 int bind_evtchn_to_irq(evtchn_port_t evtchn)
1123 {
1124 	return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip);
1125 }
1126 EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
1127 
1128 int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
1129 {
1130 	return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip);
1131 }
1132 EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);
1133 
1134 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
1135 {
1136 	struct evtchn_bind_ipi bind_ipi;
1137 	int evtchn, irq;
1138 	int ret;
1139 
1140 	mutex_lock(&irq_mapping_update_lock);
1141 
1142 	irq = per_cpu(ipi_to_irq, cpu)[ipi];
1143 
1144 	if (irq == -1) {
1145 		irq = xen_allocate_irq_dynamic();
1146 		if (irq < 0)
1147 			goto out;
1148 
1149 		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
1150 					      handle_percpu_irq, "ipi");
1151 
1152 		bind_ipi.vcpu = xen_vcpu_nr(cpu);
1153 		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
1154 						&bind_ipi) != 0)
1155 			BUG();
1156 		evtchn = bind_ipi.port;
1157 
1158 		ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
1159 		if (ret < 0) {
1160 			__unbind_from_irq(irq);
1161 			irq = ret;
1162 			goto out;
1163 		}
1164 		bind_evtchn_to_cpu(evtchn, cpu);
1165 	} else {
1166 		struct irq_info *info = info_for_irq(irq);
1167 		WARN_ON(info == NULL || info->type != IRQT_IPI);
1168 	}
1169 
1170  out:
1171 	mutex_unlock(&irq_mapping_update_lock);
1172 	return irq;
1173 }
1174 
1175 static int bind_interdomain_evtchn_to_irq_chip(unsigned int remote_domain,
1176 					       evtchn_port_t remote_port,
1177 					       struct irq_chip *chip)
1178 {
1179 	struct evtchn_bind_interdomain bind_interdomain;
1180 	int err;
1181 
1182 	bind_interdomain.remote_dom  = remote_domain;
1183 	bind_interdomain.remote_port = remote_port;
1184 
1185 	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
1186 					  &bind_interdomain);
1187 
1188 	return err ? : bind_evtchn_to_irq_chip(bind_interdomain.local_port,
1189 					       chip);
1190 }
1191 
1192 int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
1193 				   evtchn_port_t remote_port)
1194 {
1195 	return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
1196 						   &xen_dynamic_chip);
1197 }
1198 EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq);
1199 
1200 int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain,
1201 					   evtchn_port_t remote_port)
1202 {
1203 	return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
1204 						   &xen_lateeoi_chip);
1205 }
1206 EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);
1207 
1208 static int find_virq(unsigned int virq, unsigned int cpu)
1209 {
1210 	struct evtchn_status status;
1211 	int port, rc = -ENOENT;
1212 
1213 	memset(&status, 0, sizeof(status));
1214 	for (port = 0; port < xen_evtchn_max_channels(); port++) {
1215 		status.dom = DOMID_SELF;
1216 		status.port = port;
1217 		rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
1218 		if (rc < 0)
1219 			continue;
1220 		if (status.status != EVTCHNSTAT_virq)
1221 			continue;
1222 		if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
1223 			rc = port;
1224 			break;
1225 		}
1226 	}
1227 	return rc;
1228 }
1229 
1230 /**
1231  * xen_evtchn_nr_channels - number of usable event channel ports
1232  *
1233  * This may be less than the maximum supported by the current
1234  * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum
1235  * supported.
1236  */
1237 unsigned xen_evtchn_nr_channels(void)
1238 {
1239 	return evtchn_ops->nr_channels();
1240 }
1241 EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);
1242 
1243 int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
1244 {
1245 	struct evtchn_bind_virq bind_virq;
1246 	int evtchn, irq, ret;
1247 
1248 	mutex_lock(&irq_mapping_update_lock);
1249 
1250 	irq = per_cpu(virq_to_irq, cpu)[virq];
1251 
1252 	if (irq == -1) {
1253 		irq = xen_allocate_irq_dynamic();
1254 		if (irq < 0)
1255 			goto out;
1256 
1257 		if (percpu)
1258 			irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
1259 						      handle_percpu_irq, "virq");
1260 		else
1261 			irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
1262 						      handle_edge_irq, "virq");
1263 
1264 		bind_virq.virq = virq;
1265 		bind_virq.vcpu = xen_vcpu_nr(cpu);
1266 		ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
1267 						&bind_virq);
1268 		if (ret == 0)
1269 			evtchn = bind_virq.port;
1270 		else {
1271 			if (ret == -EEXIST)
1272 				ret = find_virq(virq, cpu);
1273 			BUG_ON(ret < 0);
1274 			evtchn = ret;
1275 		}
1276 
1277 		ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
1278 		if (ret < 0) {
1279 			__unbind_from_irq(irq);
1280 			irq = ret;
1281 			goto out;
1282 		}
1283 
1284 		bind_evtchn_to_cpu(evtchn, cpu);
1285 	} else {
1286 		struct irq_info *info = info_for_irq(irq);
1287 		WARN_ON(info == NULL || info->type != IRQT_VIRQ);
1288 	}
1289 
1290 out:
1291 	mutex_unlock(&irq_mapping_update_lock);
1292 
1293 	return irq;
1294 }
1295 
1296 static void unbind_from_irq(unsigned int irq)
1297 {
1298 	mutex_lock(&irq_mapping_update_lock);
1299 	__unbind_from_irq(irq);
1300 	mutex_unlock(&irq_mapping_update_lock);
1301 }
1302 
1303 static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn,
1304 					  irq_handler_t handler,
1305 					  unsigned long irqflags,
1306 					  const char *devname, void *dev_id,
1307 					  struct irq_chip *chip)
1308 {
1309 	int irq, retval;
1310 
1311 	irq = bind_evtchn_to_irq_chip(evtchn, chip);
1312 	if (irq < 0)
1313 		return irq;
1314 	retval = request_irq(irq, handler, irqflags, devname, dev_id);
1315 	if (retval != 0) {
1316 		unbind_from_irq(irq);
1317 		return retval;
1318 	}
1319 
1320 	return irq;
1321 }
1322 
1323 int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
1324 			      irq_handler_t handler,
1325 			      unsigned long irqflags,
1326 			      const char *devname, void *dev_id)
1327 {
1328 	return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
1329 					      devname, dev_id,
1330 					      &xen_dynamic_chip);
1331 }
1332 EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
1333 
1334 int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
1335 				      irq_handler_t handler,
1336 				      unsigned long irqflags,
1337 				      const char *devname, void *dev_id)
1338 {
1339 	return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
1340 					      devname, dev_id,
1341 					      &xen_lateeoi_chip);
1342 }
1343 EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler_lateeoi);
1344 
1345 static int bind_interdomain_evtchn_to_irqhandler_chip(
1346 		unsigned int remote_domain, evtchn_port_t remote_port,
1347 		irq_handler_t handler, unsigned long irqflags,
1348 		const char *devname, void *dev_id, struct irq_chip *chip)
1349 {
1350 	int irq, retval;
1351 
1352 	irq = bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
1353 						  chip);
1354 	if (irq < 0)
1355 		return irq;
1356 
1357 	retval = request_irq(irq, handler, irqflags, devname, dev_id);
1358 	if (retval != 0) {
1359 		unbind_from_irq(irq);
1360 		return retval;
1361 	}
1362 
1363 	return irq;
1364 }
1365 
1366 int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
1367 					  evtchn_port_t remote_port,
1368 					  irq_handler_t handler,
1369 					  unsigned long irqflags,
1370 					  const char *devname,
1371 					  void *dev_id)
1372 {
1373 	return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain,
1374 				remote_port, handler, irqflags, devname,
1375 				dev_id, &xen_dynamic_chip);
1376 }
1377 EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
1378 
1379 int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain,
1380 						  evtchn_port_t remote_port,
1381 						  irq_handler_t handler,
1382 						  unsigned long irqflags,
1383 						  const char *devname,
1384 						  void *dev_id)
1385 {
1386 	return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain,
1387 				remote_port, handler, irqflags, devname,
1388 				dev_id, &xen_lateeoi_chip);
1389 }
1390 EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler_lateeoi);
1391 
1392 int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
1393 			    irq_handler_t handler,
1394 			    unsigned long irqflags, const char *devname, void *dev_id)
1395 {
1396 	int irq, retval;
1397 
1398 	irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
1399 	if (irq < 0)
1400 		return irq;
1401 	retval = request_irq(irq, handler, irqflags, devname, dev_id);
1402 	if (retval != 0) {
1403 		unbind_from_irq(irq);
1404 		return retval;
1405 	}
1406 
1407 	return irq;
1408 }
1409 EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
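
/*
 * Sketch of a VIRQ binding (my_debug_isr is hypothetical): VIRQs such as
 * VIRQ_DEBUG are per-cpu, so a handler is typically bound once per CPU with
 * IRQF_PERCPU:
 *
 *	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, my_debug_isr,
 *				     IRQF_PERCPU | IRQF_NOBALANCING,
 *				     "debug", NULL);
 *	if (rc < 0)
 *		pr_err("failed to bind VIRQ_DEBUG on cpu %u: %d\n", cpu, rc);
 */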
1410 
1411 int bind_ipi_to_irqhandler(enum ipi_vector ipi,
1412 			   unsigned int cpu,
1413 			   irq_handler_t handler,
1414 			   unsigned long irqflags,
1415 			   const char *devname,
1416 			   void *dev_id)
1417 {
1418 	int irq, retval;
1419 
1420 	irq = bind_ipi_to_irq(ipi, cpu);
1421 	if (irq < 0)
1422 		return irq;
1423 
1424 	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
1425 	retval = request_irq(irq, handler, irqflags, devname, dev_id);
1426 	if (retval != 0) {
1427 		unbind_from_irq(irq);
1428 		return retval;
1429 	}
1430 
1431 	return irq;
1432 }
1433 
1434 void unbind_from_irqhandler(unsigned int irq, void *dev_id)
1435 {
1436 	struct irq_info *info = info_for_irq(irq);
1437 
1438 	if (WARN_ON(!info))
1439 		return;
1440 	free_irq(irq, dev_id);
1441 	unbind_from_irq(irq);
1442 }
1443 EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
1444 
1445 /**
1446  * xen_set_irq_priority() - set an event channel priority.
1447  * @irq:irq bound to an event channel.
1448  * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN.
1449  */
1450 int xen_set_irq_priority(unsigned irq, unsigned priority)
1451 {
1452 	struct evtchn_set_priority set_priority;
1453 
1454 	set_priority.port = evtchn_from_irq(irq);
1455 	set_priority.priority = priority;
1456 
1457 	return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority,
1458 					   &set_priority);
1459 }
1460 EXPORT_SYMBOL_GPL(xen_set_irq_priority);
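
/*
 * Example (a sketch, not code from this file): the timer VIRQ is usually
 * given the highest priority so its events are delivered before any other
 * pending ports:
 *
 *	if (xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX))
 *		pr_warn("unable to raise priority of irq %d\n", irq);
 *
 * Note that event channel priorities are a feature of the FIFO ABI, so the
 * hypercall may fail on the 2-level ABI.
 */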
1461 
1462 int evtchn_make_refcounted(unsigned int evtchn)
1463 {
1464 	int irq = get_evtchn_to_irq(evtchn);
1465 	struct irq_info *info;
1466 
1467 	if (irq == -1)
1468 		return -ENOENT;
1469 
1470 	info = info_for_irq(irq);
1471 
1472 	if (!info)
1473 		return -ENOENT;
1474 
1475 	WARN_ON(info->refcnt != -1);
1476 
1477 	info->refcnt = 1;
1478 
1479 	return 0;
1480 }
1481 EXPORT_SYMBOL_GPL(evtchn_make_refcounted);
1482 
1483 int evtchn_get(unsigned int evtchn)
1484 {
1485 	int irq;
1486 	struct irq_info *info;
1487 	int err = -ENOENT;
1488 
1489 	if (evtchn >= xen_evtchn_max_channels())
1490 		return -EINVAL;
1491 
1492 	mutex_lock(&irq_mapping_update_lock);
1493 
1494 	irq = get_evtchn_to_irq(evtchn);
1495 	if (irq == -1)
1496 		goto done;
1497 
1498 	info = info_for_irq(irq);
1499 
1500 	if (!info)
1501 		goto done;
1502 
1503 	err = -EINVAL;
1504 	if (info->refcnt <= 0 || info->refcnt == SHRT_MAX)
1505 		goto done;
1506 
1507 	info->refcnt++;
1508 	err = 0;
1509  done:
1510 	mutex_unlock(&irq_mapping_update_lock);
1511 
1512 	return err;
1513 }
1514 EXPORT_SYMBOL_GPL(evtchn_get);
1515 
1516 void evtchn_put(unsigned int evtchn)
1517 {
1518 	int irq = get_evtchn_to_irq(evtchn);
1519 	if (WARN_ON(irq == -1))
1520 		return;
1521 	unbind_from_irq(irq);
1522 }
1523 EXPORT_SYMBOL_GPL(evtchn_put);
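
/*
 * Refcounting sketch (illustrative only): a user such as the evtchn device
 * marks a freshly bound port as refcounted and then balances evtchn_get()
 * with evtchn_put() for each additional user; the irq is only unbound when
 * the last reference is dropped:
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, handler, 0, "evtchn", data);
 *	evtchn_make_refcounted(evtchn);		// refcnt = 1
 *	...
 *	if (!evtchn_get(evtchn)) {		// refcnt = 2
 *		// the port is guaranteed to stay bound here
 *		evtchn_put(evtchn);		// refcnt back to 1
 *	}
 */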
1524 
1525 void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
1526 {
1527 	int irq;
1528 
1529 #ifdef CONFIG_X86
1530 	if (unlikely(vector == XEN_NMI_VECTOR)) {
1531 		int rc =  HYPERVISOR_vcpu_op(VCPUOP_send_nmi, xen_vcpu_nr(cpu),
1532 					     NULL);
1533 		if (rc < 0)
1534 			printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
1535 		return;
1536 	}
1537 #endif
1538 	irq = per_cpu(ipi_to_irq, cpu)[vector];
1539 	BUG_ON(irq < 0);
1540 	notify_remote_via_irq(irq);
1541 }
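
/*
 * Sketch of the IPI path (illustrative; xen_resched_isr is hypothetical):
 * the SMP code binds one handler per IPI vector and per CPU, then uses
 * xen_send_IPI_one() to raise the event on the target CPU:
 *
 *	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
 *				    xen_resched_isr, IRQF_PERCPU,
 *				    "resched", NULL);
 *	...
 *	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 */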
1542 
1543 struct evtchn_loop_ctrl {
1544 	ktime_t timeout;
1545 	unsigned count;
1546 	bool defer_eoi;
1547 };
1548 
1549 void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
1550 {
1551 	int irq;
1552 	struct irq_info *info;
1553 
1554 	irq = get_evtchn_to_irq(port);
1555 	if (irq == -1)
1556 		return;
1557 
1558 	/*
1559 	 * Check for timeout every 256 events.
1560 	 * We are setting the timeout value only after the first 256
1561 	 * events in order to not hurt the common case of few loop
1562 	 * iterations. The 256 is basically an arbitrary value.
1563 	 *
1564 	 * If we hit the timeout we need to defer all further EOIs in order
1565 	 * to make sure we leave the event handling loop sooner rather than
1566 	 * later.
1567 	 */
1568 	if (!ctrl->defer_eoi && !(++ctrl->count & 0xff)) {
1569 		ktime_t kt = ktime_get();
1570 
1571 		if (!ctrl->timeout) {
1572 			kt = ktime_add_ms(kt,
1573 					  jiffies_to_msecs(event_loop_timeout));
1574 			ctrl->timeout = kt;
1575 		} else if (kt > ctrl->timeout) {
1576 			ctrl->defer_eoi = true;
1577 		}
1578 	}
1579 
1580 	info = info_for_irq(irq);
1581 	if (xchg_acquire(&info->is_active, 1))
1582 		return;
1583 
1584 	if (ctrl->defer_eoi) {
1585 		info->eoi_cpu = smp_processor_id();
1586 		info->irq_epoch = __this_cpu_read(irq_epoch);
1587 		info->eoi_time = get_jiffies_64() + event_eoi_delay;
1588 	}
1589 
1590 	generic_handle_irq(irq);
1591 }
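
/*
 * Worked example for the numbers above (assuming HZ=250, purely as an
 * illustration): the default event_loop_timeout of 2 jiffies gives the loop
 * an 8 ms budget before defer_eoi kicks in, and a deferred EOI is delayed
 * by event_eoi_delay = 10 jiffies, i.e. 40 ms, via the lateeoi worker.
 */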
1592 
1593 static DEFINE_PER_CPU(unsigned, xed_nesting_count);
1594 
1595 static void __xen_evtchn_do_upcall(void)
1596 {
1597 	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
1598 	int cpu = get_cpu();
1599 	unsigned count;
1600 	struct evtchn_loop_ctrl ctrl = { 0 };
1601 
1602 	/*
1603 	 * When closing an event channel the associated IRQ must not be freed
1604 	 * until all cpus have left the event handling loop. This is ensured
1605 	 * by taking the rcu_read_lock() while handling events, as freeing of
1606 	 * the IRQ is handled via queue_rcu_work() _after_ closing the event
1607 	 * channel.
1608 	 */
1609 	rcu_read_lock();
1610 
1611 	do {
1612 		vcpu_info->evtchn_upcall_pending = 0;
1613 
1614 		if (__this_cpu_inc_return(xed_nesting_count) - 1)
1615 			goto out;
1616 
1617 		xen_evtchn_handle_events(cpu, &ctrl);
1618 
1619 		BUG_ON(!irqs_disabled());
1620 
1621 		count = __this_cpu_read(xed_nesting_count);
1622 		__this_cpu_write(xed_nesting_count, 0);
1623 	} while (count != 1 || vcpu_info->evtchn_upcall_pending);
1624 
1625 out:
1626 	rcu_read_unlock();
1627 
1628 	/*
1629 	 * Increment irq_epoch only now to defer EOIs only for
1630 	 * xen_irq_lateeoi() invocations occurring from inside the loop
1631 	 * above.
1632 	 */
1633 	__this_cpu_inc(irq_epoch);
1634 
1635 	put_cpu();
1636 }
1637 
1638 void xen_evtchn_do_upcall(struct pt_regs *regs)
1639 {
1640 	struct pt_regs *old_regs = set_irq_regs(regs);
1641 
1642 	irq_enter();
1643 #ifdef CONFIG_X86
1644 	inc_irq_stat(irq_hv_callback_count);
1645 #endif
1646 
1647 	__xen_evtchn_do_upcall();
1648 
1649 	irq_exit();
1650 	set_irq_regs(old_regs);
1651 }
1652 
1653 void xen_hvm_evtchn_do_upcall(void)
1654 {
1655 	__xen_evtchn_do_upcall();
1656 }
1657 EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
1658 
1659 /* Rebind a new event channel to an existing irq. */
1660 void rebind_evtchn_irq(int evtchn, int irq)
1661 {
1662 	struct irq_info *info = info_for_irq(irq);
1663 
1664 	if (WARN_ON(!info))
1665 		return;
1666 
1667 	/* Make sure the irq is masked, since the new event channel
1668 	   will also be masked. */
1669 	disable_irq(irq);
1670 
1671 	mutex_lock(&irq_mapping_update_lock);
1672 
1673 	/* After resume the irq<->evtchn mappings are all cleared out */
1674 	BUG_ON(get_evtchn_to_irq(evtchn) != -1);
1675 	/* Expect irq to have been bound before,
1676 	   so there should be a proper type */
1677 	BUG_ON(info->type == IRQT_UNBOUND);
1678 
1679 	(void)xen_irq_info_evtchn_setup(irq, evtchn);
1680 
1681 	mutex_unlock(&irq_mapping_update_lock);
1682 
1683 	bind_evtchn_to_cpu(evtchn, info->cpu);
1684 	/* This will be deferred until interrupt is processed */
1685 	irq_set_affinity(irq, cpumask_of(info->cpu));
1686 
1687 	/* Unmask the event channel. */
1688 	enable_irq(irq);
1689 }
1690 
1691 /* Rebind an evtchn so that it gets delivered to a specific cpu */
1692 static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
1693 {
1694 	struct evtchn_bind_vcpu bind_vcpu;
1695 	evtchn_port_t evtchn = info ? info->evtchn : 0;
1696 
1697 	if (!VALID_EVTCHN(evtchn))
1698 		return -1;
1699 
1700 	if (!xen_support_evtchn_rebind())
1701 		return -1;
1702 
1703 	/* Send future instances of this interrupt to the target vcpu. */
1704 	bind_vcpu.port = evtchn;
1705 	bind_vcpu.vcpu = xen_vcpu_nr(tcpu);
1706 
1707 	/*
1708 	 * Mask the event while changing the VCPU binding to prevent
1709 	 * it being delivered on an unexpected VCPU.
1710 	 */
1711 	do_mask(info, EVT_MASK_REASON_TEMPORARY);
1712 
1713 	/*
1714 	 * If this fails, it usually just indicates that we're dealing with a
1715 	 * virq or IPI channel, which don't actually need to be rebound. Ignore
1716 	 * it, but don't do the xenlinux-level rebind in that case.
1717 	 */
1718 	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
1719 		bind_evtchn_to_cpu(evtchn, tcpu);
1720 
1721 	do_unmask(info, EVT_MASK_REASON_TEMPORARY);
1722 
1723 	return 0;
1724 }
1725 
1726 static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
1727 			    bool force)
1728 {
1729 	unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);
1730 	int ret = xen_rebind_evtchn_to_cpu(info_for_irq(data->irq), tcpu);
1731 
1732 	if (!ret)
1733 		irq_data_update_effective_affinity(data, cpumask_of(tcpu));
1734 
1735 	return ret;
1736 }
1737 
1738 /* To be called with desc->lock held. */
1739 int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu)
1740 {
1741 	struct irq_data *d = irq_desc_get_irq_data(desc);
1742 
1743 	return set_affinity_irq(d, cpumask_of(tcpu), false);
1744 }
1745 EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn);
1746 
1747 static void enable_dynirq(struct irq_data *data)
1748 {
1749 	struct irq_info *info = info_for_irq(data->irq);
1750 	evtchn_port_t evtchn = info ? info->evtchn : 0;
1751 
1752 	if (VALID_EVTCHN(evtchn))
1753 		do_unmask(info, EVT_MASK_REASON_EXPLICIT);
1754 }
1755 
1756 static void disable_dynirq(struct irq_data *data)
1757 {
1758 	struct irq_info *info = info_for_irq(data->irq);
1759 	evtchn_port_t evtchn = info ? info->evtchn : 0;
1760 
1761 	if (VALID_EVTCHN(evtchn))
1762 		do_mask(info, EVT_MASK_REASON_EXPLICIT);
1763 }
1764 
1765 static void ack_dynirq(struct irq_data *data)
1766 {
1767 	struct irq_info *info = info_for_irq(data->irq);
1768 	evtchn_port_t evtchn = info ? info->evtchn : 0;
1769 
1770 	if (!VALID_EVTCHN(evtchn))
1771 		return;
1772 
1773 	if (unlikely(irqd_is_setaffinity_pending(data)) &&
1774 	    likely(!irqd_irq_disabled(data))) {
1775 		do_mask(info, EVT_MASK_REASON_TEMPORARY);
1776 
1777 		event_handler_exit(info);
1778 
1779 		irq_move_masked_irq(data);
1780 
1781 		do_unmask(info, EVT_MASK_REASON_TEMPORARY);
1782 	} else
1783 		event_handler_exit(info);
1784 }
1785 
1786 static void mask_ack_dynirq(struct irq_data *data)
1787 {
1788 	disable_dynirq(data);
1789 	ack_dynirq(data);
1790 }
1791 
1792 static void lateeoi_ack_dynirq(struct irq_data *data)
1793 {
1794 	struct irq_info *info = info_for_irq(data->irq);
1795 	evtchn_port_t evtchn = info ? info->evtchn : 0;
1796 
1797 	if (!VALID_EVTCHN(evtchn))
1798 		return;
1799 
1800 	do_mask(info, EVT_MASK_REASON_EOI_PENDING);
1801 
1802 	if (unlikely(irqd_is_setaffinity_pending(data)) &&
1803 	    likely(!irqd_irq_disabled(data))) {
1804 		do_mask(info, EVT_MASK_REASON_TEMPORARY);
1805 
1806 		clear_evtchn(evtchn);
1807 
1808 		irq_move_masked_irq(data);
1809 
1810 		do_unmask(info, EVT_MASK_REASON_TEMPORARY);
1811 	} else
1812 		clear_evtchn(evtchn);
1813 }
1814 
1815 static void lateeoi_mask_ack_dynirq(struct irq_data *data)
1816 {
1817 	struct irq_info *info = info_for_irq(data->irq);
1818 	evtchn_port_t evtchn = info ? info->evtchn : 0;
1819 
1820 	if (VALID_EVTCHN(evtchn)) {
1821 		do_mask(info, EVT_MASK_REASON_EXPLICIT);
1822 		ack_dynirq(data);
1823 	}
1824 }
1825 
1826 static int retrigger_dynirq(struct irq_data *data)
1827 {
1828 	struct irq_info *info = info_for_irq(data->irq);
1829 	evtchn_port_t evtchn = info ? info->evtchn : 0;
1830 
1831 	if (!VALID_EVTCHN(evtchn))
1832 		return 0;
1833 
1834 	do_mask(info, EVT_MASK_REASON_TEMPORARY);
1835 	set_evtchn(evtchn);
1836 	do_unmask(info, EVT_MASK_REASON_TEMPORARY);
1837 
1838 	return 1;
1839 }
1840 
1841 static void restore_pirqs(void)
1842 {
1843 	int pirq, rc, irq, gsi;
1844 	struct physdev_map_pirq map_irq;
1845 	struct irq_info *info;
1846 
1847 	list_for_each_entry(info, &xen_irq_list_head, list) {
1848 		if (info->type != IRQT_PIRQ)
1849 			continue;
1850 
1851 		pirq = info->u.pirq.pirq;
1852 		gsi = info->u.pirq.gsi;
1853 		irq = info->irq;
1854 
1855 		/* save/restore of PT devices doesn't work, so at this point the
1856 		 * only devices present are GSI based emulated devices */
1857 		if (!gsi)
1858 			continue;
1859 
1860 		map_irq.domid = DOMID_SELF;
1861 		map_irq.type = MAP_PIRQ_TYPE_GSI;
1862 		map_irq.index = gsi;
1863 		map_irq.pirq = pirq;
1864 
1865 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
1866 		if (rc) {
1867 			pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
1868 				gsi, irq, pirq, rc);
1869 			xen_free_irq(irq);
1870 			continue;
1871 		}
1872 
1873 		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
1874 
1875 		__startup_pirq(irq);
1876 	}
1877 }
1878 
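/*
 * The old event channels are gone after resume: rebind every VIRQ in use
 * on this cpu to a fresh event channel and record the new mapping.
 */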
static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq;
	evtchn_port_t evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = xen_vcpu_nr(cpu);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		(void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

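/* As for the VIRQs above, but rebinding this cpu's IPI event channels. */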
static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq;
	evtchn_port_t evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = xen_vcpu_nr(cpu);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		(void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	struct irq_info *info = info_for_irq(irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn))
		event_handler_exit(info);
}
EXPORT_SYMBOL(xen_clear_irq_pending);
void xen_set_irq_pending(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);
/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}

/* Check whether the IRQ line is shared with other guests. */
int xen_test_irq_shared(int irq)
{
	struct irq_info *info = info_for_irq(irq);
	struct physdev_irq_status_query irq_status;

	if (WARN_ON(!info))
		return -ENOENT;

	irq_status.irq = info->u.pirq.pirq;

	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		return 0;
	return !(irq_status.flags & XENIRQSTAT_shared);
}
EXPORT_SYMBOL_GPL(xen_test_irq_shared);

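/*
 * Resume path: the event-channel space did not survive suspend, so zap all
 * existing irq <-> event-channel bindings and rebuild the VIRQ, IPI and
 * PIRQ mappings from the recorded irq_info.
 */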
void xen_irq_resume(void)
{
	unsigned int cpu;
	struct irq_info *info;

	/* New event-channel space is not 'live' yet. */
	xen_evtchn_resume();

	/* No IRQ <-> event-channel mappings. */
	list_for_each_entry(info, &xen_irq_list_head, list)
		info->evtchn = 0; /* zap event-channel binding */

	clear_evtchn_to_irq_all();

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}

	restore_pirqs();
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name			= "xen-dyn",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
	.irq_mask_ack		= mask_ack_dynirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_lateeoi_chip __read_mostly = {
	/* The chip name needs to contain "xen-dyn" for irqbalance to work. */
	.name			= "xen-dyn-lateeoi",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= lateeoi_ack_dynirq,
	.irq_mask_ack		= lateeoi_mask_ack_dynirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name			= "xen-pirq",

	.irq_startup		= startup_pirq,
	.irq_shutdown		= shutdown_pirq,
	.irq_enable		= enable_pirq,
	.irq_disable		= disable_pirq,

	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= eoi_pirq,
	.irq_eoi		= eoi_pirq,
	.irq_mask_ack		= mask_ack_pirq,

	.irq_set_affinity	= set_affinity_irq,

	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name			= "xen-percpu",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
};

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;

	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			pr_err("Request for Xen HVM callback vector failed\n");
			xen_have_vector_callback = 0;
			return;
		}
		pr_info_once("Xen HVM callback vector for event delivery is enabled\n");
		alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
				xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif

bool xen_fifo_events = true;
module_param_named(fifo_events, xen_fifo_events, bool, 0);

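/*
 * CPU hotplug callbacks: set up the per-cpu delayed-EOI state and any
 * ABI-specific per-cpu data when a cpu is prepared, and tear the latter
 * down again when the cpu dies.
 */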
static int xen_evtchn_cpu_prepare(unsigned int cpu)
{
	int ret = 0;

	xen_cpu_init_eoi(cpu);

	if (evtchn_ops->percpu_init)
		ret = evtchn_ops->percpu_init(cpu);

	return ret;
}

static int xen_evtchn_cpu_dead(unsigned int cpu)
{
	int ret = 0;

	if (evtchn_ops->percpu_deinit)
		ret = evtchn_ops->percpu_deinit(cpu);

	return ret;
}

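/*
 * Boot-time setup: select the event-channel ABI (FIFO if requested and
 * available, 2-level otherwise), register the hotplug callbacks, allocate
 * the evtchn_to_irq table, mask every event channel and do the
 * arch-specific PIRQ / callback-vector initialisation.
 */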
void __init xen_init_IRQ(void)
{
	int ret = -EINVAL;
	unsigned int evtchn;

	if (xen_fifo_events)
		ret = xen_evtchn_fifo_init();
	if (ret < 0) {
		xen_evtchn_2l_init();
		xen_fifo_events = false;
	}

	xen_cpu_init_eoi(smp_processor_id());

	cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
				  "xen/evtchn:prepare",
				  xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);

	evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
				sizeof(*evtchn_to_irq), GFP_KERNEL);
	BUG_ON(!evtchn_to_irq);

	/* No event channels are 'live' right now. */
	for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
		mask_evtchn(evtchn);

	pirq_needs_eoi = pirq_needs_eoi_flag;

#ifdef CONFIG_X86
	if (xen_pv_domain()) {
		irq_ctx_init(smp_processor_id());
		if (xen_initial_domain())
			pci_xen_initial_domain();
	}
	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_callback_vector();

	if (xen_hvm_domain()) {
		native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so that
		 * __acpi_register_gsi can point at the right function */
		pci_xen_hvm_init();
	} else {
		int rc;
		struct physdev_pirq_eoi_gmfn eoi_gmfn;

		pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
		eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
		if (rc != 0) {
			free_page((unsigned long) pirq_eoi_map);
			pirq_eoi_map = NULL;
		} else
			pirq_needs_eoi = pirq_check_eoi_map;
	}
#endif
}