/******************************************************************************
 * evtchn.c
 *
 * Driver for receiving and demuxing event-channel signals.
 *
 * Copyright (c) 2004-2005, K A Fraser
 * Multi-process extensions Copyright (c) 2004, Steven Smith
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/poll.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/evtchn.h>
#include <xen/xen-ops.h>
#include <asm/xen/hypervisor.h>

struct per_user_data {
	struct mutex bind_mutex; /* serialize bind/unbind operations */
	struct rb_root evtchns;
	unsigned int nr_evtchns;

	/* Notification ring, accessed via /dev/xen/evtchn. */
	unsigned int ring_size;
	evtchn_port_t *ring;
	unsigned int ring_cons, ring_prod, ring_overflow;
	struct mutex ring_cons_mutex; /* protect against concurrent readers */
	spinlock_t ring_prod_lock; /* protect against concurrent interrupts */

	/* Processes wait on this queue when ring is empty. */
	wait_queue_head_t evtchn_wait;
	struct fasync_struct *evtchn_async_queue;
	const char *name;

	domid_t restrict_domid;
};

#define UNRESTRICTED_DOMID ((domid_t)-1)
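
/*
 * Note on the ring bookkeeping above: ring_cons and ring_prod are
 * free-running counters and are never reduced modulo ring_size. Because
 * ring_size is always a power of two (see evtchn_resize_ring()), a slot
 * index is recovered by masking with ring_size - 1, and the number of
 * pending entries is simply ring_prod - ring_cons, which stays correct
 * even across unsigned wrap-around.
 */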

struct user_evtchn {
	struct rb_node node;
	struct per_user_data *user;
	unsigned port;
	bool enabled;
};

static void evtchn_free_ring(evtchn_port_t *ring)
{
	kvfree(ring);
}

static unsigned int evtchn_ring_offset(struct per_user_data *u,
				       unsigned int idx)
{
	return idx & (u->ring_size - 1);
}

static evtchn_port_t *evtchn_ring_entry(struct per_user_data *u,
					unsigned int idx)
{
	return u->ring + evtchn_ring_offset(u, idx);
}
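
/*
 * For illustration: with ring_size == 64, a free-running index of 70 maps
 * to slot 70 & 63 == 6. The mask trick depends on the power-of-two sizes
 * that evtchn_resize_ring() maintains (64, 128, 256, ...); it would
 * silently address the wrong slot for any other size.
 */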

static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
{
	struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL;

	u->nr_evtchns++;

	while (*new) {
		struct user_evtchn *this;

		this = rb_entry(*new, struct user_evtchn, node);

		parent = *new;
		if (this->port < evtchn->port)
			new = &((*new)->rb_left);
		else if (this->port > evtchn->port)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&evtchn->node, parent, new);
	rb_insert_color(&evtchn->node, &u->evtchns);

	return 0;
}

static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
{
	u->nr_evtchns--;
	rb_erase(&evtchn->node, &u->evtchns);
	kfree(evtchn);
}

static struct user_evtchn *find_evtchn(struct per_user_data *u, unsigned port)
{
	struct rb_node *node = u->evtchns.rb_node;

	while (node) {
		struct user_evtchn *evtchn;

		evtchn = rb_entry(node, struct user_evtchn, node);

		if (evtchn->port < port)
			node = node->rb_left;
		else if (evtchn->port > port)
			node = node->rb_right;
		else
			return evtchn;
	}
	return NULL;
}
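
/*
 * Both add_evtchn() and find_evtchn() descend left for larger ports, the
 * mirror image of the usual rbtree convention. That is harmless: the only
 * requirement is that insertion and lookup agree on one total order.
 */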

static irqreturn_t evtchn_interrupt(int irq, void *data)
{
	struct user_evtchn *evtchn = data;
	struct per_user_data *u = evtchn->user;

	WARN(!evtchn->enabled,
	     "Interrupt for port %d, but apparently not enabled; per-user %p\n",
	     evtchn->port, u);

	evtchn->enabled = false;

	spin_lock(&u->ring_prod_lock);

	if ((u->ring_prod - u->ring_cons) < u->ring_size) {
		*evtchn_ring_entry(u, u->ring_prod) = evtchn->port;
		wmb(); /* Ensure ring contents visible */
		if (u->ring_cons == u->ring_prod++) {
			wake_up_interruptible(&u->evtchn_wait);
			kill_fasync(&u->evtchn_async_queue,
				    SIGIO, POLL_IN);
		}
	} else
		u->ring_overflow = 1;

	spin_unlock(&u->ring_prod_lock);

	return IRQ_HANDLED;
}
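
/*
 * The handler above is one half of a lazy-EOI handshake: it marks the
 * channel disabled and queues the port for userspace, and evtchn_write()
 * re-enables the channel and issues the delayed EOI via xen_irq_lateeoi()
 * once userspace has written the port number back. Also note that waiters
 * are only woken when the ring goes from empty to non-empty (ring_cons ==
 * old ring_prod); in every other case a reader has already been notified.
 */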

static ssize_t evtchn_read(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	int rc;
	unsigned int c, p, bytes1 = 0, bytes2 = 0;
	struct per_user_data *u = file->private_data;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	if (count == 0)
		return 0;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	for (;;) {
		mutex_lock(&u->ring_cons_mutex);

		rc = -EFBIG;
		if (u->ring_overflow)
			goto unlock_out;

		c = u->ring_cons;
		p = u->ring_prod;
		if (c != p)
			break;

		mutex_unlock(&u->ring_cons_mutex);

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		rc = wait_event_interruptible(u->evtchn_wait,
					      u->ring_cons != u->ring_prod);
		if (rc)
			return rc;
	}

	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
	if (((c ^ p) & u->ring_size) != 0) {
		bytes1 = (u->ring_size - evtchn_ring_offset(u, c)) *
			sizeof(evtchn_port_t);
		bytes2 = evtchn_ring_offset(u, p) * sizeof(evtchn_port_t);
	} else {
		bytes1 = (p - c) * sizeof(evtchn_port_t);
		bytes2 = 0;
	}

	/* Truncate chunks according to caller's maximum byte count. */
	if (bytes1 > count) {
		bytes1 = count;
		bytes2 = 0;
	} else if ((bytes1 + bytes2) > count) {
		bytes2 = count - bytes1;
	}

	rc = -EFAULT;
	rmb(); /* Ensure that we see the port before we copy it. */
	if (copy_to_user(buf, evtchn_ring_entry(u, c), bytes1) ||
	    ((bytes2 != 0) &&
	     copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
		goto unlock_out;

	u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
	rc = bytes1 + bytes2;

 unlock_out:
	mutex_unlock(&u->ring_cons_mutex);
	return rc;
}
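
/*
 * Worked example of the wrap test above, assuming ring_size == 64 and
 * 32-bit ports: with c == 60 and p == 68, (c ^ p) & 64 is non-zero, so
 * the pending entries straddle the wrap point. bytes1 covers slots
 * 60..63 (16 bytes starting at offset 60) and bytes2 covers slots 0..3
 * (16 bytes from the start of the ring). In the non-wrapping case a
 * single copy of (p - c) ports suffices.
 */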

static ssize_t evtchn_write(struct file *file, const char __user *buf,
			    size_t count, loff_t *ppos)
{
	int rc, i;
	evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
	struct per_user_data *u = file->private_data;

	if (kbuf == NULL)
		return -ENOMEM;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	rc = 0;
	if (count == 0)
		goto out;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	rc = -EFAULT;
	if (copy_from_user(kbuf, buf, count) != 0)
		goto out;

	mutex_lock(&u->bind_mutex);

	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
		unsigned port = kbuf[i];
		struct user_evtchn *evtchn;

		evtchn = find_evtchn(u, port);
		if (evtchn && !evtchn->enabled) {
			evtchn->enabled = true;
			xen_irq_lateeoi(irq_from_evtchn(port), 0);
		}
	}

	mutex_unlock(&u->bind_mutex);

	rc = count;

 out:
	free_page((unsigned long)kbuf);
	return rc;
}
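
/*
 * From userspace, the read()/write() pair forms a simple event loop. A
 * minimal sketch (hypothetical consumer code, error handling elided):
 *
 *	evtchn_port_t port;
 *	while (read(fd, &port, sizeof(port)) == sizeof(port)) {
 *		handle_event(port);              // application-defined
 *		write(fd, &port, sizeof(port));  // re-enable/EOI the port
 *	}
 *
 * Skipping the write() leaves the channel disabled, which is how a
 * consumer applies back-pressure to the event source.
 */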

static int evtchn_resize_ring(struct per_user_data *u)
{
	unsigned int new_size;
	evtchn_port_t *new_ring, *old_ring;

	/*
	 * Ensure the ring is large enough to capture all possible
	 * events. i.e., one free slot for each bound event.
	 */
	if (u->nr_evtchns <= u->ring_size)
		return 0;

	if (u->ring_size == 0)
		new_size = 64;
	else
		new_size = 2 * u->ring_size;

	new_ring = kvmalloc_array(new_size, sizeof(*new_ring), GFP_KERNEL);
	if (!new_ring)
		return -ENOMEM;

	old_ring = u->ring;

	/*
	 * Access to the ring contents is serialized by either the
	 * prod /or/ cons lock so take both when resizing.
	 */
	mutex_lock(&u->ring_cons_mutex);
	spin_lock_irq(&u->ring_prod_lock);

	/*
	 * Copy the old ring contents to the new ring.
	 *
	 * To take care of wrapping, a full ring, and the new index
	 * pointing into the second half, simply copy the old contents
	 * twice.
	 *
	 * +---------+    +------------------+
	 * |34567  12| -> |34567  1234567  12|
	 * +-----p-c-+    +-------c------p---+
	 */
	memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
	memcpy(new_ring + u->ring_size, old_ring,
	       u->ring_size * sizeof(*u->ring));

	u->ring = new_ring;
	u->ring_size = new_size;

	spin_unlock_irq(&u->ring_prod_lock);
	mutex_unlock(&u->ring_cons_mutex);

	evtchn_free_ring(old_ring);

	return 0;
}
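
/*
 * Why the double copy works: the live entries occupy free-running indices
 * [ring_cons, ring_prod), at most ring_size of them. The double copy makes
 * new_ring periodic with period old_size, i.e. new_ring[j] == old_ring[j &
 * (old_size - 1)] for all j < new_size, so re-masking any index with the
 * new, larger mask still finds the same entry. E.g. with old size 8,
 * cons == 7, prod == 10: entry 7 is read from the first copy and entries
 * 8 and 9 from the second, without touching ring_cons or ring_prod.
 */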

static int evtchn_bind_to_user(struct per_user_data *u, int port)
{
	struct user_evtchn *evtchn;
	struct evtchn_close close;
	int rc = 0;

	/*
	 * Ports are never reused, so every caller should pass in a
	 * unique port.
	 *
	 * (Locking not necessary because we haven't registered the
	 * interrupt handler yet, and our caller has already
	 * serialized bind operations.)
	 */

	evtchn = kzalloc(sizeof(*evtchn), GFP_KERNEL);
	if (!evtchn)
		return -ENOMEM;

	evtchn->user = u;
	evtchn->port = port;
	evtchn->enabled = true; /* start enabled */

	rc = add_evtchn(u, evtchn);
	if (rc < 0)
		goto err;

	rc = evtchn_resize_ring(u);
	if (rc < 0)
		goto err;

	rc = bind_evtchn_to_irqhandler_lateeoi(port, evtchn_interrupt, 0,
					       u->name, evtchn);
	if (rc < 0)
		goto err;

	rc = evtchn_make_refcounted(port);
	return rc;

err:
	/* bind failed, should close the port now */
	close.port = port;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();
	del_evtchn(u, evtchn);
	return rc;
}
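
/*
 * Every caller has already asked Xen for the port (via EVTCHNOP_bind_virq,
 * EVTCHNOP_bind_interdomain or EVTCHNOP_alloc_unbound) before calling
 * evtchn_bind_to_user(), which is why the error path must hand the port
 * back with EVTCHNOP_close itself: nobody else will release it.
 */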

static void evtchn_unbind_from_user(struct per_user_data *u,
				    struct user_evtchn *evtchn)
{
	int irq = irq_from_evtchn(evtchn->port);

	BUG_ON(irq < 0);

	unbind_from_irqhandler(irq, evtchn);

	del_evtchn(u, evtchn);
}
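
/*
 * Both callers (IOCTL_EVTCHN_UNBIND and evtchn_release()) disable the IRQ
 * before calling evtchn_unbind_from_user(), so evtchn_interrupt() cannot
 * still be running, or fire again, while del_evtchn() frees the node.
 */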

static DEFINE_PER_CPU(int, bind_last_selected_cpu);

static void evtchn_bind_interdom_next_vcpu(int evtchn)
{
	unsigned int selected_cpu, irq;
	struct irq_desc *desc;
	unsigned long flags;

	irq = irq_from_evtchn(evtchn);
	desc = irq_to_desc(irq);

	if (!desc)
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	selected_cpu = this_cpu_read(bind_last_selected_cpu);
	selected_cpu = cpumask_next_and(selected_cpu,
			desc->irq_common_data.affinity, cpu_online_mask);

	if (unlikely(selected_cpu >= nr_cpu_ids))
		selected_cpu = cpumask_first_and(desc->irq_common_data.affinity,
				cpu_online_mask);

	this_cpu_write(bind_last_selected_cpu, selected_cpu);

	/* unmask expects irqs to be disabled */
	xen_set_affinity_evtchn(desc, selected_cpu);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
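
/*
 * Newly bound interdomain channels would otherwise all fire on the CPU
 * that performed the bind. The per-CPU cursor above spreads them
 * round-robin over the CPUs that are both online and in the IRQ's
 * affinity mask, wrapping back to the first eligible CPU when the cursor
 * runs off the end of the mask.
 */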

static long evtchn_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	int rc;
	struct per_user_data *u = file->private_data;
	void __user *uarg = (void __user *) arg;

	/* Prevent bind from racing with unbind */
	mutex_lock(&u->bind_mutex);

	switch (cmd) {
	case IOCTL_EVTCHN_BIND_VIRQ: {
		struct ioctl_evtchn_bind_virq bind;
		struct evtchn_bind_virq bind_virq;

		rc = -EACCES;
		if (u->restrict_domid != UNRESTRICTED_DOMID)
			break;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		bind_virq.virq = bind.virq;
		bind_virq.vcpu = xen_vcpu_nr(0);
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						 &bind_virq);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, bind_virq.port);
		if (rc == 0)
			rc = bind_virq.port;
		break;
	}

	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
		struct ioctl_evtchn_bind_interdomain bind;
		struct evtchn_bind_interdomain bind_interdomain;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		rc = -EACCES;
		if (u->restrict_domid != UNRESTRICTED_DOMID &&
		    u->restrict_domid != bind.remote_domain)
			break;

		bind_interdomain.remote_dom = bind.remote_domain;
		bind_interdomain.remote_port = bind.remote_port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
						 &bind_interdomain);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, bind_interdomain.local_port);
		if (rc == 0) {
			rc = bind_interdomain.local_port;
			evtchn_bind_interdom_next_vcpu(rc);
		}
		break;
	}

	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
		struct ioctl_evtchn_bind_unbound_port bind;
		struct evtchn_alloc_unbound alloc_unbound;

		rc = -EACCES;
		if (u->restrict_domid != UNRESTRICTED_DOMID)
			break;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		alloc_unbound.dom = DOMID_SELF;
		alloc_unbound.remote_dom = bind.remote_domain;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
						 &alloc_unbound);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, alloc_unbound.port);
		if (rc == 0)
			rc = alloc_unbound.port;
		break;
	}

	case IOCTL_EVTCHN_UNBIND: {
		struct ioctl_evtchn_unbind unbind;
		struct user_evtchn *evtchn;

		rc = -EFAULT;
		if (copy_from_user(&unbind, uarg, sizeof(unbind)))
			break;

		rc = -EINVAL;
		if (unbind.port >= xen_evtchn_nr_channels())
			break;

		rc = -ENOTCONN;
		evtchn = find_evtchn(u, unbind.port);
		if (!evtchn)
			break;

		disable_irq(irq_from_evtchn(unbind.port));
		evtchn_unbind_from_user(u, evtchn);
		rc = 0;
		break;
	}

	case IOCTL_EVTCHN_NOTIFY: {
		struct ioctl_evtchn_notify notify;
		struct user_evtchn *evtchn;

		rc = -EFAULT;
		if (copy_from_user(&notify, uarg, sizeof(notify)))
			break;

		rc = -ENOTCONN;
		evtchn = find_evtchn(u, notify.port);
		if (evtchn) {
			notify_remote_via_evtchn(notify.port);
			rc = 0;
		}
		break;
	}

	case IOCTL_EVTCHN_RESET: {
		/* Initialise the ring to empty. Clear errors. */
		mutex_lock(&u->ring_cons_mutex);
		spin_lock_irq(&u->ring_prod_lock);
		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
		spin_unlock_irq(&u->ring_prod_lock);
		mutex_unlock(&u->ring_cons_mutex);
		rc = 0;
		break;
	}

	case IOCTL_EVTCHN_RESTRICT_DOMID: {
		struct ioctl_evtchn_restrict_domid ierd;

		rc = -EACCES;
		if (u->restrict_domid != UNRESTRICTED_DOMID)
			break;

		rc = -EFAULT;
		if (copy_from_user(&ierd, uarg, sizeof(ierd)))
			break;

		rc = -EINVAL;
		if (ierd.domid == 0 || ierd.domid >= DOMID_FIRST_RESERVED)
			break;

		u->restrict_domid = ierd.domid;
		rc = 0;

		break;
	}

	default:
		rc = -ENOSYS;
		break;
	}
	mutex_unlock(&u->bind_mutex);

	return rc;
}
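
/*
 * A minimal sketch of the userspace side of this ioctl interface
 * (hypothetical example code, error handling elided):
 *
 *	struct ioctl_evtchn_bind_unbound_port bind = {
 *		.remote_domain = remote_domid,	// peer allowed to bind
 *	};
 *	int fd = open("/dev/xen/evtchn", O_RDWR);
 *	int port = ioctl(fd, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);
 *
 * The returned local port is then advertised to the peer (typically via
 * XenStore), and events are consumed with the read()/write() loop shown
 * after evtchn_write() above.
 */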

static __poll_t evtchn_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = EPOLLOUT | EPOLLWRNORM;
	struct per_user_data *u = file->private_data;

	poll_wait(file, &u->evtchn_wait, wait);
	if (u->ring_cons != u->ring_prod)
		mask |= EPOLLIN | EPOLLRDNORM;
	if (u->ring_overflow)
		mask = EPOLLERR;
	return mask;
}
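
/*
 * Note the overflow case assigns rather than ORs: once the ring has
 * overflowed, EPOLLERR replaces the readable/writable bits entirely,
 * and the only way forward for userspace is IOCTL_EVTCHN_RESET (or
 * closing the device).
 */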

static int evtchn_fasync(int fd, struct file *filp, int on)
{
	struct per_user_data *u = filp->private_data;

	return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
}

static int evtchn_open(struct inode *inode, struct file *filp)
{
	struct per_user_data *u;

	u = kzalloc(sizeof(*u), GFP_KERNEL);
	if (u == NULL)
		return -ENOMEM;

	u->name = kasprintf(GFP_KERNEL, "evtchn:%s", current->comm);
	if (u->name == NULL) {
		kfree(u);
		return -ENOMEM;
	}

	init_waitqueue_head(&u->evtchn_wait);

	mutex_init(&u->bind_mutex);
	mutex_init(&u->ring_cons_mutex);
	spin_lock_init(&u->ring_prod_lock);

	u->restrict_domid = UNRESTRICTED_DOMID;

	filp->private_data = u;

	return nonseekable_open(inode, filp);
}

static int evtchn_release(struct inode *inode, struct file *filp)
{
	struct per_user_data *u = filp->private_data;
	struct rb_node *node;

	while ((node = u->evtchns.rb_node)) {
		struct user_evtchn *evtchn;

		evtchn = rb_entry(node, struct user_evtchn, node);
		disable_irq(irq_from_evtchn(evtchn->port));
		evtchn_unbind_from_user(u, evtchn);
	}

	evtchn_free_ring(u->ring);
	kfree(u->name);
	kfree(u);

	return 0;
}

static const struct file_operations evtchn_fops = {
	.owner = THIS_MODULE,
	.read = evtchn_read,
	.write = evtchn_write,
	.unlocked_ioctl = evtchn_ioctl,
	.poll = evtchn_poll,
	.fasync = evtchn_fasync,
	.open = evtchn_open,
	.release = evtchn_release,
	.llseek = no_llseek,
};

static struct miscdevice evtchn_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/evtchn",
	.fops = &evtchn_fops,
};

static int __init evtchn_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	/* Create '/dev/xen/evtchn'. */
	err = misc_register(&evtchn_miscdev);
	if (err != 0) {
		pr_err("Could not register /dev/xen/evtchn\n");
		return err;
	}

	pr_info("Event-channel device installed\n");

	return 0;
}

static void __exit evtchn_cleanup(void)
{
	misc_deregister(&evtchn_miscdev);
}

module_init(evtchn_init);
module_exit(evtchn_cleanup);

MODULE_LICENSE("GPL");