/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "virtio_pci_common.h"

static bool force_legacy = false;

#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
module_param(force_legacy, bool, 0444);
MODULE_PARM_DESC(force_legacy,
		 "Force legacy mode for transitional virtio 1 devices");
#endif

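/*
 * Note: when the kernel is built with CONFIG_VIRTIO_PCI_LEGACY, loading
 * the module with "force_legacy=1" (or booting with
 * "virtio_pci.force_legacy=1" on the kernel command line) makes
 * transitional devices bind through the legacy interface even when they
 * also support virtio 1.
 */
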
/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i;

	if (vp_dev->intx_enabled)
		synchronize_irq(vp_dev->pci_dev->irq);

	for (i = 0; i < vp_dev->msix_vectors; ++i)
		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
}

/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq)
{
	/* we write the queue's selector into the notification register to
	 * signal the other end */
	iowrite16(vq->index, (void __iomem *)vq->priv);
	return true;
}

/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;

	virtio_config_changed(&vp_dev->vdev);
	return IRQ_HANDLED;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	struct virtio_pci_vq_info *info;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_for_each_entry(info, &vp_dev->virtqueues, node) {
		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	return ret;
}

/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	u8 isr;

	/* reading the ISR has the effect of also clearing it so it's very
	 * important to save off the value. */
	isr = ioread8(vp_dev->isr);

	/* It's definitely not us if the ISR was not high */
	if (!isr)
		return IRQ_NONE;

	/* Configuration change? Tell driver if it wants to know. */
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		vp_config_changed(irq, opaque);

	return vp_vring_interrupt(irq, opaque);
}

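/*
 * Allocate @nvectors MSI-X vectors and claim the first one for
 * configuration change interrupts.  When @per_vq_vectors is false, a
 * second, shared vector is also requested for all virtqueue interrupts.
 */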
static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
				   bool per_vq_vectors, struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	const char *name = dev_name(&vp_dev->vdev.dev);
	unsigned flags = PCI_IRQ_MSIX;
	unsigned i, v;
	int err = -ENOMEM;

	vp_dev->msix_vectors = nvectors;

	vp_dev->msix_names = kmalloc_array(nvectors,
					   sizeof(*vp_dev->msix_names),
					   GFP_KERNEL);
	if (!vp_dev->msix_names)
		goto error;
	vp_dev->msix_affinity_masks
		= kcalloc(nvectors, sizeof(*vp_dev->msix_affinity_masks),
			  GFP_KERNEL);
	if (!vp_dev->msix_affinity_masks)
		goto error;
	for (i = 0; i < nvectors; ++i)
		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
				       GFP_KERNEL))
			goto error;

	if (desc) {
		flags |= PCI_IRQ_AFFINITY;
		desc->pre_vectors++; /* virtio config vector */
	}

	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
					     nvectors, flags, desc);
	if (err < 0)
		goto error;
	vp_dev->msix_enabled = 1;

	/* Set the vector used for configuration */
	v = vp_dev->msix_used_vectors;
	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
		 "%s-config", name);
	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
			  vp_config_changed, 0, vp_dev->msix_names[v],
			  vp_dev);
	if (err)
		goto error;
	++vp_dev->msix_used_vectors;

	v = vp_dev->config_vector(vp_dev, v);
	/* Verify we had enough resources to assign the vector */
	if (v == VIRTIO_MSI_NO_VECTOR) {
		err = -EBUSY;
		goto error;
	}

	if (!per_vq_vectors) {
		/* Shared vector for all VQs */
		v = vp_dev->msix_used_vectors;
		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
			 "%s-virtqueues", name);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
				  vp_dev);
		if (err)
			goto error;
		++vp_dev->msix_used_vectors;
	}
	return 0;
error:
	return err;
}

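/*
 * Allocate the per-vq bookkeeping info and hand off to the
 * version-specific setup_vq() hook.  Queues with a callback are linked
 * onto vp_dev->virtqueues so a shared interrupt handler can walk them.
 */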
static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name,
				     bool ctx,
				     u16 msix_vec)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
	struct virtqueue *vq;
	unsigned long flags;

	/* fill out our structure that represents an active queue */
	if (!info)
		return ERR_PTR(-ENOMEM);

	vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx,
			      msix_vec);
	if (IS_ERR(vq))
		goto out_info;

	info->vq = vq;
	if (callback) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_add(&info->node, &vp_dev->virtqueues);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	} else {
		INIT_LIST_HEAD(&info->node);
	}

	vp_dev->vqs[index] = info;
	return vq;

out_info:
	kfree(info);
	return vq;
}

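/* Unlink a vq from the shared-interrupt list and tear it down. */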
static void vp_del_vq(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	vp_dev->del_vq(info);
	kfree(info);
}

/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq, *n;
	int i;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		if (vp_dev->per_vq_vectors) {
			int v = vp_dev->vqs[vq->index]->msix_vector;

			if (v != VIRTIO_MSI_NO_VECTOR) {
				int irq = pci_irq_vector(vp_dev->pci_dev, v);

				irq_set_affinity_hint(irq, NULL);
				free_irq(irq, vq);
			}
		}
		vp_del_vq(vq);
	}
	vp_dev->per_vq_vectors = false;

	if (vp_dev->intx_enabled) {
		free_irq(vp_dev->pci_dev->irq, vp_dev);
		vp_dev->intx_enabled = 0;
	}

	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
		free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);

	if (vp_dev->msix_affinity_masks) {
		for (i = 0; i < vp_dev->msix_vectors; i++)
			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
	}

	if (vp_dev->msix_enabled) {
		/* Disable the vector used for configuration */
		vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);

		pci_free_irq_vectors(vp_dev->pci_dev);
		vp_dev->msix_enabled = 0;
	}

	vp_dev->msix_vectors = 0;
	vp_dev->msix_used_vectors = 0;
	kfree(vp_dev->msix_names);
	vp_dev->msix_names = NULL;
	kfree(vp_dev->msix_affinity_masks);
	vp_dev->msix_affinity_masks = NULL;
	kfree(vp_dev->vqs);
	vp_dev->vqs = NULL;
}

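/*
 * Set up all requested vqs using MSI-X interrupts.  With
 * @per_vq_vectors each vq with a callback gets its own vector (the
 * preferred layout); otherwise all vqs share a single vector next to
 * the config vector.
 */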
static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
			    struct virtqueue *vqs[], vq_callback_t *callbacks[],
			    const char * const names[], bool per_vq_vectors,
			    const bool *ctx,
			    struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u16 msix_vec;
	int i, err, nvectors, allocated_vectors;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	if (per_vq_vectors) {
		/* Best option: one for change interrupt, one per vq. */
		nvectors = 1;
		for (i = 0; i < nvqs; ++i)
			if (callbacks[i])
				++nvectors;
	} else {
		/* Second best: one for change, shared for all vqs. */
		nvectors = 2;
	}

	err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
				      per_vq_vectors ? desc : NULL);
	if (err)
		goto error_find;

	vp_dev->per_vq_vectors = per_vq_vectors;
	allocated_vectors = vp_dev->msix_used_vectors;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		if (!callbacks[i])
			msix_vec = VIRTIO_MSI_NO_VECTOR;
		else if (vp_dev->per_vq_vectors)
			msix_vec = allocated_vectors++;
		else
			msix_vec = VP_MSIX_VQ_VECTOR;
		vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
				     ctx ? ctx[i] : false,
				     msix_vec);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto error_find;
		}

		if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
			continue;

		/* allocate per-vq irq if available and necessary */
		snprintf(vp_dev->msix_names[msix_vec],
			 sizeof *vp_dev->msix_names,
			 "%s-%s",
			 dev_name(&vp_dev->vdev.dev), names[i]);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
				  vring_interrupt, 0,
				  vp_dev->msix_names[msix_vec],
				  vqs[i]);
		if (err)
			goto error_find;
	}
	return 0;

error_find:
	vp_del_vqs(vdev);
	return err;
}

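/*
 * Last resort: service all vqs from one shared INTx line.  The
 * vp_interrupt() handler then reads the ISR status byte to tell config
 * changes apart from vring notifications.
 */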
static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
			    struct virtqueue *vqs[], vq_callback_t *callbacks[],
			    const char * const names[], const bool *ctx)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i, err;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
			  dev_name(&vdev->dev), vp_dev);
	if (err)
		goto out_del_vqs;

	vp_dev->intx_enabled = 1;
	vp_dev->per_vq_vectors = false;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}
		vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
				     ctx ? ctx[i] : false,
				     VIRTIO_MSI_NO_VECTOR);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto out_del_vqs;
		}
	}

	return 0;
out_del_vqs:
	vp_del_vqs(vdev);
	return err;
}

/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], const bool *ctx,
		struct irq_affinity *desc)
{
	int err;

	/* Try MSI-X with one vector per queue. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, ctx, desc);
	if (!err)
		return 0;
	/* Fallback: MSI-X with one vector for config, one shared for queues. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc);
	if (!err)
		return 0;
	/* Finally fall back to regular interrupts. */
	return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
}

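/*
 * Device drivers do not call vp_find_vqs() directly; it is reached
 * through the virtio_find_vqs() wrapper in linux/virtio_config.h.  As a
 * rough sketch (the "rx"/"tx" names and callbacks below are purely
 * illustrative, not taken from this file), a two-queue driver would do
 * something like:
 *
 *	struct virtqueue *vqs[2];
 *	vq_callback_t *cbs[] = { my_rx_done, my_tx_done };
 *	static const char * const names[] = { "rx", "tx" };
 *
 *	err = virtio_find_vqs(vdev, 2, vqs, cbs, names, NULL);
 */
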
const char *vp_bus_name(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return pci_name(vp_dev->pci_dev);
}

/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
	struct virtio_device *vdev = vq->vdev;
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	struct cpumask *mask;
	unsigned int irq;

	if (!vq->callback)
		return -EINVAL;

	if (vp_dev->msix_enabled) {
		mask = vp_dev->msix_affinity_masks[info->msix_vector];
		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
		if (!cpu_mask)
			irq_set_affinity_hint(irq, NULL);
		else {
			cpumask_copy(mask, cpu_mask);
			irq_set_affinity_hint(irq, mask);
		}
	}
	return 0;
}

const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!vp_dev->per_vq_vectors ||
	    vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
		return NULL;

	return pci_irq_get_affinity(vp_dev->pci_dev,
				    vp_dev->vqs[index]->msix_vector);
}

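/*
 * System sleep support: freeze quiesces the virtio device before the
 * PCI device is disabled; restore re-enables the PCI device and brings
 * the virtio device back up via virtio_device_restore().
 */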
#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = virtio_device_freeze(&vp_dev->vdev);

	if (!ret)
		pci_disable_device(pci_dev);
	return ret;
}

static int virtio_pci_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = pci_enable_device(pci_dev);
	if (ret)
		return ret;

	pci_set_master(pci_dev);
	return virtio_device_restore(&vp_dev->vdev);
}

static const struct dev_pm_ops virtio_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
};
#endif

/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);

static void virtio_pci_release_dev(struct device *_d)
{
	struct virtio_device *vdev = dev_to_virtio(_d);
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* As struct device is a kobject, it's not safe to
	 * free the memory (including the reference counter itself)
	 * until its release callback. */
	kfree(vp_dev);
}

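/*
 * Probe order: the modern (virtio 1) interface is tried first with the
 * legacy interface as fallback, unless force_legacy reverses that for
 * transitional devices.
 */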
static int virtio_pci_probe(struct pci_dev *pci_dev,
			    const struct pci_device_id *id)
{
	struct virtio_pci_device *vp_dev, *reg_dev = NULL;
	int rc;

	/* allocate our structure and fill it out */
	vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
	if (!vp_dev)
		return -ENOMEM;

	pci_set_drvdata(pci_dev, vp_dev);
	vp_dev->vdev.dev.parent = &pci_dev->dev;
	vp_dev->vdev.dev.release = virtio_pci_release_dev;
	vp_dev->pci_dev = pci_dev;
	INIT_LIST_HEAD(&vp_dev->virtqueues);
	spin_lock_init(&vp_dev->lock);

	/* enable the device */
	rc = pci_enable_device(pci_dev);
	if (rc)
		goto err_enable_device;

	if (force_legacy) {
		rc = virtio_pci_legacy_probe(vp_dev);
		/* Also try modern mode if we can't map BAR0 (no IO space). */
		if (rc == -ENODEV || rc == -ENOMEM)
			rc = virtio_pci_modern_probe(vp_dev);
		if (rc)
			goto err_probe;
	} else {
		rc = virtio_pci_modern_probe(vp_dev);
		if (rc == -ENODEV)
			rc = virtio_pci_legacy_probe(vp_dev);
		if (rc)
			goto err_probe;
	}

	pci_set_master(pci_dev);

	rc = register_virtio_device(&vp_dev->vdev);
	reg_dev = vp_dev;
	if (rc)
		goto err_register;

	return 0;

err_register:
	if (vp_dev->ioaddr)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);
err_probe:
	pci_disable_device(pci_dev);
err_enable_device:
	if (reg_dev)
		put_device(&vp_dev->vdev.dev);
	else
		kfree(vp_dev);
	return rc;
}

static void virtio_pci_remove(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct device *dev = get_device(&vp_dev->vdev.dev);

	/*
	 * Device is marked broken on surprise removal so that virtio upper
	 * layers can abort any ongoing operation.
	 */
	if (!pci_device_is_present(pci_dev))
		virtio_break_device(&vp_dev->vdev);

	pci_disable_sriov(pci_dev);

	unregister_virtio_device(&vp_dev->vdev);

	if (vp_dev->ioaddr)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);

	pci_disable_device(pci_dev);
	put_device(dev);
}

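/*
 * sriov_configure() callback: num_vfs > 0 enables that many virtual
 * functions, num_vfs == 0 disables SR-IOV again.  The device must be
 * live (DRIVER_OK) and must have negotiated VIRTIO_F_SR_IOV.
 */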
static int virtio_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct virtio_device *vdev = &vp_dev->vdev;
	int ret;

	if (!(vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_DRIVER_OK))
		return -EBUSY;

	if (!__virtio_test_bit(vdev, VIRTIO_F_SR_IOV))
		return -EINVAL;

	if (pci_vfs_assigned(pci_dev))
		return -EPERM;

	if (num_vfs == 0) {
		pci_disable_sriov(pci_dev);
		return 0;
	}

	ret = pci_enable_sriov(pci_dev, num_vfs);
	if (ret < 0)
		return ret;

	return num_vfs;
}

static struct pci_driver virtio_pci_driver = {
	.name		= "virtio-pci",
	.id_table	= virtio_pci_id_table,
	.probe		= virtio_pci_probe,
	.remove		= virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm	= &virtio_pci_pm_ops,
#endif
	.sriov_configure = virtio_pci_sriov_configure,
};

module_pci_driver(virtio_pci_driver);

MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");