/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Adapted from:
 *
 * virtio for kvm on s390
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
 *
 * Intel Virtio Over PCIe (VOP) driver.
 *
 */
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>

#include "vop_main.h"

#define VOP_MAX_VRINGS 4

/*
 * _vop_vdev - Allocated per virtio device instance injected by the peer.
 *
 * @vdev: Virtio device
 * @desc: Virtio device page descriptor
 * @dc: Virtio device control
 * @vpdev: VOP device which is the parent for this virtio device
 * @vr: Buffer for accessing the VRING
 * @used: DMA address of the locally allocated used ring, per VRING
 * @used_size: Size of the used buffer
 * @reset_done: Track whether VOP reset is complete
 * @virtio_cookie: Cookie returned upon requesting an interrupt
 * @c2h_vdev_db: The doorbell used by the guest to interrupt the host
 * @h2c_vdev_db: The doorbell used by the host to interrupt the guest
 * @dnode: The destination node
 */
struct _vop_vdev {
	struct virtio_device vdev;
	struct mic_device_desc __iomem *desc;
	struct mic_device_ctrl __iomem *dc;
	struct vop_device *vpdev;
	void __iomem *vr[VOP_MAX_VRINGS];
	dma_addr_t used[VOP_MAX_VRINGS];
	int used_size[VOP_MAX_VRINGS];
	struct completion reset_done;
	struct mic_irq *virtio_cookie;
	int c2h_vdev_db;
	int h2c_vdev_db;
	int dnode;
};

#define to_vopvdev(vd) container_of(vd, struct _vop_vdev, vdev)

#define _vop_aligned_desc_size(d) __mic_align(_vop_desc_size(d), 8)

/* Helper API to obtain the parent of the virtio device */
static inline struct device *_vop_dev(struct _vop_vdev *vdev)
{
	return vdev->vdev.dev.parent;
}

static inline unsigned _vop_desc_size(struct mic_device_desc __iomem *desc)
{
	return sizeof(*desc)
		+ ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig)
		+ ioread8(&desc->feature_len) * 2
		+ ioread8(&desc->config_len);
}

static inline struct mic_vqconfig __iomem *
_vop_vq_config(struct mic_device_desc __iomem *desc)
{
	return (struct mic_vqconfig __iomem *)(desc + 1);
}

static inline u8 __iomem *
_vop_vq_features(struct mic_device_desc __iomem *desc)
{
	return (u8 __iomem *)(_vop_vq_config(desc) + ioread8(&desc->num_vq));
}

static inline u8 __iomem *
_vop_vq_configspace(struct mic_device_desc __iomem *desc)
{
	return _vop_vq_features(desc) + ioread8(&desc->feature_len) * 2;
}

static inline unsigned
_vop_total_desc_size(struct mic_device_desc __iomem *desc)
{
	return _vop_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
}
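
/*
 * Layout of each device descriptor inside the device page, as implied by
 * the accessors above (sizes come from the struct mic_device_desc fields):
 *
 *	struct mic_device_desc
 *	struct mic_vqconfig[num_vq]
 *	host feature bits	[feature_len bytes]
 *	guest feature acks	[feature_len bytes]
 *	config space		[config_len bytes]
 *	padding up to 8 byte alignment
 *	struct mic_device_ctrl
 */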

/* This gets the device's feature bits. */
static u64 vop_get_features(struct virtio_device *vdev)
{
	unsigned int i, bits;
	u32 features = 0;
	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
	u8 __iomem *in_features = _vop_vq_features(desc);
	int feature_len = ioread8(&desc->feature_len);

	bits = min_t(unsigned, feature_len, sizeof(vdev->features)) * 8;
	for (i = 0; i < bits; i++)
		if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
			features |= BIT(i);

	return features;
}

static int vop_finalize_features(struct virtio_device *vdev)
{
	unsigned int i, bits;
	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
	u8 feature_len = ioread8(&desc->feature_len);
	/* Second half of bitmap is features we accept. */
	u8 __iomem *out_features =
		_vop_vq_features(desc) + feature_len;

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	memset_io(out_features, 0, feature_len);
	bits = min_t(unsigned, feature_len,
		     sizeof(vdev->features)) * 8;
	for (i = 0; i < bits; i++) {
		if (__virtio_test_bit(vdev, i))
			iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)),
				 &out_features[i / 8]);
	}
	return 0;
}

/*
 * Reading and writing elements in config space
 */
static void vop_get(struct virtio_device *vdev, unsigned int offset,
		    void *buf, unsigned len)
{
	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;

	if (offset + len > ioread8(&desc->config_len))
		return;
	memcpy_fromio(buf, _vop_vq_configspace(desc) + offset, len);
}

static void vop_set(struct virtio_device *vdev, unsigned int offset,
		    const void *buf, unsigned len)
{
	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;

	if (offset + len > ioread8(&desc->config_len))
		return;
	memcpy_toio(_vop_vq_configspace(desc) + offset, buf, len);
}

/*
 * The operations to get and set the status word just access the status
 * field of the device descriptor. set_status also interrupts the host
 * to tell it about status changes.
 */
static u8 vop_get_status(struct virtio_device *vdev)
{
	return ioread8(&to_vopvdev(vdev)->desc->status);
}

static void vop_set_status(struct virtio_device *dev, u8 status)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct vop_device *vpdev = vdev->vpdev;

	if (!status)
		return;
	iowrite8(status, &vdev->desc->status);
	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
}

/* Inform the host of a virtio device reset and wait for its ack */
static void vop_reset_inform_host(struct virtio_device *dev)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct mic_device_ctrl __iomem *dc = vdev->dc;
	struct vop_device *vpdev = vdev->vpdev;
	int retry;

	iowrite8(0, &dc->host_ack);
	iowrite8(1, &dc->vdev_reset);
	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);

	/* Wait till the host completes all card accesses and acks the reset */
	for (retry = 100; retry--;) {
		if (ioread8(&dc->host_ack))
			break;
		msleep(100);
	}

	dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);

	/* Reset status to 0 in case we timed out */
	iowrite8(0, &vdev->desc->status);
}

static void vop_reset(struct virtio_device *dev)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);

	dev_dbg(_vop_dev(vdev), "%s: virtio id %d\n",
		__func__, dev->id.device);

	vop_reset_inform_host(dev);
	complete_all(&vdev->reset_done);
}

/*
 * The virtio_ring code calls this API when it wants to notify the host.
 */
static bool vop_notify(struct virtqueue *vq)
{
	struct _vop_vdev *vdev = vq->priv;
	struct vop_device *vpdev = vdev->vpdev;

	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
	return true;
}

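/*
 * Undo vop_find_vq() for ring @n: unmap the locally allocated used ring,
 * free its pages, delete the virtqueue and iounmap the host vring.
 */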
static void vop_del_vq(struct virtqueue *vq, int n)
{
	struct _vop_vdev *vdev = to_vopvdev(vq->vdev);
	struct vring *vr = (struct vring *)(vq + 1);
	struct vop_device *vpdev = vdev->vpdev;

	dma_unmap_single(&vpdev->dev, vdev->used[n],
			 vdev->used_size[n], DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vr->used, get_order(vdev->used_size[n]));
	vring_del_virtqueue(vq);
	vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]);
	vdev->vr[n] = NULL;
}

static void vop_del_vqs(struct virtio_device *dev)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct virtqueue *vq, *n;
	int idx = 0;

	dev_dbg(_vop_dev(vdev), "%s\n", __func__);

	list_for_each_entry_safe(vq, n, &dev->vqs, list)
		vop_del_vq(vq, idx++);
}

/*
 * This routine assigns the vrings allocated in host/IO memory. Code in
 * virtio_ring.c, however, continues to access this IO memory as if it were
 * local memory, without IO accessors.
 */
static struct virtqueue *vop_find_vq(struct virtio_device *dev,
				     unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name, bool ctx)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct vop_device *vpdev = vdev->vpdev;
	struct mic_vqconfig __iomem *vqconfig;
	struct mic_vqconfig config;
	struct virtqueue *vq;
	void __iomem *va;
	struct _mic_vring_info __iomem *info;
	void *used;
	int vr_size, _vr_size, err, magic;
	struct vring *vr;
	u8 type = ioread8(&vdev->desc->type);

	if (index >= ioread8(&vdev->desc->num_vq))
		return ERR_PTR(-ENOENT);

	if (!name)
		return ERR_PTR(-ENOENT);

	/* First assign the vrings allocated in host memory */
	vqconfig = _vop_vq_config(vdev->desc) + index;
	memcpy_fromio(&config, vqconfig, sizeof(config));
	_vr_size = round_up(vring_size(le16_to_cpu(config.num),
				       MIC_VIRTIO_RING_ALIGN), 4);
	vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
	va = vpdev->hw_ops->ioremap(vpdev, le64_to_cpu(config.address),
			vr_size);
	if (!va)
		return ERR_PTR(-ENOMEM);
	vdev->vr[index] = va;
	memset_io(va, 0x0, _vr_size);
	vq = vring_new_virtqueue(
				index,
				le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN,
				dev,
				false,
				ctx,
				(void __force *)va, vop_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto unmap;
	}
	info = va + _vr_size;
	magic = ioread32(&info->magic);

	if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
		err = -EIO;
		goto unmap;
	}

	/* Allocate and reassign the used ring now */
	vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
					     sizeof(struct vring_used_elem) *
					     le16_to_cpu(config.num));
	used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(vdev->used_size[index]));
	if (!used) {
		err = -ENOMEM;
		dev_err(_vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto del_vq;
	}
	vdev->used[index] = dma_map_single(&vpdev->dev, used,
					    vdev->used_size[index],
					    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&vpdev->dev, vdev->used[index])) {
		err = -ENOMEM;
		dev_err(_vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto free_used;
	}
	writeq(vdev->used[index], &vqconfig->used_address);
	/*
	 * To reassign the used ring here we are directly accessing
	 * struct vring_virtqueue, which is a private data structure
	 * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
	 * vring_new_virtqueue() would ensure that
	 *  (&vq->vring == (struct vring *)(&vq->vq + 1));
	 */
	vr = (struct vring *)(vq + 1);
	vr->used = used;

	vq->priv = vdev;
	return vq;
free_used:
	free_pages((unsigned long)used,
		   get_order(vdev->used_size[index]));
del_vq:
	vring_del_virtqueue(vq);
unmap:
	vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]);
	return ERR_PTR(err);
}

static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
			struct virtqueue *vqs[],
			vq_callback_t *callbacks[],
			const char * const names[], const bool *ctx,
			struct irq_affinity *desc)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct vop_device *vpdev = vdev->vpdev;
	struct mic_device_ctrl __iomem *dc = vdev->dc;
	int i, err, retry;

	/* We must have this many virtqueues. */
	if (nvqs > ioread8(&vdev->desc->num_vq))
		return -ENOENT;

	for (i = 0; i < nvqs; ++i) {
		dev_dbg(_vop_dev(vdev), "%s: %d: %s\n",
			__func__, i, names[i]);
		vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i],
				     ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto error;
		}
	}

	iowrite8(1, &dc->used_address_updated);
	/*
	 * Send an interrupt to the host to inform it that the used
	 * rings have been re-assigned.
	 */
	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
	for (retry = 100; --retry;) {
		if (!ioread8(&dc->used_address_updated))
			break;
		msleep(100);
	}

	dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
	if (!retry) {
		err = -ENODEV;
		goto error;
	}

	return 0;
error:
	vop_del_vqs(dev);
	return err;
}

/*
 * The config ops structure as defined by virtio config
 */
static struct virtio_config_ops vop_vq_config_ops = {
	.get_features = vop_get_features,
	.finalize_features = vop_finalize_features,
	.get = vop_get,
	.set = vop_set,
	.get_status = vop_get_status,
	.set_status = vop_set_status,
	.reset = vop_reset,
	.find_vqs = vop_find_vqs,
	.del_vqs = vop_del_vqs,
};

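/*
 * Per-device doorbell interrupt handler: ack the doorbell and let
 * vring_interrupt() poll every virtqueue, since the single h2c doorbell
 * does not identify which ring triggered it.
 */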
static irqreturn_t vop_virtio_intr_handler(int irq, void *data)
{
	struct _vop_vdev *vdev = data;
	struct vop_device *vpdev = vdev->vpdev;
	struct virtqueue *vq;

	vpdev->hw_ops->ack_interrupt(vpdev, vdev->h2c_vdev_db);
	list_for_each_entry(vq, &vdev->vdev.vqs, list)
		vring_interrupt(0, vq);

	return IRQ_HANDLED;
}

static void vop_virtio_release_dev(struct device *_d)
{
	struct virtio_device *vdev =
			container_of(_d, struct virtio_device, dev);
	struct _vop_vdev *vop_vdev =
			container_of(vdev, struct _vop_vdev, vdev);

	kfree(vop_vdev);
}

/*
 * Adds a new device and registers it with virtio; the appropriate drivers
 * are loaded by the device model.
 */
static int _vop_add_device(struct mic_device_desc __iomem *d,
			   unsigned int offset, struct vop_device *vpdev,
			   int dnode)
{
	struct _vop_vdev *vdev, *reg_dev = NULL;
	int ret;
	u8 type = ioread8(&d->type);

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return -ENOMEM;

	vdev->vpdev = vpdev;
	vdev->vdev.dev.parent = &vpdev->dev;
	vdev->vdev.dev.release = vop_virtio_release_dev;
	vdev->vdev.id.device = type;
	vdev->vdev.config = &vop_vq_config_ops;
	vdev->desc = d;
	vdev->dc = (void __iomem *)d + _vop_aligned_desc_size(d);
	vdev->dnode = dnode;
	vdev->vdev.priv = (void *)(u64)dnode;
	init_completion(&vdev->reset_done);

	vdev->h2c_vdev_db = vpdev->hw_ops->next_db(vpdev);
	vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev,
			vop_virtio_intr_handler, "virtio intr",
			vdev, vdev->h2c_vdev_db);
	if (IS_ERR(vdev->virtio_cookie)) {
		ret = PTR_ERR(vdev->virtio_cookie);
		goto kfree;
	}
	iowrite8((u8)vdev->h2c_vdev_db, &vdev->dc->h2c_vdev_db);
	vdev->c2h_vdev_db = ioread8(&vdev->dc->c2h_vdev_db);

	ret = register_virtio_device(&vdev->vdev);
	reg_dev = vdev;
	if (ret) {
		dev_err(_vop_dev(vdev),
			"Failed to register vop device %u type %u\n",
			offset, type);
		goto free_irq;
	}
	writeq((u64)vdev, &vdev->dc->vdev);
	dev_dbg(_vop_dev(vdev), "%s: registered vop device %u type %u vdev %p\n",
		__func__, offset, type, vdev);

	return 0;

free_irq:
	vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
kfree:
	if (reg_dev)
		put_device(&vdev->vdev.dev);
	else
		kfree(vdev);
	return ret;
}

/*
 * Match a vop device against a specific descriptor pointer.
 */
static int vop_match_desc(struct device *dev, void *data)
{
	struct virtio_device *_dev = dev_to_virtio(dev);
	struct _vop_vdev *vdev = to_vopvdev(_dev);

	return vdev->desc == (void __iomem *)data;
}

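/*
 * Handle a config change request from the host: forward it to the virtio
 * core and ack it via guest_ack.
 */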
static void _vop_handle_config_change(struct mic_device_desc __iomem *d,
				      unsigned int offset,
				      struct vop_device *vpdev)
{
	struct mic_device_ctrl __iomem *dc
		= (void __iomem *)d + _vop_aligned_desc_size(d);
	struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev);

	if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
		return;

	dev_dbg(&vpdev->dev, "%s %d\n", __func__, __LINE__);
	virtio_config_changed(&vdev->vdev);
	iowrite8(1, &dc->guest_ack);
}

/*
 * Removes a virtio device if a hot-remove event has been requested by the
 * host.
 */
static int _vop_remove_device(struct mic_device_desc __iomem *d,
			      unsigned int offset, struct vop_device *vpdev)
{
	struct mic_device_ctrl __iomem *dc
		= (void __iomem *)d + _vop_aligned_desc_size(d);
	struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev);
	u8 status;
	int ret = -1;

	if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
		struct device *dev = get_device(&vdev->vdev.dev);

		dev_dbg(&vpdev->dev,
			"%s %d config_change %d type %d vdev %p\n",
			__func__, __LINE__,
			ioread8(&dc->config_change), ioread8(&d->type), vdev);
		status = ioread8(&d->status);
		reinit_completion(&vdev->reset_done);
		unregister_virtio_device(&vdev->vdev);
		vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
		iowrite8(-1, &dc->h2c_vdev_db);
		if (status & VIRTIO_CONFIG_S_DRIVER_OK)
			wait_for_completion(&vdev->reset_done);
		put_device(dev);
		iowrite8(1, &dc->guest_ack);
		dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
			__func__, __LINE__, ioread8(&dc->guest_ack));
		iowrite8(-1, &d->type);
		ret = 0;
	}
	return ret;
}

#define REMOVE_DEVICES true

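/*
 * Walk every descriptor in the device page: service config change and
 * hot remove requests for devices that already exist, and add devices
 * for any new descriptors published by the host.
 */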
static void _vop_scan_devices(void __iomem *dp, struct vop_device *vpdev,
			      bool remove, int dnode)
{
	s8 type;
	unsigned int i;
	struct mic_device_desc __iomem *d;
	struct mic_device_ctrl __iomem *dc;
	struct device *dev;
	int ret;

	for (i = sizeof(struct mic_bootparam);
			i < MIC_DP_SIZE; i += _vop_total_desc_size(d)) {
		d = dp + i;
		dc = (void __iomem *)d + _vop_aligned_desc_size(d);
		/*
		 * This read barrier is paired with the corresponding write
		 * barrier on the host which is inserted before adding or
		 * removing a virtio device descriptor, by updating the type.
		 */
		rmb();
		type = ioread8(&d->type);

		/* end of list */
		if (type == 0)
			break;

		if (type == -1)
			continue;

		/* device already exists */
		dev = device_find_child(&vpdev->dev, (void __force *)d,
					vop_match_desc);
		if (dev) {
			if (remove)
				iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE,
					 &dc->config_change);
			put_device(dev);
			_vop_handle_config_change(d, i, vpdev);
			ret = _vop_remove_device(d, i, vpdev);
			if (remove) {
				iowrite8(0, &dc->config_change);
				iowrite8(0, &dc->guest_ack);
			}
			continue;
		}

		/* new device */
		dev_dbg(&vpdev->dev, "%s %d Adding new virtio device %p\n",
			__func__, __LINE__, d);
		if (!remove)
			_vop_add_device(d, i, vpdev, dnode);
	}
}

static void vop_scan_devices(struct vop_info *vi,
			     struct vop_device *vpdev, bool remove)
{
	void __iomem *dp = vpdev->hw_ops->get_remote_dp(vpdev);

	if (!dp)
		return;
	mutex_lock(&vi->vop_mutex);
	_vop_scan_devices(dp, vpdev, remove, vpdev->dnode);
	mutex_unlock(&vi->vop_mutex);
}

/*
 * vop_hotplug_devices looks for changes in the device page.
 */
static void vop_hotplug_devices(struct work_struct *work)
{
	struct vop_info *vi = container_of(work, struct vop_info,
					     hotplug_work);

	vop_scan_devices(vi, vi->vpdev, !REMOVE_DEVICES);
}

/*
 * Interrupt handler for hot plug/config changes etc.
 */
static irqreturn_t vop_extint_handler(int irq, void *data)
{
	struct vop_info *vi = data;
	struct mic_bootparam __iomem *bp;
	struct vop_device *vpdev = vi->vpdev;

	bp = vpdev->hw_ops->get_remote_dp(vpdev);
	dev_dbg(&vpdev->dev, "%s %d hotplug work\n",
		__func__, __LINE__);
	vpdev->hw_ops->ack_interrupt(vpdev, ioread8(&bp->h2c_config_db));
	schedule_work(&vi->hotplug_work);
	return IRQ_HANDLED;
}

static int vop_driver_probe(struct vop_device *vpdev)
{
	struct vop_info *vi;
	int rc;

	vi = kzalloc(sizeof(*vi), GFP_KERNEL);
	if (!vi) {
		rc = -ENOMEM;
		goto exit;
	}
	dev_set_drvdata(&vpdev->dev, vi);
	vi->vpdev = vpdev;

	mutex_init(&vi->vop_mutex);
	INIT_WORK(&vi->hotplug_work, vop_hotplug_devices);
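	/*
	 * A non-zero dnode indicates the host side of the link, which owns
	 * the device page; otherwise this is the card side, which scans the
	 * remote device page and registers a doorbell for config interrupts.
	 */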
	if (vpdev->dnode) {
		rc = vop_host_init(vi);
		if (rc < 0)
			goto free;
	} else {
		struct mic_bootparam __iomem *bootparam;

		vop_scan_devices(vi, vpdev, !REMOVE_DEVICES);

		vi->h2c_config_db = vpdev->hw_ops->next_db(vpdev);
		vi->cookie = vpdev->hw_ops->request_irq(vpdev,
							vop_extint_handler,
							"virtio_config_intr",
							vi, vi->h2c_config_db);
		if (IS_ERR(vi->cookie)) {
			rc = PTR_ERR(vi->cookie);
			goto free;
		}
		bootparam = vpdev->hw_ops->get_remote_dp(vpdev);
		iowrite8(vi->h2c_config_db, &bootparam->h2c_config_db);
	}
	vop_init_debugfs(vi);
	return 0;
free:
	kfree(vi);
exit:
	return rc;
}

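/*
 * Undo vop_driver_probe(): the host side uninitializes its state; the
 * card side disables the config doorbell, frees its interrupt and
 * removes all virtio devices before the driver data is freed.
 */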
static void vop_driver_remove(struct vop_device *vpdev)
{
	struct vop_info *vi = dev_get_drvdata(&vpdev->dev);

	if (vpdev->dnode) {
		vop_host_uninit(vi);
	} else {
		struct mic_bootparam __iomem *bootparam =
			vpdev->hw_ops->get_remote_dp(vpdev);
		if (bootparam)
			iowrite8(-1, &bootparam->h2c_config_db);
		vpdev->hw_ops->free_irq(vpdev, vi->cookie, vi);
		flush_work(&vi->hotplug_work);
		vop_scan_devices(vi, vpdev, REMOVE_DEVICES);
	}
	vop_exit_debugfs(vi);
	kfree(vi);
}

static struct vop_device_id id_table[] = {
	{ VOP_DEV_TRNSP, VOP_DEV_ANY_ID },
	{ 0 },
};

static struct vop_driver vop_driver = {
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table = id_table,
	.probe = vop_driver_probe,
	.remove = vop_driver_remove,
};

module_vop_driver(vop_driver);

MODULE_DEVICE_TABLE(mbus, id_table);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Virtio Over PCIe (VOP) driver");
MODULE_LICENSE("GPL v2");