/*
 * Implementation of the Xen vTPM device frontend
 *
 * Author:  Daniel De Graaf <dgdegra@tycho.nsa.gov>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2,
 * as published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/freezer.h>
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/io/tpmif.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include "tpm.h"
#include <xen/platform_pci.h>

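/*
 * Per-device frontend state: the page shared with the backend, the event
 * channel used to signal it, and a wait queue for command completion.
 */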
struct tpm_private {
	struct tpm_chip *chip;
	struct xenbus_device *dev;

	struct vtpm_shared_page *shr;

	unsigned int evtchn;
	int ring_ref;
	domid_t backend_id;
	int irq;
	wait_queue_head_t read_queue;
};

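/* Status bits derived from the shared page state in vtpm_status() below. */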
enum status_bits {
	VTPM_STATUS_RUNNING  = 0x1,
	VTPM_STATUS_IDLE     = 0x2,
	VTPM_STATUS_RESULT   = 0x4,
	VTPM_STATUS_CANCELED = 0x8,
};

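/* Wait condition: requested status bits set or, if asked, command canceled. */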
static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
					bool check_cancel, bool *canceled)
{
	u8 status = chip->ops->status(chip);

	*canceled = false;
	if ((status & mask) == mask)
		return true;
	if (check_cancel && chip->ops->req_canceled(chip, status)) {
		*canceled = true;
		return true;
	}
	return false;
}

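/*
 * Wait for the requested status bits: sleep on the wait queue when the chip
 * is interrupt driven, otherwise poll.  Returns 0 on success, -ECANCELED if
 * the command was canceled, or -ETIME on timeout.
 */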
static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
		unsigned long timeout, wait_queue_head_t *queue,
		bool check_cancel)
{
	unsigned long stop;
	long rc;
	u8 status;
	bool canceled = false;

	/* check current status */
	status = chip->ops->status(chip);
	if ((status & mask) == mask)
		return 0;

	stop = jiffies + timeout;

	if (chip->flags & TPM_CHIP_FLAG_IRQ) {
again:
		timeout = stop - jiffies;
		if ((long)timeout <= 0)
			return -ETIME;
		rc = wait_event_interruptible_timeout(*queue,
			wait_for_tpm_stat_cond(chip, mask, check_cancel,
					       &canceled),
			timeout);
		if (rc > 0) {
			if (canceled)
				return -ECANCELED;
			return 0;
		}
		if (rc == -ERESTARTSYS && freezing(current)) {
			clear_thread_flag(TIF_SIGPENDING);
			goto again;
		}
	} else {
		do {
			tpm_msleep(TPM_TIMEOUT);
			status = chip->ops->status(chip);
			if ((status & mask) == mask)
				return 0;
		} while (time_before(jiffies, stop));
	}
	return -ETIME;
}

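/* Translate the shared page state into the status bits defined above. */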
static u8 vtpm_status(struct tpm_chip *chip)
{
	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
	switch (priv->shr->state) {
	case VTPM_STATE_IDLE:
		return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED;
	case VTPM_STATE_FINISH:
		return VTPM_STATUS_IDLE | VTPM_STATUS_RESULT;
	case VTPM_STATE_SUBMIT:
	case VTPM_STATE_CANCEL: /* cancel requested, not yet canceled */
		return VTPM_STATUS_RUNNING;
	default:
		return 0;
	}
}

static bool vtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
	return status & VTPM_STATUS_CANCELED;
}

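/* Ask the backend to cancel the current command. */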
static void vtpm_cancel(struct tpm_chip *chip)
{
	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
	priv->shr->state = VTPM_STATE_CANCEL;
	wmb();
	notify_remote_via_evtchn(priv->evtchn);
}

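/* Data follows the shared page header and nr_extra_pages extra u32 entries. */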
static unsigned int shr_data_offset(struct vtpm_shared_page *shr)
{
	return sizeof(*shr) + sizeof(u32) * shr->nr_extra_pages;
}

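/*
 * Copy a command into the shared page, notify the backend, and wait for it
 * to finish; on timeout the command is canceled and -ETIME returned.
 */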
static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
	struct vtpm_shared_page *shr = priv->shr;
	unsigned int offset = shr_data_offset(shr);

	u32 ordinal;
	unsigned long duration;

	if (offset > PAGE_SIZE)
		return -EINVAL;

	if (offset + count > PAGE_SIZE)
		return -EINVAL;

	/* Wait for completion of any existing command or cancellation */
	if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->timeout_c,
			&priv->read_queue, true) < 0) {
		vtpm_cancel(chip);
		return -ETIME;
	}

	memcpy(offset + (u8 *)shr, buf, count);
	shr->length = count;
	barrier();
	shr->state = VTPM_STATE_SUBMIT;
	wmb();
	notify_remote_via_evtchn(priv->evtchn);

	ordinal = be32_to_cpu(((struct tpm_input_header*)buf)->ordinal);
	duration = tpm_calc_ordinal_duration(chip, ordinal);

	if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration,
			&priv->read_queue, true) < 0) {
		/* got a signal or timeout, try to cancel */
		vtpm_cancel(chip);
		return -ETIME;
	}

	return 0;
}

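/* Copy the response out of the shared page once the backend has finished. */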
static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
	struct vtpm_shared_page *shr = priv->shr;
	unsigned int offset = shr_data_offset(shr);
	size_t length = shr->length;

	if (shr->state == VTPM_STATE_IDLE)
		return -ECANCELED;

	/* In theory the wait at the end of _send makes this one unnecessary */
	if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->timeout_c,
			&priv->read_queue, true) < 0) {
		vtpm_cancel(chip);
		return -ETIME;
	}

	if (offset > PAGE_SIZE)
		return -EIO;

	if (offset + length > PAGE_SIZE)
		length = PAGE_SIZE - offset;

	if (length > count)
		length = count;

	memcpy(buf, offset + (u8 *)shr, length);

	return length;
}

static const struct tpm_class_ops tpm_vtpm = {
	.status = vtpm_status,
	.recv = vtpm_recv,
	.send = vtpm_send,
	.cancel = vtpm_cancel,
	.req_complete_mask = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
	.req_complete_val  = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
	.req_canceled      = vtpm_req_canceled,
};

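/* Event channel handler: wake waiters once the backend reaches idle/finish. */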
static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
{
	struct tpm_private *priv = dev_id;

	switch (priv->shr->state) {
	case VTPM_STATE_IDLE:
	case VTPM_STATE_FINISH:
		wake_up_interruptible(&priv->read_queue);
		break;
	case VTPM_STATE_SUBMIT:
	case VTPM_STATE_CANCEL:
	default:
		break;
	}
	return IRQ_HANDLED;
}

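/* Allocate the tpm_chip and link it to the frontend's private data. */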
static int setup_chip(struct device *dev, struct tpm_private *priv)
{
	struct tpm_chip *chip;

	chip = tpmm_chip_alloc(dev, &tpm_vtpm);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	init_waitqueue_head(&priv->read_queue);

	priv->chip = chip;
	dev_set_drvdata(&chip->dev, priv);

	return 0;
}

/*
 * Allocate and grant the shared page, bind the event channel, and publish
 * ring-ref and event-channel to the backend in a single xenbus transaction.
 * The caller must clean up in case of errors.
 */
static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
{
	struct xenbus_transaction xbt;
	const char *message = NULL;
	int rv;
	grant_ref_t gref;

	priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!priv->shr) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}

	rv = xenbus_grant_ring(dev, priv->shr, 1, &gref);
	if (rv < 0)
		return rv;

	priv->ring_ref = gref;

	rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
	if (rv)
		return rv;

	rv = bind_evtchn_to_irqhandler(priv->evtchn, tpmif_interrupt, 0,
				       "tpmif", priv);
	if (rv <= 0) {
		xenbus_dev_fatal(dev, rv, "allocating TPM irq");
		return rv;
	}
	priv->irq = rv;

 again:
	rv = xenbus_transaction_start(&xbt);
	if (rv) {
		xenbus_dev_fatal(dev, rv, "starting transaction");
		return rv;
	}

	rv = xenbus_printf(xbt, dev->nodename,
			"ring-ref", "%u", priv->ring_ref);
	if (rv) {
		message = "writing ring-ref";
		goto abort_transaction;
	}

	rv = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			priv->evtchn);
	if (rv) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	rv = xenbus_printf(xbt, dev->nodename, "feature-protocol-v2", "1");
	if (rv) {
		message = "writing feature-protocol-v2";
		goto abort_transaction;
	}

	rv = xenbus_transaction_end(xbt, 0);
	if (rv == -EAGAIN)
		goto again;
	if (rv) {
		xenbus_dev_fatal(dev, rv, "completing transaction");
		return rv;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_error(dev, rv, "%s", message);

	return rv;
}

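/*
 * Undo setup_ring(): revoke the grant (or free the page), unbind the
 * interrupt handler and free the private structure.
 */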
static void ring_free(struct tpm_private *priv)
{
	if (!priv)
		return;

	if (priv->ring_ref)
		gnttab_end_foreign_access(priv->ring_ref, 0,
				(unsigned long)priv->shr);
	else
		free_page((unsigned long)priv->shr);

	if (priv->irq)
		unbind_from_irqhandler(priv->irq, priv);

	kfree(priv);
}

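/* Allocate per-device state, set up the chip and ring, register the chip. */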
static int tpmfront_probe(struct xenbus_device *dev,
		const struct xenbus_device_id *id)
{
	struct tpm_private *priv;
	int rv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating priv structure");
		return -ENOMEM;
	}

	rv = setup_chip(&dev->dev, priv);
	if (rv) {
		kfree(priv);
		return rv;
	}

	rv = setup_ring(dev, priv);
	if (rv) {
		ring_free(priv);
		return rv;
	}

	tpm_get_timeouts(priv->chip);

	return tpm_chip_register(priv->chip);
}

static int tpmfront_remove(struct xenbus_device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
	tpm_chip_unregister(chip);
	ring_free(priv);
	dev_set_drvdata(&chip->dev, NULL);
	return 0;
}

static int tpmfront_resume(struct xenbus_device *dev)
{
	/* A suspend/resume/migrate will interrupt a vTPM anyway */
	tpmfront_remove(dev);
	return tpmfront_probe(dev, NULL);
}

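/*
 * Follow the backend's xenbus state: require feature-protocol-v2 before
 * connecting, and tear down when the backend closes.
 */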
static void backend_changed(struct xenbus_device *dev,
		enum xenbus_state backend_state)
{
	switch (backend_state) {
	case XenbusStateInitialised:
	case XenbusStateConnected:
		if (dev->state == XenbusStateConnected)
			break;

		if (!xenbus_read_unsigned(dev->otherend, "feature-protocol-v2",
					  0)) {
			xenbus_dev_fatal(dev, -EINVAL,
					"vTPM protocol 2 required");
			return;
		}
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
	case XenbusStateClosed:
		device_unregister(&dev->dev);
		xenbus_frontend_closed(dev);
		break;
	default:
		break;
	}
}

static const struct xenbus_device_id tpmfront_ids[] = {
	{ "vtpm" },
	{ "" }
};
MODULE_ALIAS("xen:vtpm");

static struct xenbus_driver tpmfront_driver = {
	.ids = tpmfront_ids,
	.probe = tpmfront_probe,
	.remove = tpmfront_remove,
	.resume = tpmfront_resume,
	.otherend_changed = backend_changed,
};

static int __init xen_tpmfront_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_devices())
		return -ENODEV;

	return xenbus_register_frontend(&tpmfront_driver);
}
module_init(xen_tpmfront_init);

static void __exit xen_tpmfront_exit(void)
{
	xenbus_unregister_driver(&tpmfront_driver);
}
module_exit(xen_tpmfront_exit);

MODULE_AUTHOR("Daniel De Graaf <dgdegra@tycho.nsa.gov>");
MODULE_DESCRIPTION("Xen vTPM Driver");
MODULE_LICENSE("GPL");