/* drivers/char/tpm/xen-tpmfront.c */
/*
 * Implementation of the Xen vTPM device frontend
 *
 * Author: Daniel De Graaf <dgdegra@tycho.nsa.gov>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2,
 * as published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/io/tpmif.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include "tpm.h"

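/*
 * Per-device frontend state: the page shared with the backend, the grant
 * reference covering it, and the event channel used to notify the backend
 * domain.
 */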
struct tpm_private {
        struct tpm_chip *chip;
        struct xenbus_device *dev;

        struct vtpm_shared_page *shr;

        unsigned int evtchn;
        int ring_ref;
        domid_t backend_id;
};

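/*
 * Status bits reported to the common TPM code; vtpm_status() derives them
 * from the shared-page state, and tpm_vtpm below tells the core which
 * combination means "request complete".
 */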
enum status_bits {
        VTPM_STATUS_RUNNING = 0x1,
        VTPM_STATUS_IDLE = 0x2,
        VTPM_STATUS_RESULT = 0x4,
        VTPM_STATUS_CANCELED = 0x8,
};

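/* Convert the backend-visible state into the status bits defined above. */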
static u8 vtpm_status(struct tpm_chip *chip)
{
        struct tpm_private *priv = TPM_VPRIV(chip);
        switch (priv->shr->state) {
        case VTPM_STATE_IDLE:
                return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED;
        case VTPM_STATE_FINISH:
                return VTPM_STATUS_IDLE | VTPM_STATUS_RESULT;
        case VTPM_STATE_SUBMIT:
        case VTPM_STATE_CANCEL: /* cancel requested, not yet canceled */
                return VTPM_STATUS_RUNNING;
        default:
                return 0;
        }
}

static bool vtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
        return status & VTPM_STATUS_CANCELED;
}

static void vtpm_cancel(struct tpm_chip *chip)
{
        struct tpm_private *priv = TPM_VPRIV(chip);
        priv->shr->state = VTPM_STATE_CANCEL;
        wmb();
        notify_remote_via_evtchn(priv->evtchn);
}

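/*
 * Command and response data follow the shared-page header and the array
 * of extra grant references.
 */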
static unsigned int shr_data_offset(struct vtpm_shared_page *shr)
{
        return sizeof(*shr) + sizeof(u32) * shr->nr_extra_pages;
}

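/*
 * Submit a TPM command: copy it into the shared page, flip the state to
 * SUBMIT and notify the backend over the event channel, then wait (up to
 * the ordinal-specific duration) for the backend to finish.
 */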
static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
        struct tpm_private *priv = TPM_VPRIV(chip);
        struct vtpm_shared_page *shr = priv->shr;
        unsigned int offset = shr_data_offset(shr);

        u32 ordinal;
        unsigned long duration;

        if (offset > PAGE_SIZE)
                return -EINVAL;

        if (offset + count > PAGE_SIZE)
                return -EINVAL;

        /* Wait for completion of any existing command or cancellation */
        if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->vendor.timeout_c,
                              &chip->vendor.read_queue, true) < 0) {
                vtpm_cancel(chip);
                return -ETIME;
        }

        memcpy(offset + (u8 *)shr, buf, count);
        shr->length = count;
        barrier();
        shr->state = VTPM_STATE_SUBMIT;
        wmb();
        notify_remote_via_evtchn(priv->evtchn);

        ordinal = be32_to_cpu(((struct tpm_input_header *)buf)->ordinal);
        duration = tpm_calc_ordinal_duration(chip, ordinal);

        if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration,
                              &chip->vendor.read_queue, true) < 0) {
                /* got a signal or timeout, try to cancel */
                vtpm_cancel(chip);
                return -ETIME;
        }

        return count;
}

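/*
 * Copy the response out of the shared page, clamping its length to the
 * page and to the caller's buffer; a state of IDLE here means the command
 * was cancelled.
 */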
static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
        struct tpm_private *priv = TPM_VPRIV(chip);
        struct vtpm_shared_page *shr = priv->shr;
        unsigned int offset = shr_data_offset(shr);
        size_t length = shr->length;

        if (shr->state == VTPM_STATE_IDLE)
                return -ECANCELED;

        /* In theory the wait at the end of _send makes this one unnecessary */
        if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->vendor.timeout_c,
                              &chip->vendor.read_queue, true) < 0) {
                vtpm_cancel(chip);
                return -ETIME;
        }

        if (offset > PAGE_SIZE)
                return -EIO;

        if (offset + length > PAGE_SIZE)
                length = PAGE_SIZE - offset;

        if (length > count)
                length = count;

        memcpy(buf, offset + (u8 *)shr, length);

        return length;
}

static const struct tpm_class_ops tpm_vtpm = {
        .status = vtpm_status,
        .recv = vtpm_recv,
        .send = vtpm_send,
        .cancel = vtpm_cancel,
        .req_complete_mask = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
        .req_complete_val = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
        .req_canceled = vtpm_req_canceled,
};

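/*
 * Event channel handler: wake up anyone waiting in wait_for_tpm_stat()
 * once the backend has moved the shared page to IDLE or FINISH.
 */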
static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
{
        struct tpm_private *priv = dev_id;

        switch (priv->shr->state) {
        case VTPM_STATE_IDLE:
        case VTPM_STATE_FINISH:
                wake_up_interruptible(&priv->chip->vendor.read_queue);
                break;
        case VTPM_STATE_SUBMIT:
        case VTPM_STATE_CANCEL:
        default:
                break;
        }
        return IRQ_HANDLED;
}

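/* Register the chip with the TPM core and link it to the frontend state. */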
static int setup_chip(struct device *dev, struct tpm_private *priv)
{
        struct tpm_chip *chip;

        chip = tpm_register_hardware(dev, &tpm_vtpm);
        if (!chip)
                return -ENODEV;

        init_waitqueue_head(&chip->vendor.read_queue);

        priv->chip = chip;
        TPM_VPRIV(chip) = priv;

        return 0;
}

/* caller must clean up in case of errors */
static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
{
        struct xenbus_transaction xbt;
        const char *message = NULL;
        int rv;

        priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        if (!priv->shr) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
                return -ENOMEM;
        }

        rv = xenbus_grant_ring(dev, virt_to_mfn(priv->shr));
        if (rv < 0)
                return rv;

        priv->ring_ref = rv;

        rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
        if (rv)
                return rv;

        rv = bind_evtchn_to_irqhandler(priv->evtchn, tpmif_interrupt, 0,
                                       "tpmif", priv);
        if (rv <= 0) {
                xenbus_dev_fatal(dev, rv, "allocating TPM irq");
                return rv;
        }
        priv->chip->vendor.irq = rv;

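        /*
         * Publish ring-ref, event-channel and feature-protocol-v2 in
         * xenstore; restart the transaction if it raced with another
         * xenstore update (-EAGAIN).
         */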
 again:
        rv = xenbus_transaction_start(&xbt);
        if (rv) {
                xenbus_dev_fatal(dev, rv, "starting transaction");
                return rv;
        }

        rv = xenbus_printf(xbt, dev->nodename,
                           "ring-ref", "%u", priv->ring_ref);
        if (rv) {
                message = "writing ring-ref";
                goto abort_transaction;
        }

        rv = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
                           priv->evtchn);
        if (rv) {
                message = "writing event-channel";
                goto abort_transaction;
        }

        rv = xenbus_printf(xbt, dev->nodename, "feature-protocol-v2", "1");
        if (rv) {
                message = "writing feature-protocol-v2";
                goto abort_transaction;
        }

        rv = xenbus_transaction_end(xbt, 0);
        if (rv == -EAGAIN)
                goto again;
        if (rv) {
                xenbus_dev_fatal(dev, rv, "completing transaction");
                return rv;
        }

        xenbus_switch_state(dev, XenbusStateInitialised);

        return 0;

 abort_transaction:
        xenbus_transaction_end(xbt, 1);
        if (message)
                xenbus_dev_error(dev, rv, "%s", message);

        return rv;
}

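/*
 * Undo setup_ring(): revoke the grant on the shared page (freeing it once
 * the backend lets go) or free the page if it was never granted, unbind
 * the event channel irq, and release the private data.
 */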
static void ring_free(struct tpm_private *priv)
{
        if (!priv)
                return;

        if (priv->ring_ref)
                gnttab_end_foreign_access(priv->ring_ref, 0,
                                          (unsigned long)priv->shr);
        else
                free_page((unsigned long)priv->shr);

        if (priv->chip && priv->chip->vendor.irq)
                unbind_from_irqhandler(priv->chip->vendor.irq, priv);

        kfree(priv);
}

static int tpmfront_probe(struct xenbus_device *dev,
                          const struct xenbus_device_id *id)
{
        struct tpm_private *priv;
        int rv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating priv structure");
                return -ENOMEM;
        }

        rv = setup_chip(&dev->dev, priv);
        if (rv) {
                kfree(priv);
                return rv;
        }

        rv = setup_ring(dev, priv);
        if (rv) {
                tpm_remove_hardware(&dev->dev);
                ring_free(priv);
                return rv;
        }

        tpm_get_timeouts(priv->chip);

        return rv;
}

static int tpmfront_remove(struct xenbus_device *dev)
{
        struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
        struct tpm_private *priv = TPM_VPRIV(chip);
        tpm_remove_hardware(&dev->dev);
        ring_free(priv);
        TPM_VPRIV(chip) = NULL;
        return 0;
}

static int tpmfront_resume(struct xenbus_device *dev)
{
        /* A suspend/resume/migrate will interrupt a vTPM anyway */
        tpmfront_remove(dev);
        return tpmfront_probe(dev, NULL);
}

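/*
 * Track the backend's xenbus state: insist on the v2 vTPM protocol before
 * going to Connected, and tear the device down on Closing/Closed.
 */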
static void backend_changed(struct xenbus_device *dev,
                            enum xenbus_state backend_state)
{
        int val;

        switch (backend_state) {
        case XenbusStateInitialised:
        case XenbusStateConnected:
                if (dev->state == XenbusStateConnected)
                        break;

                if (xenbus_scanf(XBT_NIL, dev->otherend,
                                 "feature-protocol-v2", "%d", &val) < 0)
                        val = 0;
                if (!val) {
                        xenbus_dev_fatal(dev, -EINVAL,
                                         "vTPM protocol 2 required");
                        return;
                }
                xenbus_switch_state(dev, XenbusStateConnected);
                break;

        case XenbusStateClosing:
        case XenbusStateClosed:
                device_unregister(&dev->dev);
                xenbus_frontend_closed(dev);
                break;
        default:
                break;
        }
}

static const struct xenbus_device_id tpmfront_ids[] = {
        { "vtpm" },
        { "" }
};
MODULE_ALIAS("xen:vtpm");

static DEFINE_XENBUS_DRIVER(tpmfront, ,
        .probe = tpmfront_probe,
        .remove = tpmfront_remove,
        .resume = tpmfront_resume,
        .otherend_changed = backend_changed,
        );

static int __init xen_tpmfront_init(void)
{
        if (!xen_domain())
                return -ENODEV;

        return xenbus_register_frontend(&tpmfront_driver);
}
module_init(xen_tpmfront_init);

static void __exit xen_tpmfront_exit(void)
{
        xenbus_unregister_driver(&tpmfront_driver);
}
module_exit(xen_tpmfront_exit);

MODULE_AUTHOR("Daniel De Graaf <dgdegra@tycho.nsa.gov>");
MODULE_DESCRIPTION("Xen vTPM Driver");
MODULE_LICENSE("GPL");