drivers/char/tpm/xen-tpmfront.c
/*
 * Implementation of the Xen vTPM device frontend
 *
 * Author:  Daniel De Graaf <dgdegra@tycho.nsa.gov>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2,
 * as published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/io/tpmif.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include "tpm.h"

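/*
 * Per-device frontend state: the registered TPM chip, the xenbus device,
 * the shared page used to carry requests and responses, and the event
 * channel and grant reference used to signal the backend.
 */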
struct tpm_private {
        struct tpm_chip *chip;
        struct xenbus_device *dev;

        struct vtpm_shared_page *shr;

        unsigned int evtchn;
        int ring_ref;
        domid_t backend_id;
};

enum status_bits {
        VTPM_STATUS_RUNNING  = 0x1,
        VTPM_STATUS_IDLE     = 0x2,
        VTPM_STATUS_RESULT   = 0x4,
        VTPM_STATUS_CANCELED = 0x8,
};

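/*
 * Map the shared-page state, which the backend updates, onto the status
 * bits that the generic TPM layer polls via wait_for_tpm_stat().
 */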
static u8 vtpm_status(struct tpm_chip *chip)
{
        struct tpm_private *priv = TPM_VPRIV(chip);
        switch (priv->shr->state) {
        case VTPM_STATE_IDLE:
                return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED;
        case VTPM_STATE_FINISH:
                return VTPM_STATUS_IDLE | VTPM_STATUS_RESULT;
        case VTPM_STATE_SUBMIT:
        case VTPM_STATE_CANCEL: /* cancel requested, not yet canceled */
                return VTPM_STATUS_RUNNING;
        default:
                return 0;
        }
}

static bool vtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
        return status & VTPM_STATUS_CANCELED;
}

static void vtpm_cancel(struct tpm_chip *chip)
{
        struct tpm_private *priv = TPM_VPRIV(chip);
        priv->shr->state = VTPM_STATE_CANCEL;
        wmb();
        notify_remote_via_evtchn(priv->evtchn);
}

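/*
 * Command and response data are laid out after the shared page header
 * and the array of extra-page grant references.
 */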
static unsigned int shr_data_offset(struct vtpm_shared_page *shr)
{
        return sizeof(*shr) + sizeof(u32) * shr->nr_extra_pages;
}

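/*
 * Copy a command into the shared page, mark it for submission, notify the
 * backend over the event channel, and wait for the command to complete
 * within the duration expected for its ordinal.
 */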
static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
        struct tpm_private *priv = TPM_VPRIV(chip);
        struct vtpm_shared_page *shr = priv->shr;
        unsigned int offset = shr_data_offset(shr);

        u32 ordinal;
        unsigned long duration;

        if (offset > PAGE_SIZE)
                return -EINVAL;

        if (offset + count > PAGE_SIZE)
                return -EINVAL;

        /* Wait for completion of any existing command or cancellation */
        if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->vendor.timeout_c,
                        &chip->vendor.read_queue, true) < 0) {
                vtpm_cancel(chip);
                return -ETIME;
        }

        memcpy(offset + (u8 *)shr, buf, count);
        shr->length = count;
        barrier();
        shr->state = VTPM_STATE_SUBMIT;
        wmb();
        notify_remote_via_evtchn(priv->evtchn);

        ordinal = be32_to_cpu(((struct tpm_input_header *)buf)->ordinal);
        duration = tpm_calc_ordinal_duration(chip, ordinal);

        if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration,
                        &chip->vendor.read_queue, true) < 0) {
                /* got a signal or timeout, try to cancel */
                vtpm_cancel(chip);
                return -ETIME;
        }

        return count;
}

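/*
 * Copy the backend's response out of the shared page, clamping the length
 * to both the page size and the caller's buffer.
 */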
static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
        struct tpm_private *priv = TPM_VPRIV(chip);
        struct vtpm_shared_page *shr = priv->shr;
        unsigned int offset = shr_data_offset(shr);
        size_t length = shr->length;

        if (shr->state == VTPM_STATE_IDLE)
                return -ECANCELED;

        /* In theory the wait at the end of _send makes this one unnecessary */
        if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->vendor.timeout_c,
                        &chip->vendor.read_queue, true) < 0) {
                vtpm_cancel(chip);
                return -ETIME;
        }

        if (offset > PAGE_SIZE)
                return -EIO;

        if (offset + length > PAGE_SIZE)
                length = PAGE_SIZE - offset;

        if (length > count)
                length = count;

        memcpy(buf, offset + (u8 *)shr, length);

        return length;
}

static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
                NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);

static struct attribute *vtpm_attrs[] = {
        &dev_attr_pubek.attr,
        &dev_attr_pcrs.attr,
        &dev_attr_enabled.attr,
        &dev_attr_active.attr,
        &dev_attr_owned.attr,
        &dev_attr_temp_deactivated.attr,
        &dev_attr_caps.attr,
        &dev_attr_cancel.attr,
        &dev_attr_durations.attr,
        &dev_attr_timeouts.attr,
        NULL,
};

static struct attribute_group vtpm_attr_grp = {
        .attrs = vtpm_attrs,
};

static const struct tpm_vendor_specific tpm_vtpm = {
        .status = vtpm_status,
        .recv = vtpm_recv,
        .send = vtpm_send,
        .cancel = vtpm_cancel,
        .req_complete_mask = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
        .req_complete_val  = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
        .req_canceled      = vtpm_req_canceled,
        .attr_group = &vtpm_attr_grp,
};

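/*
 * Event-channel handler: the backend has updated the shared page, so wake
 * any thread sleeping in wait_for_tpm_stat() once a command has finished
 * or been cancelled.
 */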
static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
{
        struct tpm_private *priv = dev_id;

        switch (priv->shr->state) {
        case VTPM_STATE_IDLE:
        case VTPM_STATE_FINISH:
                wake_up_interruptible(&priv->chip->vendor.read_queue);
                break;
        case VTPM_STATE_SUBMIT:
        case VTPM_STATE_CANCEL:
        default:
                break;
        }
        return IRQ_HANDLED;
}

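/* Register a TPM chip for this device and link it to the frontend state. */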
static int setup_chip(struct device *dev, struct tpm_private *priv)
{
        struct tpm_chip *chip;

        chip = tpm_register_hardware(dev, &tpm_vtpm);
        if (!chip)
                return -ENODEV;

        init_waitqueue_head(&chip->vendor.read_queue);

        priv->chip = chip;
        TPM_VPRIV(chip) = priv;

        return 0;
}

/* caller must clean up in case of errors */
static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
{
        struct xenbus_transaction xbt;
        const char *message = NULL;
        int rv;

        priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        if (!priv->shr) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
                return -ENOMEM;
        }

        rv = xenbus_grant_ring(dev, virt_to_mfn(priv->shr));
        if (rv < 0)
                return rv;

        priv->ring_ref = rv;

        rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
        if (rv)
                return rv;

        rv = bind_evtchn_to_irqhandler(priv->evtchn, tpmif_interrupt, 0,
                                       "tpmif", priv);
        if (rv <= 0) {
                xenbus_dev_fatal(dev, rv, "allocating TPM irq");
                return rv;
        }
        priv->chip->vendor.irq = rv;

 again:
        rv = xenbus_transaction_start(&xbt);
        if (rv) {
                xenbus_dev_fatal(dev, rv, "starting transaction");
                return rv;
        }

        rv = xenbus_printf(xbt, dev->nodename,
                        "ring-ref", "%u", priv->ring_ref);
        if (rv) {
                message = "writing ring-ref";
                goto abort_transaction;
        }

        rv = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
                        priv->evtchn);
        if (rv) {
                message = "writing event-channel";
                goto abort_transaction;
        }

        rv = xenbus_printf(xbt, dev->nodename, "feature-protocol-v2", "1");
        if (rv) {
                message = "writing feature-protocol-v2";
                goto abort_transaction;
        }

        rv = xenbus_transaction_end(xbt, 0);
        if (rv == -EAGAIN)
                goto again;
        if (rv) {
                xenbus_dev_fatal(dev, rv, "completing transaction");
                return rv;
        }

        xenbus_switch_state(dev, XenbusStateInitialised);

        return 0;

 abort_transaction:
        xenbus_transaction_end(xbt, 1);
        if (message)
                xenbus_dev_error(dev, rv, "%s", message);

        return rv;
}

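/*
 * Release the shared page (revoking the grant if one was made), unbind the
 * event-channel IRQ, and free the per-device state.
 */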
static void ring_free(struct tpm_private *priv)
{
        if (!priv)
                return;

        if (priv->ring_ref)
                gnttab_end_foreign_access(priv->ring_ref, 0,
                                (unsigned long)priv->shr);
        else
                free_page((unsigned long)priv->shr);

        if (priv->chip && priv->chip->vendor.irq)
                unbind_from_irqhandler(priv->chip->vendor.irq, priv);

        kfree(priv);
}

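/*
 * Allocate per-device state, register the chip with the TPM core, set up
 * the shared ring and event channel, and read the TPM's timeouts.
 */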
static int tpmfront_probe(struct xenbus_device *dev,
                const struct xenbus_device_id *id)
{
        struct tpm_private *priv;
        int rv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating priv structure");
                return -ENOMEM;
        }

        rv = setup_chip(&dev->dev, priv);
        if (rv) {
                kfree(priv);
                return rv;
        }

        rv = setup_ring(dev, priv);
        if (rv) {
                tpm_remove_hardware(&dev->dev);
                ring_free(priv);
                return rv;
        }

        tpm_get_timeouts(priv->chip);

        return rv;
}

static int tpmfront_remove(struct xenbus_device *dev)
{
        struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
        struct tpm_private *priv = TPM_VPRIV(chip);
        tpm_remove_hardware(&dev->dev);
        ring_free(priv);
        TPM_VPRIV(chip) = NULL;
        return 0;
}

static int tpmfront_resume(struct xenbus_device *dev)
{
        /* A suspend/resume/migrate will interrupt a vTPM anyway */
        tpmfront_remove(dev);
        return tpmfront_probe(dev, NULL);
}

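/*
 * React to backend state changes: require the backend to advertise
 * feature-protocol-v2 before connecting, and tear down when it closes.
 */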
static void backend_changed(struct xenbus_device *dev,
                enum xenbus_state backend_state)
{
        int val;

        switch (backend_state) {
        case XenbusStateInitialised:
        case XenbusStateConnected:
                if (dev->state == XenbusStateConnected)
                        break;

                if (xenbus_scanf(XBT_NIL, dev->otherend,
                                "feature-protocol-v2", "%d", &val) < 0)
                        val = 0;
                if (!val) {
                        xenbus_dev_fatal(dev, -EINVAL,
                                        "vTPM protocol 2 required");
                        return;
                }
                xenbus_switch_state(dev, XenbusStateConnected);
                break;

        case XenbusStateClosing:
        case XenbusStateClosed:
                device_unregister(&dev->dev);
                xenbus_frontend_closed(dev);
                break;
        default:
                break;
        }
}

393
394 static const struct xenbus_device_id tpmfront_ids[] = {
395         { "vtpm" },
396         { "" }
397 };
398 MODULE_ALIAS("xen:vtpm");
399
400 static DEFINE_XENBUS_DRIVER(tpmfront, ,
401                 .probe = tpmfront_probe,
402                 .remove = tpmfront_remove,
403                 .resume = tpmfront_resume,
404                 .otherend_changed = backend_changed,
405         );
406
static int __init xen_tpmfront_init(void)
{
        if (!xen_domain())
                return -ENODEV;

        return xenbus_register_frontend(&tpmfront_driver);
}
module_init(xen_tpmfront_init);

static void __exit xen_tpmfront_exit(void)
{
        xenbus_unregister_driver(&tpmfront_driver);
}
module_exit(xen_tpmfront_exit);

MODULE_AUTHOR("Daniel De Graaf <dgdegra@tycho.nsa.gov>");
MODULE_DESCRIPTION("Xen vTPM Driver");
MODULE_LICENSE("GPL");