2 * Copyright (C) 2005, 2006 IBM Corporation
3 * Copyright (C) 2014, 2015 Intel Corporation
6 * Leendert van Doorn <leendert@watson.ibm.com>
7 * Kylene Hall <kjhall@us.ibm.com>
9 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
11 * Device driver for TCG/TCPA TPM (trusted platform module).
12 * Specifications at www.trustedcomputinggroup.org
14 * This device driver implements the TPM interface as defined in
15 * the TCG TPM Interface Spec version 1.2, revision 1.0.
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License as
19 * published by the Free Software Foundation, version 2 of the
22 #include <linux/init.h>
23 #include <linux/module.h>
24 #include <linux/moduleparam.h>
25 #include <linux/pnp.h>
26 #include <linux/slab.h>
27 #include <linux/interrupt.h>
28 #include <linux/wait.h>
29 #include <linux/acpi.h>
30 #include <linux/freezer.h>
/* TPM_ACCESS register bits (per-locality access control). */
34 TPM_ACCESS_VALID = 0x80,
35 TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
36 TPM_ACCESS_REQUEST_PENDING = 0x04,
37 TPM_ACCESS_REQUEST_USE = 0x02,
/* TPM_STS register bits (command/data handshake status). */
42 TPM_STS_COMMAND_READY = 0x40,
44 TPM_STS_DATA_AVAIL = 0x10,
45 TPM_STS_DATA_EXPECT = 0x08,
/* TPM_INT_ENABLE / TPM_INTF_CAPS bits: global enable, supported interrupt
 * types and edge/level polarity capabilities. */
49 TPM_GLOBAL_INT_ENABLE = 0x80000000,
50 TPM_INTF_BURST_COUNT_STATIC = 0x100,
51 TPM_INTF_CMD_READY_INT = 0x080,
52 TPM_INTF_INT_EDGE_FALLING = 0x040,
53 TPM_INTF_INT_EDGE_RISING = 0x020,
54 TPM_INTF_INT_LEVEL_LOW = 0x010,
55 TPM_INTF_INT_LEVEL_HIGH = 0x008,
56 TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
57 TPM_INTF_STS_VALID_INT = 0x002,
58 TPM_INTF_DATA_AVAIL_INT = 0x001,
/* Legacy fixed TIS MMIO base address and TIS-spec timeouts (milliseconds). */
62 TIS_MEM_BASE = 0xFED40000,
64 TIS_SHORT_TIMEOUT = 750, /* ms */
65 TIS_LONG_TIMEOUT = 2000, /* 2 sec */
/* Default device description: the fixed legacy TIS MMIO window, used when
 * no PNP/ACPI resource description is available. */
74 static struct tpm_info tis_default_info = {
75 .start = TIS_MEM_BASE,
80 /* Some timeout values are needed before it is known whether the chip is
/* Until the chip family (1.2 vs 2.0) is known, use the larger of the TIS
 * and TPM2 timeout for each class so neither family times out early. */
83 #define TIS_TIMEOUT_A_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_A)
84 #define TIS_TIMEOUT_B_MAX max(TIS_LONG_TIMEOUT, TPM2_TIMEOUT_B)
85 #define TIS_TIMEOUT_C_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_C)
86 #define TIS_TIMEOUT_D_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_D)
/* TIS MMIO register offsets. Each locality owns a 4 KiB window, so the
 * effective offset is (register | (locality << 12)). */
88 #define TPM_ACCESS(l) (0x0000 | ((l) << 12))
89 #define TPM_INT_ENABLE(l) (0x0008 | ((l) << 12))
90 #define TPM_INT_VECTOR(l) (0x000C | ((l) << 12))
91 #define TPM_INT_STATUS(l) (0x0010 | ((l) << 12))
92 #define TPM_INTF_CAPS(l) (0x0014 | ((l) << 12))
93 #define TPM_STS(l) (0x0018 | ((l) << 12))
94 #define TPM_STS3(l) (0x001b | ((l) << 12))
95 #define TPM_DATA_FIFO(l) (0x0024 | ((l) << 12))
97 #define TPM_DID_VID(l) (0x0F00 | ((l) << 12))
98 #define TPM_RID(l) (0x0F04 | ((l) << 12))
104 #if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
/* Return nonzero when the ACPI device lists hardware id @hid among its
 * pnp.ids. */
105 static int has_hid(struct acpi_device *dev, const char *hid)
107 struct acpi_hardware_id *id;
109 list_for_each_entry(id, &dev->pnp.ids, list)
110 if (!strcmp(hid, id->id))
/* Intel iTPM devices are identified by the INTC0102 ACPI HID. */
116 static inline int is_itpm(struct acpi_device *dev)
118 return has_hid(dev, "INTC0102");
/* Decide whether this device uses the FIFO (TIS) interface. Only devices
 * with the MSFT0101 (TPM 2.0) HID consult the ACPI TPM2 table; its
 * start_method must be TPM2_START_FIFO. */
121 static inline int is_fifo(struct acpi_device *dev)
123 struct acpi_table_tpm2 *tbl;
127 if (!has_hid(dev, "MSFT0101"))
130 st = acpi_get_table(ACPI_SIG_TPM2, 1,
131 (struct acpi_table_header **) &tbl);
132 if (ACPI_FAILURE(st)) {
133 dev_err(&dev->dev, "failed to get TPM2 ACPI table\n");
137 if (le32_to_cpu(tbl->start_method) != TPM2_START_FIFO)
/* Stubs for kernels built without both CONFIG_PNP and CONFIG_ACPI. */
144 static inline int is_itpm(struct acpi_device *dev)
149 static inline int is_fifo(struct acpi_device *dev)
155 /* Before we attempt to access the TPM we must see that the valid bit is set.
156 * The specification says that this bit is 0 at reset and remains 0 until the
157 * 'TPM has gone through its self test and initialization and has established
158 * correct values in the other bits.' */
/* Poll TPM_ACCESS of locality @l for TPM_ACCESS_VALID, giving up once
 * timeout_a has elapsed. */
159 static int wait_startup(struct tpm_chip *chip, int l)
161 unsigned long stop = jiffies + chip->vendor.timeout_a;
163 if (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
167 } while (time_before(jiffies, stop));
/* If locality @l is both active and valid, record it as the current
 * locality and return @l; callers treat a negative return as "not ours". */
171 static int check_locality(struct tpm_chip *chip, int l)
173 if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
174 (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
175 (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
176 return chip->vendor.locality = l;
/* Relinquish locality @l by writing TPM_ACCESS_ACTIVE_LOCALITY:
 * unconditionally when @force is set, otherwise only when another
 * requester is pending (REQUEST_PENDING with VALID). */
181 static void release_locality(struct tpm_chip *chip, int l, int force)
183 if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
184 (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
185 (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
186 iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
187 chip->vendor.iobase + TPM_ACCESS(l));
/* Request use of locality @l. Fast path: already active. Otherwise write
 * REQUEST_USE and wait until the TPM grants it or timeout_a expires. */
190 static int request_locality(struct tpm_chip *chip, int l)
192 unsigned long stop, timeout;
195 if (check_locality(chip, l) >= 0)
198 iowrite8(TPM_ACCESS_REQUEST_USE,
199 chip->vendor.iobase + TPM_ACCESS(l));
201 stop = jiffies + chip->vendor.timeout_a;
/* Interrupt-driven path: sleep on int_queue for the remaining time. */
203 if (chip->vendor.irq) {
205 timeout = stop - jiffies;
206 if ((long)timeout <= 0)
208 rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
/* A signal raised by the freezer restarts the wait instead of failing
 * the locality request. */
214 if (rc == -ERESTARTSYS && freezing(current)) {
215 clear_thread_flag(TIF_SIGPENDING);
/* Polling path: re-check the locality until the deadline. */
219 /* wait for burstcount */
221 if (check_locality(chip, l) >= 0)
225 while (time_before(jiffies, stop));
/* Read the 8-bit TPM_STS register of the currently held locality. */
230 static u8 tpm_tis_status(struct tpm_chip *chip)
232 return ioread8(chip->vendor.iobase +
233 TPM_STS(chip->vendor.locality));
/* Write commandReady to TPM_STS; per the TIS spec this aborts any command
 * in progress and returns the TPM to the ready state. */
236 static void tpm_tis_ready(struct tpm_chip *chip)
238 /* this causes the current command to be aborted */
239 iowrite8(TPM_STS_COMMAND_READY,
240 chip->vendor.iobase + TPM_STS(chip->vendor.locality))"
/* Read the 16-bit burstCount field (bytes 1-2 of TPM_STS), polling until
 * it becomes nonzero or timeout_d expires. The burst count is how many
 * FIFO bytes may be transferred without further handshaking. */
243 static int get_burstcount(struct tpm_chip *chip)
248 /* wait for burstcount */
249 /* which timeout value, spec has 2 answers (c & d) */
250 stop = jiffies + chip->vendor.timeout_d;
252 burstcnt = ioread8(chip->vendor.iobase +
253 TPM_STS(chip->vendor.locality) + 1);
254 burstcnt += ioread8(chip->vendor.iobase +
255 TPM_STS(chip->vendor.locality) +
260 } while (time_before(jiffies, stop));
/* Read up to @count bytes from the data FIFO into @buf. Before each burst,
 * wait (timeout_c) for dataAvail+stsValid; then drain at most burstcount
 * bytes. Returns the number of bytes actually read. */
264 static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
266 int size = 0, burstcnt;
267 while (size < count &&
268 wait_for_tpm_stat(chip,
269 TPM_STS_DATA_AVAIL | TPM_STS_VALID,
270 chip->vendor.timeout_c,
271 &chip->vendor.read_queue, true)
273 burstcnt = get_burstcount(chip);
274 for (; burstcnt > 0 && size < count; burstcnt--)
275 buf[size++] = ioread8(chip->vendor.iobase +
276 TPM_DATA_FIFO(chip->vendor.
/* Receive a complete TPM response into @buf (capacity @count). Reads the
 * 10-byte header first, parses the big-endian total length at offset 2,
 * then reads the remainder; rejects responses larger than the buffer and
 * complains if the TPM still reports data available afterwards. */
282 static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
285 int expected, status;
287 if (count < TPM_HEADER_SIZE) {
292 /* read first 10 bytes, including tag, paramsize, and result */
294 recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
295 dev_err(chip->pdev, "Unable to read header\n");
/* paramSize: total response length, big-endian, at byte offset 2. */
299 expected = be32_to_cpu(*(__be32 *) (buf + 2));
300 if (expected > count) {
306 recv_data(chip, &buf[TPM_HEADER_SIZE],
307 expected - TPM_HEADER_SIZE)) < expected) {
308 dev_err(chip->pdev, "Unable to read remainder of result\n");
313 wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
314 &chip->vendor.int_queue, false);
315 status = tpm_tis_status(chip);
/* Leftover dataAvail after a full read indicates a protocol error. */
316 if (status & TPM_STS_DATA_AVAIL) { /* retry? */
317 dev_err(chip->pdev, "Error left over data\n");
324 release_locality(chip, chip->vendor.locality, 0);
/* Module parameter to force the iTPM quirk even when not auto-detected. */
329 module_param(itpm, bool, 0444);
330 MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
/* Push a command of @len bytes into the TPM FIFO (without issuing tpmGo).
 * Acquires locality 0, ensures commandReady, then writes all but the last
 * byte in burstcount-sized chunks, verifying the TPM still expects data
 * between bursts; the final byte must clear dataExpect. */
333 * If interrupts are used (signaled by an irq set in the vendor structure)
334 * tpm.c can skip polling for the data to be available as the interrupt is
337 static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
339 int rc, status, burstcnt;
342 if (request_locality(chip, 0) < 0)
345 status = tpm_tis_status(chip);
346 if ((status & TPM_STS_COMMAND_READY) == 0) {
348 if (wait_for_tpm_stat
349 (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
350 &chip->vendor.int_queue, false) < 0) {
356 while (count < len - 1) {
357 burstcnt = get_burstcount(chip);
358 for (; burstcnt > 0 && count < len - 1; burstcnt--) {
359 iowrite8(buf[count], chip->vendor.iobase +
360 TPM_DATA_FIFO(chip->vendor.locality));
364 wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
365 &chip->vendor.int_queue, false);
366 status = tpm_tis_status(chip);
/* Flawed iTPMs misreport dataExpect, so the check is skipped there. */
367 if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
373 /* write last byte */
375 chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
376 wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
377 &chip->vendor.int_queue, false);
378 status = tpm_tis_status(chip);
/* dataExpect still set after the last byte means the TPM wants more. */
379 if ((status & TPM_STS_DATA_EXPECT) != 0) {
388 release_locality(chip, chip->vendor.locality, 0);
/* Fall back to polling: clear the global interrupt enable bit, free the
 * IRQ line and record that no IRQ is in use (vendor.irq = 0). */
392 static void disable_interrupts(struct tpm_chip *chip)
397 ioread32(chip->vendor.iobase +
398 TPM_INT_ENABLE(chip->vendor.locality));
399 intmask &= ~TPM_GLOBAL_INT_ENABLE;
401 chip->vendor.iobase +
402 TPM_INT_ENABLE(chip->vendor.locality));
403 devm_free_irq(chip->pdev, chip->vendor.irq, chip);
404 chip->vendor.irq = 0;
/* Send a command and start execution. After tpm_tis_send_data() has loaded
 * the FIFO, tpmGo is written to TPM_STS; in IRQ mode we then sleep until
 * dataAvail+stsValid or the ordinal-specific duration elapses (computed by
 * the TPM 2.0 or 1.2 duration helper as appropriate). */
408 * If interrupts are used (signaled by an irq set in the vendor structure)
409 * tpm.c can skip polling for the data to be available as the interrupt is
412 static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
418 rc = tpm_tis_send_data(chip, buf, len);
424 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
426 if (chip->vendor.irq) {
/* Command ordinal lives at byte offset 6, big-endian. */
427 ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
429 if (chip->flags & TPM_CHIP_FLAG_TPM2)
430 dur = tpm2_calc_ordinal_duration(chip, ordinal);
432 dur = tpm_calc_ordinal_duration(chip, ordinal);
434 if (wait_for_tpm_stat
435 (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, dur,
436 &chip->vendor.read_queue, false) < 0) {
444 release_locality(chip, chip->vendor.locality, 0);
/* Send entry point with one-shot IRQ verification. Until the first IRQ is
 * observed (priv->irq_tested), the command is sent with vendor.irq
 * temporarily zeroed so completion is polled; if the handler still never
 * fired, interrupts are disabled permanently for this chip. */
448 static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
451 struct priv_data *priv = chip->vendor.priv;
453 if (!chip->vendor.irq || priv->irq_tested)
454 return tpm_tis_send_main(chip, buf, len);
456 /* Verify receipt of the expected IRQ */
457 irq = chip->vendor.irq;
458 chip->vendor.irq = 0;
459 rc = tpm_tis_send_main(chip, buf, len);
460 chip->vendor.irq = irq;
461 if (!priv->irq_tested)
463 if (!priv->irq_tested)
464 disable_interrupts(chip);
465 priv->irq_tested = true;
/* Per-vendor timeout override: DID/VID key plus four timeouts (a-d) in
 * microseconds. The 0x32041114 entry targets an Atmel part whose reported
 * timeouts are unusable. */
469 struct tis_vendor_timeout_override {
471 unsigned long timeout_us[4];
474 static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = {
476 { 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000),
477 (TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } },
/* update_timeouts hook: if this chip's DID/VID matches an entry in
 * vendor_timeout_overrides, copy that entry's four timeouts into
 * @timeout_cap and report that an override was applied. */
480 static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
481 unsigned long *timeout_cap)
486 did_vid = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
488 for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
489 if (vendor_timeout_overrides[i].did_vid != did_vid)
491 memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
492 sizeof(vendor_timeout_overrides[i].timeout_us));
/* Detect the Intel iTPM dataExpect flaw: only on Intel-vendor parts, send
 * a TPM_GetTicks command once with the itpm workaround off and, if that
 * fails, once with it on; success the second time identifies an iTPM. */
500 * Early probing for iTPM with STS_DATA_EXPECT flaw.
501 * Try sending command without itpm flag set and if that
502 * fails, repeat with itpm flag set.
504 static int probe_itpm(struct tpm_chip *chip)
/* TPM 1.2 TPM_GetTicks: tag 0x00c1, length 10, ordinal 0xf1. */
507 u8 cmd_getticks[] = {
508 0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
509 0x00, 0x00, 0x00, 0xf1
511 size_t len = sizeof(cmd_getticks);
512 bool rem_itpm = itpm;
513 u16 vendor = ioread16(chip->vendor.iobase + TPM_DID_VID(0));
515 /* probe only iTPMS */
516 if (vendor != TPM_VID_INTEL)
521 rc = tpm_tis_send_data(chip, cmd_getticks, len);
526 release_locality(chip, chip->vendor.locality, 0);
530 rc = tpm_tis_send_data(chip, cmd_getticks, len);
532 dev_info(chip->pdev, "Detected an iTPM.\n");
540 release_locality(chip, chip->vendor.locality, 0);
/* req_canceled hook: vendors report a cancelled command with different
 * status values, so dispatch on manufacturer_id (Winbond and one other
 * vendor accept VALID|COMMAND_READY; the default expects COMMAND_READY). */
545 static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status)
547 switch (chip->vendor.manufacturer_id) {
548 case TPM_VID_WINBOND:
549 return ((status == TPM_STS_VALID) ||
550 (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)));
552 return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY));
554 return (status == TPM_STS_COMMAND_READY);
/* tpm_class_ops vtable wiring this driver's primitives into the TPM core;
 * a command is complete when both dataAvail and stsValid are set. */
558 static const struct tpm_class_ops tpm_tis = {
559 .status = tpm_tis_status,
560 .recv = tpm_tis_recv,
561 .send = tpm_tis_send,
562 .cancel = tpm_tis_ready,
563 .update_timeouts = tpm_tis_update_timeouts,
564 .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
565 .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
566 .req_canceled = tpm_tis_req_canceled,
/* IRQ handler: read TPM_INT_STATUS, mark the IRQ as proven working,
 * wake readers on dataAvail, rescan localities 0-4 on a locality change,
 * wake command waiters for the remaining interrupt types, then ack by
 * writing the status back and flush with a read. */
569 static irqreturn_t tis_int_handler(int dummy, void *dev_id)
571 struct tpm_chip *chip = dev_id;
575 interrupt = ioread32(chip->vendor.iobase +
576 TPM_INT_STATUS(chip->vendor.locality));
581 ((struct priv_data *)chip->vendor.priv)->irq_tested = true;
582 if (interrupt & TPM_INTF_DATA_AVAIL_INT)
583 wake_up_interruptible(&chip->vendor.read_queue);
584 if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
585 for (i = 0; i < 5; i++)
586 if (check_locality(chip, i) >= 0)
589 (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
590 TPM_INTF_CMD_READY_INT))
591 wake_up_interruptible(&chip->vendor.int_queue);
593 /* Clear interrupts handled with TPM_EOI */
595 chip->vendor.iobase +
596 TPM_INT_STATUS(chip->vendor.locality));
/* Read back to post the write before returning from the handler. */
597 ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
601 /* Register the IRQ and issue a command that will cause an interrupt. If an
602 * irq is seen then leave the chip setup for IRQ operation, otherwise reverse
603 * everything and leave in polling mode. Returns 0 on success.
605 static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
608 struct priv_data *priv = chip->vendor.priv;
611 if (devm_request_irq(chip->pdev, irq, tis_int_handler, flags,
612 chip->devname, chip) != 0) {
613 dev_info(chip->pdev, "Unable to request irq: %d for probe\n",
617 chip->vendor.irq = irq;
/* Save the original vector so it can be restored if the probe fails. */
619 original_int_vec = ioread8(chip->vendor.iobase +
620 TPM_INT_VECTOR(chip->vendor.locality));
622 chip->vendor.iobase + TPM_INT_VECTOR(chip->vendor.locality));
624 /* Clear all existing */
625 iowrite32(ioread32(chip->vendor.iobase +
626 TPM_INT_STATUS(chip->vendor.locality)),
627 chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
/* Turn on all requested interrupt types plus the global enable. */
630 iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
631 chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
633 priv->irq_tested = false;
635 /* Generate an interrupt by having the core call through to
638 if (chip->flags & TPM_CHIP_FLAG_TPM2)
639 tpm2_gen_interrupt(chip);
641 tpm_gen_interrupt(chip);
643 /* tpm_tis_send will either confirm the interrupt is working or it
644 * will call disable_irq which undoes all of the above.
/* vendor.irq == 0 here means the test failed; restore the old vector. */
646 if (!chip->vendor.irq) {
647 iowrite8(original_int_vec,
648 chip->vendor.iobase +
649 TPM_INT_VECTOR(chip->vendor.locality));
656 /* Try to find the IRQ the TPM is using. This is for legacy x86 systems that
657 * do not have ACPI/etc. We typically expect the interrupt to be declared if
660 static void tpm_tis_probe_irq(struct tpm_chip *chip, u32 intmask)
665 original_int_vec = ioread8(chip->vendor.iobase +
666 TPM_INT_VECTOR(chip->vendor.locality));
/* No vector programmed: on x86, brute-force probe legacy IRQs 3..15;
 * otherwise try the vector the firmware already programmed. */
668 if (!original_int_vec) {
669 if (IS_ENABLED(CONFIG_X86))
670 for (i = 3; i <= 15; i++)
671 if (!tpm_tis_probe_irq_single(chip, intmask, 0,
674 } else if (!tpm_tis_probe_irq_single(chip, intmask, 0,
/* Module parameter: allow disabling interrupt mode entirely (default on). */
679 static bool interrupts = true;
680 module_param(interrupts, bool, 0444);
681 MODULE_PARM_DESC(interrupts, "Enable interrupts");
/* Teardown: send TPM2_Shutdown(CLEAR) on TPM 2.0 parts, mask the global
 * interrupt enable bit, and force-release the held locality. */
683 static void tpm_tis_remove(struct tpm_chip *chip)
685 if (chip->flags & TPM_CHIP_FLAG_TPM2)
686 tpm2_shutdown(chip, TPM2_SU_CLEAR);
688 iowrite32(~TPM_GLOBAL_INT_ENABLE &
689 ioread32(chip->vendor.iobase +
690 TPM_INT_ENABLE(chip->vendor.
692 chip->vendor.iobase +
693 TPM_INT_ENABLE(chip->vendor.locality));
694 release_locality(chip, chip->vendor.locality, 1);
/* Probe and register one TIS chip: allocate chip + private data, map the
 * MMIO window, wait for the TPM to become valid, quiesce its interrupts,
 * detect TPM 1.2 vs 2.0, apply the iTPM quirk, report capabilities, fetch
 * timeouts, set up the IRQ (if enabled), self-test, then register. On any
 * failure after mapping, tpm_tis_remove() undoes the hardware setup. */
697 static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info,
698 acpi_handle acpi_dev_handle)
700 u32 vendor, intfcaps, intmask;
702 struct tpm_chip *chip;
703 struct priv_data *priv;
705 priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL);
709 chip = tpmm_chip_alloc(dev, &tpm_tis);
711 return PTR_ERR(chip);
713 chip->vendor.priv = priv;
715 chip->acpi_dev_handle = acpi_dev_handle;
718 chip->vendor.iobase = devm_ioremap(dev, tpm_info->start, tpm_info->len);
719 if (!chip->vendor.iobase)
722 /* Maximum timeouts */
723 chip->vendor.timeout_a = TIS_TIMEOUT_A_MAX;
724 chip->vendor.timeout_b = TIS_TIMEOUT_B_MAX;
725 chip->vendor.timeout_c = TIS_TIMEOUT_C_MAX;
726 chip->vendor.timeout_d = TIS_TIMEOUT_D_MAX;
728 if (wait_startup(chip, 0) != 0) {
733 /* Take control of the TPM's interrupt hardware and shut it off */
734 intmask = ioread32(chip->vendor.iobase +
735 TPM_INT_ENABLE(chip->vendor.locality));
736 intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT |
737 TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT;
738 intmask &= ~TPM_GLOBAL_INT_ENABLE;
740 chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
742 if (request_locality(chip, 0) != 0) {
/* Distinguish TPM 1.2 from TPM 2.0 before issuing any commands. */
747 rc = tpm2_probe(chip);
751 vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
752 chip->vendor.manufacturer_id = vendor;
754 dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n",
755 (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2",
756 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
759 probe = probe_itpm(chip);
768 dev_info(dev, "Intel iTPM workaround enabled\n");
771 /* Figure out the capabilities */
773 ioread32(chip->vendor.iobase +
774 TPM_INTF_CAPS(chip->vendor.locality));
775 dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
777 if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
778 dev_dbg(dev, "\tBurst Count Static\n");
779 if (intfcaps & TPM_INTF_CMD_READY_INT)
780 dev_dbg(dev, "\tCommand Ready Int Support\n");
781 if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
782 dev_dbg(dev, "\tInterrupt Edge Falling\n");
783 if (intfcaps & TPM_INTF_INT_EDGE_RISING)
784 dev_dbg(dev, "\tInterrupt Edge Rising\n");
785 if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
786 dev_dbg(dev, "\tInterrupt Level Low\n");
787 if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
788 dev_dbg(dev, "\tInterrupt Level High\n");
789 if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
790 dev_dbg(dev, "\tLocality Change Int Support\n");
791 if (intfcaps & TPM_INTF_STS_VALID_INT)
792 dev_dbg(dev, "\tSts Valid Int Support\n");
793 if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
794 dev_dbg(dev, "\tData Avail Int Support\n");
796 /* Very early on issue a command to the TPM in polling mode to make
797 * sure it works. May as well use that command to set the proper
798 * timeouts for the driver.
800 if (tpm_get_timeouts(chip)) {
801 dev_err(dev, "Could not get TPM timeouts and durations\n");
806 /* INTERRUPT Setup */
807 init_waitqueue_head(&chip->vendor.read_queue);
808 init_waitqueue_head(&chip->vendor.int_queue);
/* Firmware-declared IRQ path: probe that single shared IRQ; otherwise
 * fall back to the legacy brute-force probe. */
811 tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
813 if (!chip->vendor.irq)
814 dev_err(chip->pdev, FW_BUG
815 "TPM interrupt not working, polling instead\n");
817 tpm_tis_probe_irq(chip, intmask);
/* TPM2_RC_INITIALIZE from the self-test means firmware never issued
 * Startup; do it ourselves and retry the self-test once. */
820 if (chip->flags & TPM_CHIP_FLAG_TPM2) {
821 rc = tpm2_do_selftest(chip);
822 if (rc == TPM2_RC_INITIALIZE) {
823 dev_warn(dev, "Firmware has not started TPM\n");
824 rc = tpm2_startup(chip, TPM2_SU_CLEAR);
826 rc = tpm2_do_selftest(chip);
830 dev_err(dev, "TPM self test failed\n");
836 if (tpm_do_selftest(chip)) {
837 dev_err(dev, "TPM self test failed\n");
843 return tpm_chip_register(chip);
845 tpm_tis_remove(chip);
849 #ifdef CONFIG_PM_SLEEP
/* Restore interrupt state after resume: reprogram the vector with the IRQ
 * we own and re-enable all interrupt types plus the global enable bit. */
850 static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
854 /* reenable interrupts that device may have lost or
855 BIOS/firmware may have disabled */
856 iowrite8(chip->vendor.irq, chip->vendor.iobase +
857 TPM_INT_VECTOR(chip->vendor.locality));
860 ioread32(chip->vendor.iobase +
861 TPM_INT_ENABLE(chip->vendor.locality));
863 intmask |= TPM_INTF_CMD_READY_INT
864 | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
865 | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;
868 chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
/* PM resume: restore interrupts if in IRQ mode, run the core resume, then
 * re-run the TPM 1.2 self-test (its result is deliberately ignored). */
871 static int tpm_tis_resume(struct device *dev)
873 struct tpm_chip *chip = dev_get_drvdata(dev);
876 if (chip->vendor.irq)
877 tpm_tis_reenable_interrupts(chip);
879 ret = tpm_pm_resume(dev);
883 /* TPM 1.2 requires self-test on resume. This function actually returns
884 * an error code but for unknown reason it isn't handled.
886 if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
887 tpm_do_selftest(chip);
893 static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
/* PNP probe: build a tpm_info from the PNP-declared memory window and IRQ
 * (falling back to defaults), apply the iTPM quirk and pick up the ACPI
 * handle when an ACPI companion exists, then hand off to tpm_tis_init(). */
896 static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
897 const struct pnp_device_id *pnp_id)
899 struct tpm_info tpm_info = tis_default_info;
900 acpi_handle acpi_dev_handle = NULL;
902 tpm_info.start = pnp_mem_start(pnp_dev, 0);
903 tpm_info.len = pnp_mem_len(pnp_dev, 0);
905 if (pnp_irq_valid(pnp_dev, 0))
906 tpm_info.irq = pnp_irq(pnp_dev, 0);
911 if (pnp_acpi_device(pnp_dev)) {
912 if (is_itpm(pnp_acpi_device(pnp_dev)))
915 acpi_dev_handle = pnp_acpi_device(pnp_dev)->handle;
919 return tpm_tis_init(&pnp_dev->dev, &tpm_info, acpi_dev_handle);
/* PNP IDs handled by this driver; the empty second-to-last slot can be
 * filled at module load via the "hid" parameter. */
922 static struct pnp_device_id tpm_pnp_tbl[] = {
923 {"PNP0C31", 0}, /* TPM */
924 {"ATM1200", 0}, /* Atmel */
925 {"IFX0102", 0}, /* Infineon */
926 {"BCM0101", 0}, /* Broadcom */
927 {"BCM0102", 0}, /* Broadcom */
928 {"NSC1200", 0}, /* National */
929 {"ICO0102", 0}, /* Intel */
931 {"", 0}, /* User Specified */
932 {"", 0} /* Terminator */
934 MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
/* PNP removal: unregister from the TPM core, then tear down the hardware. */
936 static void tpm_tis_pnp_remove(struct pnp_dev *dev)
938 struct tpm_chip *chip = pnp_get_drvdata(dev);
940 tpm_chip_unregister(chip);
941 tpm_tis_remove(chip);
/* PNP driver binding for the ID table above. */
944 static struct pnp_driver tis_pnp_driver = {
946 .id_table = tpm_pnp_tbl,
947 .probe = tpm_tis_pnp_init,
948 .remove = tpm_tis_pnp_remove,
954 #define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
955 module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
956 sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
957 MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
/* acpi_dev_get_resources() callback: record the device's interrupt number
 * and the start/length of its memory window into the tpm_info passed as
 * @data. */
961 static int tpm_check_resource(struct acpi_resource *ares, void *data)
963 struct tpm_info *tpm_info = (struct tpm_info *) data;
966 if (acpi_dev_resource_interrupt(ares, 0, &res)) {
967 tpm_info->irq = res.start;
968 } else if (acpi_dev_resource_memory(ares, &res)) {
969 tpm_info->start = res.start;
970 tpm_info->len = resource_size(&res);
/* ACPI probe: only FIFO-interface devices are ours; walk the ACPI resource
 * list to fill in tpm_info, apply the iTPM quirk, then call
 * tpm_tis_init() with the device's ACPI handle. */
976 static int tpm_tis_acpi_init(struct acpi_device *acpi_dev)
978 struct list_head resources;
979 struct tpm_info tpm_info = tis_default_info;
982 if (!is_fifo(acpi_dev))
985 INIT_LIST_HEAD(&resources);
986 ret = acpi_dev_get_resources(acpi_dev, &resources, tpm_check_resource,
991 acpi_dev_free_resource_list(&resources);
996 if (is_itpm(acpi_dev))
999 return tpm_tis_init(&acpi_dev->dev, &tpm_info, acpi_dev->handle);
/* ACPI removal: unregister from the TPM core, then tear down the hardware. */
1002 static int tpm_tis_acpi_remove(struct acpi_device *dev)
1004 struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
1006 tpm_chip_unregister(chip);
1007 tpm_tis_remove(chip);
/* ACPI IDs handled by this driver (MSFT0101 = TPM 2.0); the empty
 * second-to-last slot is user-fillable like the PNP table's. */
1012 static struct acpi_device_id tpm_acpi_tbl[] = {
1013 {"MSFT0101", 0}, /* TPM 2.0 */
1015 {"", 0}, /* User Specified */
1016 {"", 0} /* Terminator */
1018 MODULE_DEVICE_TABLE(acpi, tpm_acpi_tbl);
/* ACPI driver binding for the ID table above. */
1020 static struct acpi_driver tis_acpi_driver = {
1022 .ids = tpm_acpi_tbl,
1024 .add = tpm_tis_acpi_init,
1025 .remove = tpm_tis_acpi_remove,
/* Platform driver + device used by the force=1 path, which probes the
 * fixed legacy address instead of trusting PNP/ACPI enumeration. */
1033 static struct platform_driver tis_drv = {
1040 static struct platform_device *pdev;
1043 module_param(force, bool, 0444);
1044 MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
/* Module init: normally register the PNP and ACPI drivers (unwinding the
 * PNP driver if ACPI registration fails); with force=1, instead register a
 * platform driver/device and probe the default fixed address directly. */
1045 static int __init init_tis(void)
1050 rc = pnp_register_driver(&tis_pnp_driver);
1057 rc = acpi_bus_register_driver(&tis_acpi_driver);
1060 pnp_unregister_driver(&tis_pnp_driver);
1069 rc = platform_driver_register(&tis_drv);
1072 pdev = platform_device_register_simple("tpm_tis", -1, NULL, 0);
1077 rc = tpm_tis_init(&pdev->dev, &tis_default_info, NULL);
/* Error unwinding for the force path. */
1082 platform_device_unregister(pdev);
1084 platform_driver_unregister(&tis_drv);
/* Module exit: mirror of init_tis() — unregister the ACPI and PNP drivers,
 * and for the forced platform device, unregister the chip and tear down
 * both the device and the platform driver. */
1088 static void __exit cleanup_tis(void)
1090 struct tpm_chip *chip;
1091 #if defined(CONFIG_PNP) || defined(CONFIG_ACPI)
1094 acpi_bus_unregister_driver(&tis_acpi_driver);
1097 pnp_unregister_driver(&tis_pnp_driver);
1102 chip = dev_get_drvdata(&pdev->dev);
1103 tpm_chip_unregister(chip);
1104 tpm_tis_remove(chip);
1105 platform_device_unregister(pdev);
1106 platform_driver_unregister(&tis_drv);
1109 module_init(init_tis);
1110 module_exit(cleanup_tis);
1111 MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
1112 MODULE_DESCRIPTION("TPM Driver");
1113 MODULE_VERSION("2.0");
1114 MODULE_LICENSE("GPL");