/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2007 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
23 #include <linux/kernel.h>
24 #include <linux/pci.h>
25 #include <linux/smp.h>
26 #include <linux/interrupt.h>
27 #include <linux/dca.h>
29 /* either a kernel change is needed, or we need something like this in kernel */
32 #undef cpu_physical_id
33 #define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24)
37 #include "registers.h"
/*
 * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
 * contain the bit number of the APIC ID to map into the DCA tag.  If the valid
 * bit is not set, then the value must be 0 or 1 and defines the bit in the tag.
 */
45 #define DCA_TAG_MAP_VALID 0x80
47 #define DCA3_TAG_MAP_BIT_TO_INV 0x80
48 #define DCA3_TAG_MAP_BIT_TO_SEL 0x40
49 #define DCA3_TAG_MAP_LITERAL_VAL 0x1
51 #define DCA_TAG_MAP_MASK 0xDF
53 /* expected tag map bytes for I/OAT ver.2 */
54 #define DCA2_TAG_MAP_BYTE0 0x80
55 #define DCA2_TAG_MAP_BYTE1 0x0
56 #define DCA2_TAG_MAP_BYTE2 0x81
57 #define DCA2_TAG_MAP_BYTE3 0x82
58 #define DCA2_TAG_MAP_BYTE4 0x82
60 /* verify if tag map matches expected values */
61 static inline int dca2_tag_map_valid(u8 *tag_map)
63 return ((tag_map[0] == DCA2_TAG_MAP_BYTE0) &&
64 (tag_map[1] == DCA2_TAG_MAP_BYTE1) &&
65 (tag_map[2] == DCA2_TAG_MAP_BYTE2) &&
66 (tag_map[3] == DCA2_TAG_MAP_BYTE3) &&
67 (tag_map[4] == DCA2_TAG_MAP_BYTE4));
/*
 * "Legacy" DCA systems do not implement the DCA register set in the
 * I/OAT device.  Software needs direct support for their tag mappings.
 */
75 #define APICID_BIT(x) (DCA_TAG_MAP_VALID | (x))
76 #define IOAT_TAG_MAP_LEN 8
78 static u8 ioat_tag_map_BNB[IOAT_TAG_MAP_LEN] = {
79 1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
80 static u8 ioat_tag_map_SCNB[IOAT_TAG_MAP_LEN] = {
81 1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
82 static u8 ioat_tag_map_CNB[IOAT_TAG_MAP_LEN] = {
83 1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), };
84 static u8 ioat_tag_map_UNISYS[IOAT_TAG_MAP_LEN] = { 0 };
86 /* pack PCI B/D/F into a u16 */
87 static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
89 return (pci->bus->number << 8) | pci->devfn;
92 static int dca_enabled_in_bios(struct pci_dev *pdev)
94 /* CPUID level 9 returns DCA configuration */
95 /* Bit 0 indicates DCA enabled by the BIOS */
96 unsigned long cpuid_level_9;
99 cpuid_level_9 = cpuid_eax(9);
100 res = test_bit(0, &cpuid_level_9);
102 dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n");
107 int system_has_dca_enabled(struct pci_dev *pdev)
109 if (boot_cpu_has(X86_FEATURE_DCA))
110 return dca_enabled_in_bios(pdev);
112 dev_dbg(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
116 struct ioat_dca_slot {
117 struct pci_dev *pdev; /* requester device */
118 u16 rid; /* requester id, as used by IOAT */
121 #define IOAT_DCA_MAX_REQ 6
122 #define IOAT3_DCA_MAX_REQ 2
124 struct ioat_dca_priv {
125 void __iomem *iobase;
126 void __iomem *dca_base;
129 u8 tag_map[IOAT_TAG_MAP_LEN];
130 struct ioat_dca_slot req_slots[0];
/* 5000 series chipset DCA Port Requester ID Table Entry Format
 * [15:8]	PCI-Express Bus Number
 * [7:3]	PCI-Express Device Number
 * [2:0]	PCI-Express Function Number
 *
 * 5000 series chipset DCA control register format
 * [7:1]	Reserved (0)
 * [0]		Ignore Function Number
 */
143 static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
145 struct ioat_dca_priv *ioatdca = dca_priv(dca);
146 struct pci_dev *pdev;
150 /* This implementation only supports PCI-Express */
151 if (!dev_is_pci(dev))
153 pdev = to_pci_dev(dev);
154 id = dcaid_from_pcidev(pdev);
156 if (ioatdca->requester_count == ioatdca->max_requesters)
159 for (i = 0; i < ioatdca->max_requesters; i++) {
160 if (ioatdca->req_slots[i].pdev == NULL) {
161 /* found an empty slot */
162 ioatdca->requester_count++;
163 ioatdca->req_slots[i].pdev = pdev;
164 ioatdca->req_slots[i].rid = id;
165 writew(id, ioatdca->dca_base + (i * 4));
166 /* make sure the ignore function bit is off */
167 writeb(0, ioatdca->dca_base + (i * 4) + 2);
171 /* Error, ioatdma->requester_count is out of whack */
175 static int ioat_dca_remove_requester(struct dca_provider *dca,
178 struct ioat_dca_priv *ioatdca = dca_priv(dca);
179 struct pci_dev *pdev;
182 /* This implementation only supports PCI-Express */
183 if (!dev_is_pci(dev))
185 pdev = to_pci_dev(dev);
187 for (i = 0; i < ioatdca->max_requesters; i++) {
188 if (ioatdca->req_slots[i].pdev == pdev) {
189 writew(0, ioatdca->dca_base + (i * 4));
190 ioatdca->req_slots[i].pdev = NULL;
191 ioatdca->req_slots[i].rid = 0;
192 ioatdca->requester_count--;
199 static u8 ioat_dca_get_tag(struct dca_provider *dca,
203 struct ioat_dca_priv *ioatdca = dca_priv(dca);
204 int i, apic_id, bit, value;
208 apic_id = cpu_physical_id(cpu);
210 for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
211 entry = ioatdca->tag_map[i];
212 if (entry & DCA_TAG_MAP_VALID) {
213 bit = entry & ~DCA_TAG_MAP_VALID;
214 value = (apic_id & (1 << bit)) ? 1 : 0;
216 value = entry ? 1 : 0;
223 static int ioat_dca_dev_managed(struct dca_provider *dca,
226 struct ioat_dca_priv *ioatdca = dca_priv(dca);
227 struct pci_dev *pdev;
230 pdev = to_pci_dev(dev);
231 for (i = 0; i < ioatdca->max_requesters; i++) {
232 if (ioatdca->req_slots[i].pdev == pdev)
238 static struct dca_ops ioat_dca_ops = {
239 .add_requester = ioat_dca_add_requester,
240 .remove_requester = ioat_dca_remove_requester,
241 .get_tag = ioat_dca_get_tag,
242 .dev_managed = ioat_dca_dev_managed,
246 struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
248 struct dca_provider *dca;
249 struct ioat_dca_priv *ioatdca;
256 if (!system_has_dca_enabled(pdev))
259 /* I/OAT v1 systems must have a known tag_map to support DCA */
260 switch (pdev->vendor) {
261 case PCI_VENDOR_ID_INTEL:
262 switch (pdev->device) {
263 case PCI_DEVICE_ID_INTEL_IOAT:
264 tag_map = ioat_tag_map_BNB;
266 case PCI_DEVICE_ID_INTEL_IOAT_CNB:
267 tag_map = ioat_tag_map_CNB;
269 case PCI_DEVICE_ID_INTEL_IOAT_SCNB:
270 tag_map = ioat_tag_map_SCNB;
274 case PCI_VENDOR_ID_UNISYS:
275 switch (pdev->device) {
276 case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR:
277 tag_map = ioat_tag_map_UNISYS;
285 version = readb(iobase + IOAT_VER_OFFSET);
286 if (version == IOAT_VER_3_0)
287 max_requesters = IOAT3_DCA_MAX_REQ;
289 max_requesters = IOAT_DCA_MAX_REQ;
291 dca = alloc_dca_provider(&ioat_dca_ops,
293 (sizeof(struct ioat_dca_slot) * max_requesters));
297 ioatdca = dca_priv(dca);
298 ioatdca->max_requesters = max_requesters;
299 ioatdca->dca_base = iobase + 0x54;
301 /* copy over the APIC ID to DCA tag mapping */
302 for (i = 0; i < IOAT_TAG_MAP_LEN; i++)
303 ioatdca->tag_map[i] = tag_map[i];
305 err = register_dca_provider(dca, &pdev->dev);
307 free_dca_provider(dca);
315 static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
317 struct ioat_dca_priv *ioatdca = dca_priv(dca);
318 struct pci_dev *pdev;
321 u16 global_req_table;
323 /* This implementation only supports PCI-Express */
324 if (!dev_is_pci(dev))
326 pdev = to_pci_dev(dev);
327 id = dcaid_from_pcidev(pdev);
329 if (ioatdca->requester_count == ioatdca->max_requesters)
332 for (i = 0; i < ioatdca->max_requesters; i++) {
333 if (ioatdca->req_slots[i].pdev == NULL) {
334 /* found an empty slot */
335 ioatdca->requester_count++;
336 ioatdca->req_slots[i].pdev = pdev;
337 ioatdca->req_slots[i].rid = id;
339 readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
340 writel(id | IOAT_DCA_GREQID_VALID,
341 ioatdca->iobase + global_req_table + (i * 4));
345 /* Error, ioatdma->requester_count is out of whack */
349 static int ioat2_dca_remove_requester(struct dca_provider *dca,
352 struct ioat_dca_priv *ioatdca = dca_priv(dca);
353 struct pci_dev *pdev;
355 u16 global_req_table;
357 /* This implementation only supports PCI-Express */
358 if (!dev_is_pci(dev))
360 pdev = to_pci_dev(dev);
362 for (i = 0; i < ioatdca->max_requesters; i++) {
363 if (ioatdca->req_slots[i].pdev == pdev) {
365 readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
366 writel(0, ioatdca->iobase + global_req_table + (i * 4));
367 ioatdca->req_slots[i].pdev = NULL;
368 ioatdca->req_slots[i].rid = 0;
369 ioatdca->requester_count--;
376 static u8 ioat2_dca_get_tag(struct dca_provider *dca,
382 tag = ioat_dca_get_tag(dca, dev, cpu);
387 static struct dca_ops ioat2_dca_ops = {
388 .add_requester = ioat2_dca_add_requester,
389 .remove_requester = ioat2_dca_remove_requester,
390 .get_tag = ioat2_dca_get_tag,
391 .dev_managed = ioat_dca_dev_managed,
394 static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
398 u16 global_req_table;
400 global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET);
401 if (global_req_table == 0)
404 req = readl(iobase + global_req_table + (slots * sizeof(u32)));
406 } while ((req & IOAT_DCA_GREQID_LASTID) == 0);
411 struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
413 struct dca_provider *dca;
414 struct ioat_dca_priv *ioatdca;
424 if (!system_has_dca_enabled(pdev))
427 dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
431 slots = ioat2_dca_count_dca_slots(iobase, dca_offset);
435 dca = alloc_dca_provider(&ioat2_dca_ops,
437 + (sizeof(struct ioat_dca_slot) * slots));
441 ioatdca = dca_priv(dca);
442 ioatdca->iobase = iobase;
443 ioatdca->dca_base = iobase + dca_offset;
444 ioatdca->max_requesters = slots;
446 /* some bios might not know to turn these on */
447 csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
448 if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) {
449 csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH;
450 writew(csi_fsb_control,
451 ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
453 pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
454 if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) {
455 pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR;
457 ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
461 /* TODO version, compatibility and configuration checks */
463 /* copy out the APIC to DCA tag map */
464 tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET);
465 for (i = 0; i < 5; i++) {
466 bit = (tag_map >> (4 * i)) & 0x0f;
468 ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID;
470 ioatdca->tag_map[i] = 0;
473 if (!dca2_tag_map_valid(ioatdca->tag_map)) {
474 WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
475 "%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
476 dev_driver_string(&pdev->dev),
477 dev_name(&pdev->dev));
478 free_dca_provider(dca);
482 err = register_dca_provider(dca, &pdev->dev);
484 free_dca_provider(dca);
491 static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
493 struct ioat_dca_priv *ioatdca = dca_priv(dca);
494 struct pci_dev *pdev;
497 u16 global_req_table;
499 /* This implementation only supports PCI-Express */
500 if (!dev_is_pci(dev))
502 pdev = to_pci_dev(dev);
503 id = dcaid_from_pcidev(pdev);
505 if (ioatdca->requester_count == ioatdca->max_requesters)
508 for (i = 0; i < ioatdca->max_requesters; i++) {
509 if (ioatdca->req_slots[i].pdev == NULL) {
510 /* found an empty slot */
511 ioatdca->requester_count++;
512 ioatdca->req_slots[i].pdev = pdev;
513 ioatdca->req_slots[i].rid = id;
515 readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
516 writel(id | IOAT_DCA_GREQID_VALID,
517 ioatdca->iobase + global_req_table + (i * 4));
521 /* Error, ioatdma->requester_count is out of whack */
525 static int ioat3_dca_remove_requester(struct dca_provider *dca,
528 struct ioat_dca_priv *ioatdca = dca_priv(dca);
529 struct pci_dev *pdev;
531 u16 global_req_table;
533 /* This implementation only supports PCI-Express */
534 if (!dev_is_pci(dev))
536 pdev = to_pci_dev(dev);
538 for (i = 0; i < ioatdca->max_requesters; i++) {
539 if (ioatdca->req_slots[i].pdev == pdev) {
541 readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
542 writel(0, ioatdca->iobase + global_req_table + (i * 4));
543 ioatdca->req_slots[i].pdev = NULL;
544 ioatdca->req_slots[i].rid = 0;
545 ioatdca->requester_count--;
552 static u8 ioat3_dca_get_tag(struct dca_provider *dca,
558 struct ioat_dca_priv *ioatdca = dca_priv(dca);
559 int i, apic_id, bit, value;
563 apic_id = cpu_physical_id(cpu);
565 for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
566 entry = ioatdca->tag_map[i];
567 if (entry & DCA3_TAG_MAP_BIT_TO_SEL) {
569 ~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV);
570 value = (apic_id & (1 << bit)) ? 1 : 0;
571 } else if (entry & DCA3_TAG_MAP_BIT_TO_INV) {
572 bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV;
573 value = (apic_id & (1 << bit)) ? 0 : 1;
575 value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0;
583 static struct dca_ops ioat3_dca_ops = {
584 .add_requester = ioat3_dca_add_requester,
585 .remove_requester = ioat3_dca_remove_requester,
586 .get_tag = ioat3_dca_get_tag,
587 .dev_managed = ioat_dca_dev_managed,
590 static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset)
594 u16 global_req_table;
596 global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET);
597 if (global_req_table == 0)
601 req = readl(iobase + global_req_table + (slots * sizeof(u32)));
603 } while ((req & IOAT_DCA_GREQID_LASTID) == 0);
608 static inline int dca3_tag_map_invalid(u8 *tag_map)
611 * If the tag map is not programmed by the BIOS the default is:
612 * 0x80 0x80 0x80 0x80 0x80 0x00 0x00 0x00
614 * This an invalid map and will result in only 2 possible tags
615 * 0x1F and 0x00. 0x00 is an invalid DCA tag so we know that
616 * this entire definition is invalid.
618 return ((tag_map[0] == DCA_TAG_MAP_VALID) &&
619 (tag_map[1] == DCA_TAG_MAP_VALID) &&
620 (tag_map[2] == DCA_TAG_MAP_VALID) &&
621 (tag_map[3] == DCA_TAG_MAP_VALID) &&
622 (tag_map[4] == DCA_TAG_MAP_VALID));
625 struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
627 struct dca_provider *dca;
628 struct ioat_dca_priv *ioatdca;
645 if (!system_has_dca_enabled(pdev))
648 dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
652 slots = ioat3_dca_count_dca_slots(iobase, dca_offset);
656 dca = alloc_dca_provider(&ioat3_dca_ops,
658 + (sizeof(struct ioat_dca_slot) * slots));
662 ioatdca = dca_priv(dca);
663 ioatdca->iobase = iobase;
664 ioatdca->dca_base = iobase + dca_offset;
665 ioatdca->max_requesters = slots;
667 /* some bios might not know to turn these on */
668 csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
669 if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) {
670 csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH;
671 writew(csi_fsb_control,
672 ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
674 pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
675 if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) {
676 pcie_control |= IOAT3_PCI_CONTROL_MEMWR;
678 ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
682 /* TODO version, compatibility and configuration checks */
684 /* copy out the APIC to DCA tag map */
686 readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW);
688 readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH);
689 for (i = 0; i < 8; i++) {
690 bit = tag_map.full >> (8 * i);
691 ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
694 if (dca3_tag_map_invalid(ioatdca->tag_map)) {
695 WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
696 "%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
697 dev_driver_string(&pdev->dev),
698 dev_name(&pdev->dev));
699 free_dca_provider(dca);
703 err = register_dca_provider(dca, &pdev->dev);
705 free_dca_provider(dca);