/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
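
/*
 * The SMMU is the IOMMU built into the Tegra memory controller (Tegra30
 * and later). It translates 32-bit I/O virtual addresses through a
 * two-level page table (1024 PDEs x 1024 PTEs mapping 4 KiB pages), with
 * the address space selected per client group (swgroup) by a 7-bit ASID.
 */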

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>
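
/*
 * Per-SMMU instance state. Besides the SoC description and the PFN mask,
 * the driver keeps an ASID allocation bitmap and the mutex protecting it
 * (see tegra_smmu_probe() and tegra_smmu_alloc_asid()), as well as the
 * debugfs directory created for this instance.
 */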
        const struct tegra_smmu_soc *soc;

        unsigned long pfn_mask;

        struct list_head list;

        struct dentry *debugfs;
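
/*
 * Per-domain (address space) state: a 4 KiB page directory page, a shadow
 * page of per-PDE use counters, the attributes applied to the directory
 * and the ASID assigned to the domain while it is attached.
 */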
struct tegra_smmu_as {
        struct iommu_domain domain;
        struct tegra_smmu *smmu;
        unsigned int use_count;

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
        return container_of(dom, struct tegra_smmu_as, domain);

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
                               unsigned long offset)
        writel(value, smmu->regs + offset);

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
        return readl(smmu->regs + offset);
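
/*
 * Register offsets below are relative to the memory controller's register
 * window; the SMMU has no MMIO aperture of its own (smmu->regs == mc->regs,
 * see tegra_smmu_probe()).
 */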
#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr))

#define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
                                         SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
                                       SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)

#define SMMU_PDE_READABLE (1 << 31)
#define SMMU_PDE_WRITABLE (1 << 30)
#define SMMU_PDE_NONSECURE (1 << 29)
#define SMMU_PDE_NEXT (1 << 28)

#define SMMU_PTE_READABLE (1 << 31)
#define SMMU_PTE_WRITABLE (1 << 30)
#define SMMU_PTE_NONSECURE (1 << 29)

#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
                       SMMU_PDE_NONSECURE)
#define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
                       SMMU_PTE_NONSECURE)
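
/*
 * A 32-bit IOVA splits into a page directory index (bits 31:22, 1024
 * entries), a page table index (bits 21:12, 1024 entries) and a 12-bit
 * page offset. For example, IOVA 0x80401000 uses PDE 513 and PTE 1.
 */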
static unsigned int iova_pd_index(unsigned long iova)
        return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);

static unsigned int iova_pt_index(unsigned long iova)
        return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
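
/*
 * Flush the SMMU's page table cache (PTC), either entirely or just the
 * cache line (one memory controller atom) that holds the page table
 * entries at @offset within @page.
 */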
static inline void smmu_flush_ptc(struct tegra_smmu *smmu, struct page *page,
                                  unsigned long offset)
        phys_addr_t phys = page ? page_to_phys(page) : 0;

        offset &= ~(smmu->mc->soc->atom_size - 1);

        if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_PHYS_ADDR_T_64BIT
                value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK;

                smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);

                value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
                value = SMMU_PTC_FLUSH_TYPE_ALL;

        smmu_writel(smmu, value, SMMU_PTC_FLUSH);
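
/*
 * TLB flushes can target all entries, all entries for one ASID, a 4 MiB
 * "section" (one PDE's worth of IOVA space) or a 16 KiB "group" of pages,
 * as encoded by the SMMU_TLB_FLUSH_VA_* macros above.
 */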
static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
        smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
                                       unsigned long asid)
        value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
                SMMU_TLB_FLUSH_VA_MATCH_ALL;
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
                                          unsigned long asid,
                                          unsigned long iova)
        value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
                SMMU_TLB_FLUSH_VA_SECTION(iova);
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
                                        unsigned long asid,
                                        unsigned long iova)
        value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
                SMMU_TLB_FLUSH_VA_GROUP(iova);
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
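
/*
 * Reading back SMMU_CONFIG ensures that the preceding register writes have
 * reached the hardware before we continue.
 */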
static inline void smmu_flush(struct tegra_smmu *smmu)
        smmu_readl(smmu, SMMU_CONFIG);
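
/*
 * ASIDs are allocated from a small per-SoC pool (smmu->soc->num_asids),
 * tracked in the smmu->asids bitmap under smmu->lock.
 */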
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
        mutex_lock(&smmu->lock);

        id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
        if (id >= smmu->soc->num_asids) {
                mutex_unlock(&smmu->lock);

        set_bit(id, smmu->asids);

        mutex_unlock(&smmu->lock);

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
        mutex_lock(&smmu->lock);
        clear_bit(id, smmu->asids);
        mutex_unlock(&smmu->lock);

static bool tegra_smmu_capable(enum iommu_cap cap)
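
/*
 * Allocating a domain sets up a zeroed page directory page plus a second
 * page used purely as a per-PDE array of use counters; the domain's
 * aperture covers the full 32-bit IOVA space.
 */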
static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
        struct tegra_smmu_as *as;

        if (type != IOMMU_DOMAIN_UNMANAGED)

        as = kzalloc(sizeof(*as), GFP_KERNEL);

        as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

        as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);

        as->count = alloc_page(GFP_KERNEL);

        pd = page_address(as->pd);
        SetPageReserved(as->pd);

        for (i = 0; i < SMMU_NUM_PDE; i++)

        /* clear PDE usage counters */
        pd = page_address(as->count);
        SetPageReserved(as->count);

        for (i = 0; i < SMMU_NUM_PDE; i++)

        as->domain.geometry.aperture_start = 0;
        as->domain.geometry.aperture_end = 0xffffffff;
        as->domain.geometry.force_aperture = true;

static void tegra_smmu_domain_free(struct iommu_domain *domain)
        struct tegra_smmu_as *as = to_smmu_as(domain);

        /* TODO: free page directory and page tables */
        ClearPageReserved(as->pd);

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
        const struct tegra_smmu_swgroup *group = NULL;

        for (i = 0; i < smmu->soc->num_swgroups; i++) {
                if (smmu->soc->swgroups[i].swgroup == swgroup) {
                        group = &smmu->soc->swgroups[i];
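
/*
 * Each memory client has an enable bit that routes its requests through
 * the SMMU; each swgroup additionally has an ASID register selecting the
 * address space those requests are translated with.
 */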
static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
                              unsigned int asid)
        const struct tegra_smmu_swgroup *group;

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];

                if (client->swgroup != swgroup)

                value = smmu_readl(smmu, client->smmu.reg);
                value |= BIT(client->smmu.bit);
                smmu_writel(smmu, value, client->smmu.reg);

        group = tegra_smmu_find_swgroup(smmu, swgroup);
                value = smmu_readl(smmu, group->reg);
                value &= ~SMMU_ASID_MASK;
                value |= SMMU_ASID_VALUE(asid);
                value |= SMMU_ASID_ENABLE;
                smmu_writel(smmu, value, group->reg);

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
                               unsigned int asid)
        const struct tegra_smmu_swgroup *group;

        group = tegra_smmu_find_swgroup(smmu, swgroup);
                value = smmu_readl(smmu, group->reg);
                value &= ~SMMU_ASID_MASK;
                value |= SMMU_ASID_VALUE(asid);
                value &= ~SMMU_ASID_ENABLE;
                smmu_writel(smmu, value, group->reg);

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];

                if (client->swgroup != swgroup)

                value = smmu_readl(smmu, client->smmu.reg);
                value &= ~BIT(client->smmu.bit);
                smmu_writel(smmu, value, client->smmu.reg);
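
/*
 * The first time a domain is attached to this SMMU it is assigned an ASID
 * and its page directory is programmed through SMMU_PTB_ASID/SMMU_PTB_DATA;
 * subsequent attaches only increment the use count.
 */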
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
                                 struct tegra_smmu_as *as)
        if (as->use_count > 0) {

        err = tegra_smmu_alloc_asid(smmu, &as->id);

        smmu->soc->ops->flush_dcache(as->pd, 0, SMMU_SIZE_PD);
        smmu_flush_ptc(smmu, as->pd, 0);
        smmu_flush_tlb_asid(smmu, as->id);

        smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
        value = SMMU_PTB_DATA_VALUE(as->pd, as->attr);
        smmu_writel(smmu, value, SMMU_PTB_DATA);

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
                                    struct tegra_smmu_as *as)
        if (--as->use_count > 0)

        tegra_smmu_free_asid(smmu, as->id);
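
/*
 * Attaching a device walks its "iommus" phandle list and enables each
 * referenced swgroup for the domain's ASID. A client typically references
 * the memory controller with a single swgroup specifier in its device tree
 * node, e.g. (illustrative only):
 *
 *      iommus = <&mc TEGRA_SWGROUP_HC>;
 */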
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
                                 struct device *dev)
        struct tegra_smmu *smmu = dev->archdata.iommu;
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct device_node *np = dev->of_node;
        struct of_phandle_args args;
        unsigned int index = 0;

        while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
                                           &args)) {
                unsigned int swgroup = args.args[0];

                if (args.np != smmu->dev->of_node) {
                        of_node_put(args.np);

                of_node_put(args.np);

                err = tegra_smmu_as_prepare(smmu, as);

                tegra_smmu_enable(smmu, swgroup, as->id);

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct device_node *np = dev->of_node;
        struct tegra_smmu *smmu = as->smmu;
        struct of_phandle_args args;
        unsigned int index = 0;

        while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
                                           &args)) {
                unsigned int swgroup = args.args[0];

                if (args.np != smmu->dev->of_node) {
                        of_node_put(args.np);

                of_node_put(args.np);

                tegra_smmu_disable(smmu, swgroup, as->id);
                tegra_smmu_as_unprepare(smmu, as);

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
        u32 *pt = page_address(pt_page);

        return pt + iova_pt_index(iova);

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
                                  struct page **pagep)
        unsigned int pd_index = iova_pd_index(iova);
        struct page *pt_page;

        pd = page_address(as->pd);

        pt_page = pfn_to_page(pd[pd_index] & as->smmu->pfn_mask);

        return tegra_smmu_pte_offset(pt_page, iova);
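
/*
 * Look up the PTE for an IOVA, allocating and hooking up a new page table
 * if the covering PDE is still empty. The as->count page keeps a per-PDE
 * count of live PTEs so that empty page tables can be freed again on unmap.
 */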
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
                       struct page **pagep)
        u32 *pd = page_address(as->pd), *pt, *count;
        unsigned int pde = iova_pd_index(iova);
        struct tegra_smmu *smmu = as->smmu;

                page = alloc_page(GFP_KERNEL | __GFP_DMA);

                pt = page_address(page);
                SetPageReserved(page);

                for (i = 0; i < SMMU_NUM_PTE; i++)

                smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT);

                pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT);

                smmu->soc->ops->flush_dcache(as->pd, pde << 2, 4);
                smmu_flush_ptc(smmu, as->pd, pde << 2);
                smmu_flush_tlb_section(smmu, as->id, iova);

                page = pfn_to_page(pd[pde] & smmu->pfn_mask);

        pt = page_address(page);

        /* Keep track of entries in this page table. */
        count = page_address(as->count);
        if (pt[iova_pt_index(iova)] == 0)

        return tegra_smmu_pte_offset(page, iova);

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
        struct tegra_smmu *smmu = as->smmu;
        unsigned int pde = iova_pd_index(iova);
        u32 *count = page_address(as->count);
        u32 *pd = page_address(as->pd);

        page = pfn_to_page(pd[pde] & smmu->pfn_mask);

        /*
         * When no entries in this page table are used anymore, return the
         * memory page to the system.
         */
        if (--count[pde] == 0) {
                unsigned int offset = pde * sizeof(*pd);

                /* Clear the page directory entry first */

                /* Flush the page directory entry */
                smmu->soc->ops->flush_dcache(as->pd, offset, sizeof(*pd));
                smmu_flush_ptc(smmu, as->pd, offset);
                smmu_flush_tlb_section(smmu, as->id, iova);

                /* Finally, free the page */
                ClearPageReserved(page);
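
/*
 * Writing a PTE is followed by a CPU dcache flush of the updated word, a
 * PTC flush of the cache line holding it and a TLB flush of the 16 KiB
 * group containing the IOVA.
 */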
static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
                               u32 *pte, struct page *pte_page, u32 val)
        struct tegra_smmu *smmu = as->smmu;
        unsigned long offset = offset_in_page(pte);

        smmu->soc->ops->flush_dcache(pte_page, offset, 4);
        smmu_flush_ptc(smmu, pte_page, offset);
        smmu_flush_tlb_group(smmu, as->id, iova);

static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t paddr, size_t size, int prot)
        struct tegra_smmu_as *as = to_smmu_as(domain);

        pte = as_get_pte(as, iova, &page);

        tegra_smmu_set_pte(as, iova, pte, page,
                           __phys_to_pfn(paddr) | SMMU_PTE_ATTR);

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
                               size_t size)
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct page *pte_page;

        pte = tegra_smmu_pte_lookup(as, iova, &pte_page);

        tegra_smmu_set_pte(as, iova, pte, pte_page, 0);
        tegra_smmu_pte_put_use(as, iova);

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct page *pte_page;

        pte = tegra_smmu_pte_lookup(as, iova, &pte_page);

        pfn = *pte & as->smmu->pfn_mask;

        return PFN_PHYS(pfn);

static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
        struct platform_device *pdev;

        pdev = of_find_device_by_node(np);

        mc = platform_get_drvdata(pdev);

static int tegra_smmu_add_device(struct device *dev)
        struct device_node *np = dev->of_node;
        struct of_phandle_args args;
        unsigned int index = 0;

        while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
                                          &args) == 0) {
                struct tegra_smmu *smmu;

                smmu = tegra_smmu_find(args.np);
                        /*
                         * Only a single IOMMU master interface is currently
                         * supported by the Linux kernel, so abort after the
                         * first match.
                         */
                        dev->archdata.iommu = smmu;

static void tegra_smmu_remove_device(struct device *dev)
        dev->archdata.iommu = NULL;
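
/*
 * Operations registered with the IOMMU core for the platform bus; only
 * 4 KiB pages are supported (see pgsize_bitmap).
 */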
static const struct iommu_ops tegra_smmu_ops = {
        .capable = tegra_smmu_capable,
        .domain_alloc = tegra_smmu_domain_alloc,
        .domain_free = tegra_smmu_domain_free,
        .attach_dev = tegra_smmu_attach_dev,
        .detach_dev = tegra_smmu_detach_dev,
        .add_device = tegra_smmu_add_device,
        .remove_device = tegra_smmu_remove_device,
        .map = tegra_smmu_map,
        .unmap = tegra_smmu_unmap,
        .map_sg = default_iommu_map_sg,
        .iova_to_phys = tegra_smmu_iova_to_phys,

        .pgsize_bitmap = SZ_4K,
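
/*
 * On Tegra30 and later the AHB bus must be switched over as well, so that
 * transactions from AHB masters are routed through the SMMU; this is done
 * via tegra_ahb_enable_smmu() on the matching AHB device tree node.
 */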
static void tegra_smmu_ahb_enable(void)
        static const struct of_device_id ahb_match[] = {
                { .compatible = "nvidia,tegra30-ahb", },

        struct device_node *ahb;

        ahb = of_find_matching_node(NULL, ahb_match);
                tegra_ahb_enable_smmu(ahb);
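
/*
 * debugfs interface: "swgroups" lists the ASID register state of each
 * swgroup, "clients" lists which memory clients currently have translation
 * enabled.
 */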
static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
        struct tegra_smmu *smmu = s->private;

        seq_printf(s, "swgroup    enabled  ASID\n");
        seq_printf(s, "------------------------\n");

        for (i = 0; i < smmu->soc->num_swgroups; i++) {
                const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];

                value = smmu_readl(smmu, group->reg);

                if (value & SMMU_ASID_ENABLE)

                asid = value & SMMU_ASID_MASK;

                seq_printf(s, "%-9s %-7s %#04x\n", group->name, status,
                           asid);

static int tegra_smmu_swgroups_open(struct inode *inode, struct file *file)
        return single_open(file, tegra_smmu_swgroups_show, inode->i_private);

static const struct file_operations tegra_smmu_swgroups_fops = {
        .open = tegra_smmu_swgroups_open,
        .release = single_release,

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
        struct tegra_smmu *smmu = s->private;

        seq_printf(s, "client       enabled\n");
        seq_printf(s, "--------------------\n");

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];

                value = smmu_readl(smmu, client->smmu.reg);

                if (value & BIT(client->smmu.bit))

                seq_printf(s, "%-12s %s\n", client->name, status);

static int tegra_smmu_clients_open(struct inode *inode, struct file *file)
        return single_open(file, tegra_smmu_clients_show, inode->i_private);

static const struct file_operations tegra_smmu_clients_fops = {
        .open = tegra_smmu_clients_open,
        .release = single_release,

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
        smmu->debugfs = debugfs_create_dir("smmu", NULL);

        debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
                            &tegra_smmu_swgroups_fops);
        debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
                            &tegra_smmu_clients_fops);

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
        debugfs_remove_recursive(smmu->debugfs);
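
/*
 * Called from the memory controller driver: configure the PTC and TLB,
 * flush both, enable translation globally, switch over the AHB bus and
 * register the IOMMU operations for the platform bus.
 */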
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
                                    const struct tegra_smmu_soc *soc,
                                    struct tegra_mc *mc)
        struct tegra_smmu *smmu;

        /* This can happen on Tegra20 which doesn't have an SMMU */

        smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
                return ERR_PTR(-ENOMEM);

        /*
         * This is a bit of a hack. Ideally we'd want to simply return this
         * value. However the IOMMU registration process will attempt to add
         * all devices to the IOMMU when bus_set_iommu() is called. In order
         * not to rely on global variables to track the IOMMU instance, we
         * set it here so that it can be looked up from the .add_device()
         * callback via the IOMMU device's .drvdata field.
         */

        size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

        smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
                return ERR_PTR(-ENOMEM);

        mutex_init(&smmu->lock);

        smmu->regs = mc->regs;

        smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
        dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
                mc->soc->num_address_bits, smmu->pfn_mask);

        value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

        if (soc->supports_request_limit)
                value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

        smmu_writel(smmu, value, SMMU_PTC_CONFIG);

        value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
                SMMU_TLB_CONFIG_ACTIVE_LINES(0x20);

        if (soc->supports_round_robin_arbitration)
                value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

        smmu_writel(smmu, value, SMMU_TLB_CONFIG);

        smmu_flush_ptc(smmu, NULL, 0);
        smmu_flush_tlb(smmu);
        smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);

        tegra_smmu_ahb_enable();

        err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);

        if (IS_ENABLED(CONFIG_DEBUG_FS))
                tegra_smmu_debugfs_init(smmu);

void tegra_smmu_remove(struct tegra_smmu *smmu)
        if (IS_ENABLED(CONFIG_DEBUG_FS))
                tegra_smmu_debugfs_exit(smmu);