/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	unsigned long pfn_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;
};

struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	u32 *count;
	struct page **pts;
	struct page *pd;
	unsigned int id;
	u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr))

#define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
				       SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)

#define SMMU_PDE_READABLE (1 << 31)
#define SMMU_PDE_WRITABLE (1 << 30)
#define SMMU_PDE_NONSECURE (1 << 29)
#define SMMU_PDE_NEXT (1 << 28)

#define SMMU_PTE_READABLE (1 << 31)
#define SMMU_PTE_WRITABLE (1 << 30)
#define SMMU_PTE_NONSECURE (1 << 29)

#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
		       SMMU_PDE_NONSECURE)
#define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
		       SMMU_PTE_NONSECURE)
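
/*
 * An IOVA is split into a 10-bit page directory index and a 10-bit page
 * table index; with 4 KiB pages each PDE therefore covers a 4 MiB section.
 */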
static unsigned int iova_pd_index(unsigned long iova)
{
	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}
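
/*
 * Flush the page table cache (PTC). If a page is given, only the cache line
 * holding the entry at @offset within that page is invalidated; otherwise
 * the entire PTC is flushed.
 */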
static inline void smmu_flush_ptc(struct tegra_smmu *smmu, struct page *page,
				  unsigned long offset)
{
	phys_addr_t phys = page ? page_to_phys(page) : 0;
	u32 value;

	if (page) {
		offset &= ~(smmu->mc->soc->atom_size - 1);

		if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_PHYS_ADDR_T_64BIT
			value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
			value = 0;
#endif
			smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
		}

		value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	} else {
		value = SMMU_PTC_FLUSH_TYPE_ALL;
	}

	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
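
/* Read back SMMU_CONFIG so that all preceding register writes have landed. */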
static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_CONFIG);
}
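
/* Allocate a free ASID from the bitmap; smmu->lock serializes allocators. */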
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	mutex_lock(&smmu->lock);

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids) {
		mutex_unlock(&smmu->lock);
		return -ENOSPC;
	}

	set_bit(id, smmu->asids);
	*idp = id;

	mutex_unlock(&smmu->lock);
	return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	mutex_lock(&smmu->lock);
	clear_bit(id, smmu->asids);
	mutex_unlock(&smmu->lock);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}
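
/*
 * Allocate an unmanaged domain. The page directory is a single page; page
 * tables are allocated lazily in as_get_pte() when the first mapping in a
 * 4 MiB section is created.
 */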
static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
	struct tegra_smmu_as *as;
	unsigned int i;
	u32 *pd;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
	if (!as->pts) {
		kfree(as->count);
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	/* clear PDEs */
	pd = page_address(as->pd);
	SetPageReserved(as->pd);

	for (i = 0; i < SMMU_NUM_PDE; i++)
		pd[i] = 0;

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */
	ClearPageReserved(as->pd);

	kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}
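
/*
 * Enable SMMU translation for every memory client in the given swgroup and
 * bind the swgroup's ASID register to @asid.
 */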
static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value |= BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value &= ~BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}
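
/*
 * On first use of an address space, allocate an ASID and point the hardware
 * page table base (PTB) registers at its page directory. Subsequent users
 * only bump the reference count.
 */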
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err;

	if (as->use_count > 0) {
		as->use_count++;
		return 0;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		return err;

	smmu->soc->ops->flush_dcache(as->pd, 0, SMMU_SIZE_PD);
	smmu_flush_ptc(smmu, as->pd, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	return 0;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	if (--as->use_count > 0)
		return;

	tegra_smmu_free_asid(smmu, as->id);

	as->smmu = NULL;
}
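
/*
 * Walk the device's "iommus" phandles and, for every entry that references
 * this SMMU, enable translation for the specified swgroup under the domain's
 * ASID.
 */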
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		err = tegra_smmu_as_prepare(smmu, as);
		if (err < 0)
			return err;

		tegra_smmu_enable(smmu, swgroup, as->id);
		index++;
	}

	if (index == 0)
		return -ENODEV;

	return 0;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = as->smmu;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		tegra_smmu_disable(smmu, swgroup, as->id);
		tegra_smmu_as_unprepare(smmu, as);
		index++;
	}
}

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
	u32 *pt = page_address(pt_page);

	return pt + iova_pt_index(iova);
}

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
				  struct page **pagep)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct page *pt_page;

	pt_page = as->pts[pd_index];
	if (!pt_page)
		return NULL;

	*pagep = pt_page;

	return tegra_smmu_pte_offset(pt_page, iova);
}
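
/*
 * Return a pointer to the PTE for @iova, allocating and installing the
 * containing page table on first use. The page backing the PTE is returned
 * through @pagep so that callers can flush it.
 */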
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       struct page **pagep)
{
	u32 *pd = page_address(as->pd), *pt;
	unsigned int pde = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	struct page *page;
	unsigned int i;

	if (!as->pts[pde]) {
		page = alloc_page(GFP_KERNEL | __GFP_DMA);
		if (!page)
			return NULL;

		pt = page_address(page);
		SetPageReserved(page);

		for (i = 0; i < SMMU_NUM_PTE; i++)
			pt[i] = 0;

		as->pts[pde] = page;

		smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT);

		pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT);

		smmu->soc->ops->flush_dcache(as->pd, pde << 2, 4);
		smmu_flush_ptc(smmu, as->pd, pde << 2);
		smmu_flush_tlb_section(smmu, as->id, iova);
		smmu_flush(smmu);
	} else {
		page = as->pts[pde];
	}

	*pagep = page;

	pt = page_address(page);

	/* Keep track of entries in this page table. */
	if (pt[iova_pt_index(iova)] == 0)
		as->count[pde]++;

	return tegra_smmu_pte_offset(page, iova);
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned int pde = iova_pd_index(iova);
	u32 *pd = page_address(as->pd);
	struct page *page = as->pts[pde];

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--as->count[pde] == 0) {
		unsigned int offset = pde * sizeof(*pd);

		/* Clear the page directory entry first */
		pd[pde] = 0;

		/* Flush the page directory entry */
		smmu->soc->ops->flush_dcache(as->pd, offset, sizeof(*pd));
		smmu_flush_ptc(smmu, as->pd, offset);
		smmu_flush_tlb_section(smmu, as->id, iova);
		smmu_flush(smmu);

		/* Finally, free the page */
		ClearPageReserved(page);
		__free_page(page);
		as->pts[pde] = NULL;
	}
}
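
/*
 * Update a single PTE and make the change visible to the hardware: flush the
 * CPU data cache line, the PTC entry and the TLB group covering @iova.
 */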
static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
			       u32 *pte, struct page *pte_page, u32 val)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset = offset_in_page(pte);

	*pte = val;

	smmu->soc->ops->flush_dcache(pte_page, offset, 4);
	smmu_flush_ptc(smmu, pte_page, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);
}

static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct page *page;
	u32 *pte;

	pte = as_get_pte(as, iova, &page);
	if (!pte)
		return -ENOMEM;

	tegra_smmu_set_pte(as, iova, pte, page,
			   __phys_to_pfn(paddr) | SMMU_PTE_ATTR);

	return 0;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct page *pte_page;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_page);
	if (!pte || !*pte)
		return 0;

	tegra_smmu_set_pte(as, iova, pte, pte_page, 0);
	tegra_smmu_pte_put_use(as, iova);

	return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct page *pte_page;
	unsigned long pfn;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_page);
	if (!pte || !*pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return PFN_PHYS(pfn);
}
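
/*
 * Resolve an "iommus" phandle to the tegra_smmu instance that the memory
 * controller driver registered in its platform drvdata (mc->smmu).
 */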
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc)
		return NULL;

	return mc->smmu;
}

static int tegra_smmu_add_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		struct tegra_smmu *smmu;

		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			/*
			 * Only a single IOMMU master interface is currently
			 * supported by the Linux kernel, so abort after the
			 * first match.
			 */
			dev->archdata.iommu = smmu;
			break;
		}

		index++;
	}

	return 0;
}

static void tegra_smmu_remove_device(struct device *dev)
{
	dev->archdata.iommu = NULL;
}

static const struct iommu_ops tegra_smmu_ops = {
	.capable = tegra_smmu_capable,
	.domain_alloc = tegra_smmu_domain_alloc,
	.domain_free = tegra_smmu_domain_free,
	.attach_dev = tegra_smmu_attach_dev,
	.detach_dev = tegra_smmu_detach_dev,
	.add_device = tegra_smmu_add_device,
	.remove_device = tegra_smmu_remove_device,
	.map = tegra_smmu_map,
	.unmap = tegra_smmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = tegra_smmu_iova_to_phys,

	.pgsize_bitmap = SZ_4K,
};
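
/*
 * If a compatible AHB bus is present, notify it that the SMMU has been set
 * up so that transactions from AHB masters are translated as well.
 */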
static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}

static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "swgroup    enabled  ASID\n");
	seq_printf(s, "------------------------\n");

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
		const char *status;
		unsigned int asid;

		value = smmu_readl(smmu, group->reg);

		if (value & SMMU_ASID_ENABLE)
			status = "yes";
		else
			status = "no";

		asid = value & SMMU_ASID_MASK;

		seq_printf(s, "%-9s %-7s %#04x\n", group->name, status,
			   asid);
	}

	return 0;
}

static int tegra_smmu_swgroups_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_smmu_swgroups_show, inode->i_private);
}

static const struct file_operations tegra_smmu_swgroups_fops = {
	.open = tegra_smmu_swgroups_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "client       enabled\n");
	seq_printf(s, "--------------------\n");

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];
		const char *status;

		value = smmu_readl(smmu, client->smmu.reg);

		if (value & BIT(client->smmu.bit))
			status = "yes";
		else
			status = "no";

		seq_printf(s, "%-12s %s\n", client->name, status);
	}

	return 0;
}

static int tegra_smmu_clients_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_smmu_clients_show, inode->i_private);
}

static const struct file_operations tegra_smmu_clients_fops = {
	.open = tegra_smmu_clients_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);
	if (!smmu->debugfs)
		return;

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}
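
/*
 * Instantiate and enable the SMMU that is embedded in the Tegra memory
 * controller: configure the page table and TLB caches, enable translation
 * and register the IOMMU operations for the platform bus.
 */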
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	size_t size;
	u32 value;
	int err;

	/* This can happen on Tegra20 which doesn't have an SMMU */
	if (!soc)
		return NULL;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However the IOMMU registration process will attempt to add
	 * all devices to the IOMMU when bus_set_iommu() is called. In order
	 * not to rely on global variables to track the IOMMU instance, we
	 * set it here so that it can be looked up from the .add_device()
	 * callback via the IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(0x20);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc(smmu, NULL, 0);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
	if (err < 0)
		return ERR_PTR(err);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;
}

void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}