/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>
struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	unsigned long pfn_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;
};
struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	struct page *count;
	struct page *pd;
	unsigned int id;
	u32 attr;
};
static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}
static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}
static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}
#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr))

#define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr))
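/*
 * Note: SMMU_PTB_DATA_VALUE() and SMMU_MK_PDE() both pack a page's
 * physical frame number (page_to_phys() >> 12, i.e. the 4 KiB-aligned
 * physical address with the low twelve bits dropped) into the low bits
 * of the register/descriptor and OR the access attribute flags into
 * the topmost bits.
 */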
#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
				       SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31)
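/*
 * On the VA match variants: SMMU_TLB_FLUSH_VA_SECTION() keeps iova
 * bits [31:22] and therefore invalidates a 4 MiB section (one PDE's
 * worth of mappings), while SMMU_TLB_FLUSH_VA_GROUP() keeps bits
 * [31:14] and invalidates a 16 KiB group (four PTEs). Either can be
 * restricted to one address space with SMMU_TLB_FLUSH_ASID_MATCH.
 */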
#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)
/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12
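/*
 * The two-level page table covers the full 32-bit IOVA space:
 * iova[31:22] selects one of 1024 PDEs, iova[21:12] one of 1024 PTEs
 * and iova[11:0] is the offset into a 4 KiB page, for a total of
 * 1024 * 1024 * 4 KiB = 4 GiB. Each table level is itself exactly one
 * 4 KiB page (1024 four-byte entries).
 */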
#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)

#define SMMU_PDE_READABLE (1 << 31)
#define SMMU_PDE_WRITABLE (1 << 30)
#define SMMU_PDE_NONSECURE (1 << 29)
#define SMMU_PDE_NEXT (1 << 28)

#define SMMU_PTE_READABLE (1 << 31)
#define SMMU_PTE_WRITABLE (1 << 30)
#define SMMU_PTE_NONSECURE (1 << 29)

#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
		       SMMU_PDE_NONSECURE)
#define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
		       SMMU_PTE_NONSECURE)
static inline void smmu_flush_ptc(struct tegra_smmu *smmu, struct page *page,
				  unsigned long offset)
{
	phys_addr_t phys = page ? page_to_phys(page) : 0;
	u32 value;

	if (page) {
		offset &= ~(smmu->mc->soc->atom_size - 1);

		if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_PHYS_ADDR_T_64BIT
			value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
			value = 0;
#endif
			smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
		}

		value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	} else {
		value = SMMU_PTC_FLUSH_TYPE_ALL;
	}

	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}
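/*
 * The PTC (page table cache) caches page table entries by their
 * physical address. smmu_flush_ptc() above either invalidates the
 * single atom-sized cache line holding a modified entry (when a page
 * is given) or drops the whole cache (page == NULL, as done once
 * during probe).
 */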
static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}
static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
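/*
 * Register writes are posted: reading SMMU_CONFIG back after a series
 * of writes (flush requests, page table pointer updates) ensures they
 * have actually reached the hardware before execution continues. That
 * is all smmu_flush() below does.
 */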
static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_CONFIG);
}
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	mutex_lock(&smmu->lock);

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids) {
		mutex_unlock(&smmu->lock);
		return -ENOSPC;
	}

	set_bit(id, smmu->asids);
	*idp = id;

	mutex_unlock(&smmu->lock);
	return 0;
}
static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	mutex_lock(&smmu->lock);
	clear_bit(id, smmu->asids);
	mutex_unlock(&smmu->lock);
}
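/*
 * An ASID (address space identifier) names one translation context in
 * hardware. Each active domain owns exactly one, handed out from the
 * smmu->asids bitmap by the two helpers above and bounded by
 * soc->num_asids.
 */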
static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}
static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
	struct tegra_smmu_as *as;
	unsigned int i;
	u32 *pd;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = alloc_page(GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	/* clear PDEs */
	pd = page_address(as->pd);
	SetPageReserved(as->pd);

	for (i = 0; i < SMMU_NUM_PDE; i++)
		pd[i] = 0;

	/* clear PDE usage counters */
	pd = page_address(as->count);
	SetPageReserved(as->count);

	for (i = 0; i < SMMU_NUM_PDE; i++)
		pd[i] = 0;

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}
static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */
	ClearPageReserved(as->pd);

	kfree(as);
}
static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}
static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value |= BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}
}
static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value &= ~BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err;

	if (as->use_count > 0) {
		as->use_count++;
		return 0;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		return err;

	smmu->soc->ops->flush_dcache(as->pd, 0, SMMU_SIZE_PD);
	smmu_flush_ptc(smmu, as->pd, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	return 0;
}
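/*
 * Note the programming sequence in tegra_smmu_as_prepare() above: the
 * context is selected by writing the ASID to SMMU_PTB_ASID, after
 * which SMMU_PTB_DATA latches the page directory's physical address
 * and attributes for that ASID.
 */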
static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	if (--as->use_count > 0)
		return;

	tegra_smmu_free_asid(smmu, as->id);
	as->smmu = NULL;
}
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			/* not ours: advance to the next "iommus" entry */
			of_node_put(args.np);
			index++;
			continue;
		}

		of_node_put(args.np);

		err = tegra_smmu_as_prepare(smmu, as);
		if (err < 0)
			return err;

		tegra_smmu_enable(smmu, swgroup, as->id);
		index++;
	}

	if (index == 0)
		return -ENODEV;

	return 0;
}
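/*
 * Devices become clients of this IOMMU via an "iommus" property whose
 * single specifier cell carries the swgroup ID that attach/detach
 * iterate over above. A hypothetical device tree fragment (node name,
 * phandle and the TEGRA_SWGROUP_DC constant from the Tegra MC bindings
 * are illustrative only):
 *
 *	dc@54200000 {
 *		iommus = <&mc TEGRA_SWGROUP_DC>;
 *	};
 */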
static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = as->smmu;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			/* not ours: advance to the next "iommus" entry */
			of_node_put(args.np);
			index++;
			continue;
		}

		of_node_put(args.np);

		tegra_smmu_disable(smmu, swgroup, as->id);
		tegra_smmu_as_unprepare(smmu, as);
		index++;
	}
}
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       struct page **pagep)
{
	u32 *pd = page_address(as->pd), *pt, *count;
	u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
	u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
	struct tegra_smmu *smmu = as->smmu;
	struct page *page;
	unsigned int i;

	if (pd[pde] == 0) {
		page = alloc_page(GFP_KERNEL | __GFP_DMA);
		if (!page)
			return NULL;

		pt = page_address(page);
		SetPageReserved(page);

		for (i = 0; i < SMMU_NUM_PTE; i++)
			pt[i] = 0;

		smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT);

		pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT);

		smmu->soc->ops->flush_dcache(as->pd, pde << 2, 4);
		smmu_flush_ptc(smmu, as->pd, pde << 2);
		smmu_flush_tlb_section(smmu, as->id, iova);
		smmu_flush(smmu);
	} else {
		page = pfn_to_page(pd[pde] & smmu->pfn_mask);
		pt = page_address(page);
	}

	*pagep = page;

	/* Keep track of entries in this page table. */
	count = page_address(as->count);
	if (pt[pte] == 0)
		count[pde]++;

	return &pt[pte];
}
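/*
 * The as->count page shadows the page directory with one u32 per PDE,
 * counting the live PTEs in the corresponding page table; as_put_pte()
 * below uses it to free a table's backing page once its last entry
 * goes away.
 */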
static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
{
	u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
	u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
	u32 *count = page_address(as->count);
	u32 *pd = page_address(as->pd), *pt;
	struct page *page;

	page = pfn_to_page(pd[pde] & as->smmu->pfn_mask);
	pt = page_address(page);

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (pt[pte] != 0) {
		if (--count[pde] == 0) {
			ClearPageReserved(page);
			__free_page(page);
			pd[pde] = 0;
		}

		pt[pte] = 0;
	}
}
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset;
	struct page *page;
	u32 *pte;

	pte = as_get_pte(as, iova, &page);
	if (!pte)
		return -ENOMEM;

	*pte = __phys_to_pfn(paddr) | SMMU_PTE_ATTR;
	offset = offset_in_page(pte);

	smmu->soc->ops->flush_dcache(page, offset, 4);
	smmu_flush_ptc(smmu, page, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);

	return 0;
}
static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset;
	struct page *page;
	u32 *pte;

	pte = as_get_pte(as, iova, &page);
	if (!pte)
		return 0;

	offset = offset_in_page(pte);
	as_put_pte(as, iova);

	smmu->soc->ops->flush_dcache(page, offset, 4);
	smmu_flush_ptc(smmu, page, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);

	return size;
}
static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct page *page;
	unsigned long pfn;
	u32 *pte;

	pte = as_get_pte(as, iova, &page);
	pfn = *pte & as->smmu->pfn_mask;

	return PFN_PHYS(pfn);
}
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc)
		return NULL;

	return mc->smmu;
}
static int tegra_smmu_add_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		struct tegra_smmu *smmu;

		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			/*
			 * Only a single IOMMU master interface is currently
			 * supported by the Linux kernel, so abort after the
			 * first match.
			 */
			dev->archdata.iommu = smmu;
			break;
		}

		index++;
	}

	return 0;
}
static void tegra_smmu_remove_device(struct device *dev)
{
	dev->archdata.iommu = NULL;
}
static const struct iommu_ops tegra_smmu_ops = {
	.capable = tegra_smmu_capable,
	.domain_alloc = tegra_smmu_domain_alloc,
	.domain_free = tegra_smmu_domain_free,
	.attach_dev = tegra_smmu_attach_dev,
	.detach_dev = tegra_smmu_detach_dev,
	.add_device = tegra_smmu_add_device,
	.remove_device = tegra_smmu_remove_device,
	.map = tegra_smmu_map,
	.unmap = tegra_smmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = tegra_smmu_iova_to_phys,

	.pgsize_bitmap = SZ_4K,
};
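/*
 * With these ops registered on the platform bus, client drivers use
 * the generic IOMMU API rather than calling into this file. A minimal
 * consumer sketch (hypothetical caller code, not part of this driver),
 * assuming a device wired up via "iommus":
 *
 *	struct iommu_domain *domain;
 *
 *	domain = iommu_domain_alloc(&platform_bus_type);
 *	err = iommu_attach_device(domain, dev);
 *	err = iommu_map(domain, iova, paddr, SZ_4K,
 *			IOMMU_READ | IOMMU_WRITE);
 */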
static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}
static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "swgroup    enabled  ASID\n");
	seq_printf(s, "------------------------\n");

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
		const char *status;
		unsigned int asid;

		value = smmu_readl(smmu, group->reg);

		if (value & SMMU_ASID_ENABLE)
			status = "yes";
		else
			status = "no";

		asid = value & SMMU_ASID_MASK;

		seq_printf(s, "%-9s %-7s %#04x\n", group->name, status,
			   asid);
	}

	return 0;
}
static int tegra_smmu_swgroups_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_smmu_swgroups_show, inode->i_private);
}

static const struct file_operations tegra_smmu_swgroups_fops = {
	.open = tegra_smmu_swgroups_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "client       enabled\n");
	seq_printf(s, "--------------------\n");

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];
		const char *status;

		value = smmu_readl(smmu, client->smmu.reg);

		if (value & BIT(client->smmu.bit))
			status = "yes";
		else
			status = "no";

		seq_printf(s, "%-12s %s\n", client->name, status);
	}

	return 0;
}
static int tegra_smmu_clients_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_smmu_clients_show, inode->i_private);
}

static const struct file_operations tegra_smmu_clients_fops = {
	.open = tegra_smmu_clients_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);
	if (!smmu->debugfs)
		return;

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}
static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	size_t size;
	u32 value;
	int err;

	/* This can happen on Tegra20 which doesn't have an SMMU */
	if (!soc)
		return NULL;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However the IOMMU registration process will attempt to add
	 * all devices to the IOMMU when bus_set_iommu() is called. In order
	 * not to rely on global variables to track the IOMMU instance, we
	 * set it here so that it can be looked up from the .add_device()
	 * callback via the IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(0x20);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc(smmu, NULL, 0);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
	if (err < 0)
		return ERR_PTR(err);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;
}
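/*
 * Note the bring-up order in tegra_smmu_probe(): the PTC and TLB are
 * configured and flushed while translation is still disabled, and only
 * then is SMMU_CONFIG_ENABLE written, so the SMMU never operates on
 * stale cache or TLB contents.
 */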
void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}