/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */
#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"
/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		MAX_PHANDLE_ARGS

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))
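/*
 * Layout note (added for clarity): the SMMU register file is carved into
 * equally sized pages of (1 << pgshift) bytes, with pgshift being 12 or 16
 * depending on ID1.PAGESIZE. Global register space 0 sits in page 0 and
 * global register space 1 in page 1, hence the two macros above.
 */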
/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)
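/*
 * Note: a Stream-to-Context Register decides what happens to a transaction
 * whose stream matched the corresponding SMR (or arrived with that stream
 * index): TRANS sends it to the context bank named in CBNDX, BYPASS lets it
 * through untranslated, and FAULT terminates it.
 */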
/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))
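/*
 * As the macros above encode, the context banks occupy the upper half of
 * the SMMU address space, one register page (1 << pgshift bytes) per bank.
 */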
#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0_LO		0x20
#define ARM_SMMU_CB_TTBR0_HI		0x24
#define ARM_SMMU_CB_TTBR1_LO		0x28
#define ARM_SMMU_CB_TTBR1_HI		0x2c
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR_LO		0x50
#define ARM_SMMU_CB_PAR_HI		0x54
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR_LO		0x60
#define ARM_SMMU_CB_FAR_HI		0x64
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR_LO		0x800
#define ARM_SMMU_CB_ATS1PR_HI		0x804
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_MASK			0x7

#define TTBCR2_ADDR_32			0
#define TTBCR2_ADDR_36			1
#define TTBCR2_ADDR_40			2
#define TTBCR2_ADDR_42			3
#define TTBCR2_ADDR_44			4
#define TTBCR2_ADDR_48			5

#define TTBRn_HI_ASID_SHIFT		16

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)
static int force_stage;
module_param_named(force_stage, force_stage, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
enum arm_smmu_arch_version {
	ARM_SMMU_V1 = 1,
	ARM_SMMU_V2,
};
struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};
struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};
struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;
};
struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(cfg)		((cfg)->cbndx)
#define ARM_SMMU_CB_VMID(cfg)		((cfg)->cbndx + 1)
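/*
 * ASIDs and VMIDs are derived directly from the context bank index, so
 * by-ASID/by-VMID TLB maintenance only ever touches a single context bank.
 * VMIDs are offset by one, which keeps VMID 0 from being handed out.
 */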
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};
struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

static struct iommu_ops arm_smmu_ops;

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);
struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}
static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}
static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}
static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}
static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct of_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}
static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}
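/*
 * The helpers below hand out indices from a fixed-size bitmap (context
 * banks or SMRs). test_and_set_bit() makes the claim atomic, so no extra
 * locking is needed; a lost race simply retries the search.
 */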
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}
static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}
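/*
 * For stage 1, the value written to TLBIVA/TLBIVAL encodes both the page
 * address and the ASID: VA[31:12] with the ASID in the low bits for the
 * 32-bit register format, or VA >> 12 with the ASID in bits [63:48] for
 * the 64-bit format used below.
 */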
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
			iova &= ~0xfffUL;
			iova |= ARM_SMMU_CB_ASID(cfg);
			writel_relaxed(iova, reg);
#ifdef CONFIG_64BIT
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
			writeq_relaxed(iova, reg);
#endif
		}
#ifdef CONFIG_64BIT
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		writeq_relaxed(iova >> 12, reg);
#endif
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg);
	}
}
static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;

	/* Ensure new page tables are visible to the hardware walker */
	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
		dsb(ishst);
	} else {
		/*
		 * If the SMMU can't walk tables in the CPU caches, treat them
		 * like non-coherent DMA since we need to flush the new entries
		 * all the way out to memory. There's no possibility of
		 * recursion here as the SMMU table walker will not be wired
		 * through another SMMU.
		 */
		dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
			     DMA_TO_DEVICE);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
	.flush_pgtable	= arm_smmu_flush_pgtable,
};
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret;
	u32 fsr, far, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fsr & FSR_IGN)
		dev_err_ratelimited(smmu->dev,
				    "Unexpected context fault (fsr 0x%x)\n",
				    fsr);

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
	iova = far;
#ifdef CONFIG_64BIT
	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
	iova |= ((unsigned long)far << 32);
#endif

	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		dev_err_ratelimited(smmu->dev,
		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
		    iova, fsynr, cfg->cbndx);
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/* Clear the faulting FSR */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* Retry or terminate any stalled transactions */
	if (fsr & FSR_SS)
		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

	return ret;
}
static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}
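/*
 * Programming order matters in the function below: CBA2R must be written
 * before CBAR (see the VMID16 note inside), and SCTLR is written last so
 * the context bank only comes on once the whole translation regime
 * (TTBRs, TTBCR, MAIRs) has been set up.
 */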
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr0_base, *gr1_base;

	gr0_base = ARM_SMMU_GR0(smmu);
	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		/*
		 * CBA2R.
		 * *Must* be initialised before CBAR: a VMID16 architectural
		 * oversight affects some implementations.
		 */
#ifdef CONFIG_64BIT
		reg = CBA2R_RW64_64BIT;
#else
		reg = CBA2R_RW64_32BIT;
#endif
		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version == ARM_SMMU_V1)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else {
		reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0] >> 32;
		reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);

		reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_LO);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1] >> 32;
		reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_HI);
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr >> 32;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			switch (smmu->va_size) {
			case 32:
				reg |= (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
				break;
			case 36:
				reg |= (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
				break;
			case 40:
				reg |= (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
				break;
			case 42:
				reg |= (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
				break;
			case 44:
				reg |= (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
				break;
			case 48:
				reg |= (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
				break;
			}
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *	S1               N              S1
	 *	S1             S1+S2            S1
	 *	S1               S2             S2
	 *	S1               S1             S1
	 *	N                N              N
	 *	N              S1+S2            N
	 *	N                S2             S2
	 *	N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (IS_ENABLED(CONFIG_64BIT))
			fmt = ARM_64_LPAE_S1;
		else
			fmt = ARM_32_LPAE_S1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (IS_ENABLED(CONFIG_64BIT))
			fmt = ARM_64_LPAE_S2;
		else
			fmt = ARM_32_LPAE_S2;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (IS_ERR_VALUE(ret))
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version == ARM_SMMU_V1) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update our support page sizes to reflect the page table format */
	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
			  "arm-smmu-context-fault", domain);
	if (IS_ERR_VALUE(ret)) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		free_irq(irq, domain);
	}

	if (smmu_domain->pgtbl_ops)
		free_io_pgtable_ops(smmu_domain->pgtbl_ops);

	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}
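/*
 * On a stream-matching SMMU, a master's stream IDs are installed in Stream
 * Match Registers so the matching S2CR entries can route the master's
 * transactions to a domain's context bank; on stream-indexing SMMUs the
 * stream ID indexes the S2CRs directly and no SMRs are programmed.
 */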
static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (IS_ERR_VALUE(idx)) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}
static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx, s2cr;

		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		s2cr = S2CR_TYPE_TRANS |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}
static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];

		writel_relaxed(S2CR_TYPE_BYPASS,
			       gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	if (dev->archdata.iommu) {
		dev_err(dev, "already attached to IOMMU domain\n");
		return -EEXIST;
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (IS_ERR_VALUE(ret))
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return -ENODEV;

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;
	return ret;
}
static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_master_cfg *cfg;

	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return;

	dev->archdata.iommu = NULL;
	arm_smmu_domain_remove_master(smmu_domain, cfg);
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version == 1) {
		u32 reg = iova & ~0xfff;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO);
	} else {
		u32 reg = iova & ~0xfff;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO);
		reg = ((u64)iova & ~0xfff) >> 32;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_HI);
	}

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on 0x%pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
	phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;

	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}
static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}
static int arm_smmu_add_pci_device(struct pci_dev *pdev)
{
	int i, ret;
	u16 sid;
	struct iommu_group *group;
	struct arm_smmu_master_cfg *cfg;

	group = iommu_group_get_for_dev(&pdev->dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg) {
			ret = -ENOMEM;
			goto out_put_group;
		}

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS) {
		ret = -ENOSPC;
		goto out_put_group;
	}

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids)
		cfg->streamids[cfg->num_streamids++] = sid;

	return 0;
out_put_group:
	iommu_group_put(group);
	return ret;
}
static int arm_smmu_add_platform_device(struct device *dev)
{
	struct iommu_group *group;
	struct arm_smmu_master *master;
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	/* No automatic group creation for platform devices */
	group = iommu_group_alloc();
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_set_iommudata(group, &master->cfg, NULL);
	return iommu_group_add_device(group, dev);
}

static int arm_smmu_add_device(struct device *dev)
{
	if (dev_is_pci(dev))
		return arm_smmu_add_pci_device(to_pci_dev(dev));

	return arm_smmu_add_platform_device(dev);
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.detach_dev		= arm_smmu_detach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
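/*
 * Bring the SMMU to a known state: clear any recorded global faults, point
 * every stream at bypass, disable all context banks, invalidate the TLBs
 * and only then write the global configuration register.
 */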
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i = 0;
	u32 reg;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/* Mark all SMRn as invalid and all S2CRn as bypass */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(S2CR_TYPE_BYPASS,
			gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/* Make sure all context banks are disabled and clear CB_FSR  */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, but bypass when no mapping is found */
	reg &= ~(sCR0_CLIENTPD | sCR0_USFCFG);

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
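/*
 * ID register address-size fields use the architected encoding, which the
 * helper below decodes: 0 -> 32 bits, 1 -> 36, 2 -> 40, 3 -> 42, 4 -> 44,
 * anything else -> 48.
 */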
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) && ((smmu->version == 1) || (id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	if (id & ID0_CTTW) {
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
		dev_notice(smmu->dev, "\tcoherent table walk\n");
	}

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
		if ((mask & sid) != sid) {
			dev_err(smmu->dev,
				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
				mask, sid);
			return -ENODEV;
		}

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups, mask 0x%x",
			   smmu->num_mapping_groups, mask);
	} else {
		smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
					   ID0_NUMSIDB_MASK;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			"SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version == ARM_SMMU_V1) {
		smmu->va_size = smmu->ipa_size;
		size = SZ_4K | SZ_2M | SZ_1G;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
#ifndef CONFIG_64BIT
		smmu->va_size = min(32UL, smmu->va_size);
#endif
		size = 0;
		if (id & ID2_PTFS_4K)
			size |= SZ_4K | SZ_2M | SZ_1G;
		if (id & ID2_PTFS_16K)
			size |= SZ_16K | SZ_32M;
		if (id & ID2_PTFS_64K)
			size |= SZ_64K | SZ_512M;
	}

	arm_smmu_ops.pgsize_bitmap &= size;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}
static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 },
	{ .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 },
	{ .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 },
	{ .compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1 },
	{ .compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_args masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	smmu->version = (enum arm_smmu_arch_version)of_id->data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	i = 0;
	smmu->masters = RB_ROOT;
	while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
					   "#stream-id-cells", i,
					   &masterspec)) {
		err = register_smmu_master(smmu, dev, &masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec.np->name);
			goto out_put_masters;
		}

		i++;
	}
	dev_notice(dev, "registered %d master devices\n", i);

	parse_driver_options(smmu);

	if (smmu->version > ARM_SMMU_V1 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = request_irq(smmu->irqs[i],
				  arm_smmu_global_fault,
				  IRQF_SHARED,
				  "arm-smmu global fault",
				  smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_free_irqs;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;

out_free_irqs:
	while (i--)
		free_irq(smmu->irqs[i], smmu);

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		free_irq(smmu->irqs[i], smmu);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}
static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type))
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");