/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* MMU register offsets */
#define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
#define RK_MMU_STATUS		0x04
#define RK_MMU_COMMAND		0x08
#define RK_MMU_PAGE_FAULT_ADDR	0x0C	/* IOVA of last page fault */
#define RK_MMU_ZAP_ONE_LINE	0x10	/* Shootdown one IOTLB entry */
#define RK_MMU_INT_RAWSTAT	0x14	/* IRQ status ignoring mask */
#define RK_MMU_INT_CLEAR	0x18	/* Acknowledge and re-arm irq */
#define RK_MMU_INT_MASK		0x1C	/* IRQ enable */
#define RK_MMU_INT_STATUS	0x20	/* IRQ status after masking */
#define RK_MMU_AUTO_GATING	0x24

#define DTE_ADDR_DUMMY		0xCAFEBABE
#define FORCE_RESET_TIMEOUT	100	/* ms */

/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED       BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE    BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE         BIT(2)
#define RK_MMU_STATUS_IDLE                 BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY  BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE  BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE     BIT(31)

/* RK_MMU_COMMAND command values */
#define RK_MMU_CMD_ENABLE_PAGING    0  /* Enable memory translation */
#define RK_MMU_CMD_DISABLE_PAGING   1  /* Disable memory translation */
#define RK_MMU_CMD_ENABLE_STALL     2  /* Stall paging to allow other cmds */
#define RK_MMU_CMD_DISABLE_STALL    3  /* Stop stalling; re-enables paging */
#define RK_MMU_CMD_ZAP_CACHE        4  /* Shoot down entire IOTLB */
#define RK_MMU_CMD_PAGE_FAULT_DONE  5  /* Clear page fault */
#define RK_MMU_CMD_FORCE_RESET      6  /* Reset all registers */

/* RK_MMU_INT_* register fields */
#define RK_MMU_IRQ_PAGE_FAULT    0x01  /* page fault */
#define RK_MMU_IRQ_BUS_ERROR     0x02  /* bus read error */
#define RK_MMU_IRQ_MASK          (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)

#define NUM_DT_ENTRIES 1024
#define NUM_PT_ENTRIES 1024

#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)

/*
 * Support mapping any size that fits in one page table:
 * 4 KiB to 4 MiB
 */
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
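/*
 * In the IOMMU API, bit n of pgsize_bitmap means that page size 2^n is
 * supported, so 0x007ff000 (bits 12 through 22 set) advertises every
 * power-of-two size from 4 KiB (1 << 12) up to 4 MiB (1 << 22).
 */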
75 | ||
76 | #define IOMMU_REG_POLL_COUNT_FAST 1000 | |
77 | ||
78 | struct rk_iommu_domain { | |
79 | struct list_head iommus; | |
80 | u32 *dt; /* page directory table */ | |
81 | spinlock_t iommus_lock; /* lock for iommus list */ | |
82 | spinlock_t dt_lock; /* lock for modifying page directory table */ | |
83 | }; | |
84 | ||
85 | struct rk_iommu { | |
86 | struct device *dev; | |
87 | void __iomem *base; | |
88 | int irq; | |
89 | struct list_head node; /* entry in rk_iommu_domain.iommus */ | |
90 | struct iommu_domain *domain; /* domain to which iommu is attached */ | |
91 | }; | |
92 | ||
static inline void rk_table_flush(u32 *va, unsigned int count)
{
	phys_addr_t pa_start = virt_to_phys(va);
	phys_addr_t pa_end = virt_to_phys(va + count);
	size_t size = pa_end - pa_start;

	__cpuc_flush_dcache_area(va, size);
	outer_flush_range(pa_start, pa_end);
}

/*
 * Inspired by _wait_for in intel_drv.h
 * This is NOT safe for use in interrupt context.
 *
 * Note that it's important that we check the condition again after having
 * timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 */
#define rk_wait_for(COND, MS) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			ret__ = (COND) ? 0 : -ETIMEDOUT;		\
			break;						\
		}							\
		usleep_range(50, 100);					\
	}								\
	ret__;								\
})
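
/*
 * Typical usage, as in the stall/paging helpers later in this file:
 * poll a status bit for up to ~1 ms, e.g.
 *	ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
 */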
123 | ||
124 | /* | |
125 | * The Rockchip rk3288 iommu uses a 2-level page table. | |
126 | * The first level is the "Directory Table" (DT). | |
127 | * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing | |
128 | * to a "Page Table". | |
129 | * The second level is the 1024 Page Tables (PT). | |
130 | * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to | |
131 | * a 4 KB page of physical memory. | |
132 | * | |
133 | * The DT and each PT fits in a single 4 KB page (4-bytes * 1024 entries). | |
 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
 * address of the start of the DT page.
 *
 * The structure of the page table is as follows:
 *
 *                   DT
 * MMU_DTE_ADDR -> +-----+
 *                 |     |
 *                 +-----+     PT
 *                 | DTE | -> +-----+
 *                 +-----+    |     |     Memory
 *                 |     |    +-----+     Page
 *                 |     |    | PTE | -> +-----+
 *                 +-----+    +-----+    |     |
 *                            |     |    |     |
 *                            |     |    |     |
 *                            +-----+    |     |
 *                                       |     |
 *                                       |     |
 *                                       +-----+
 */
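
/*
 * A translation therefore takes three steps: the iova's DTE index selects
 * a DTE (and thus a PT) from the DT, its PTE index selects a PTE (and thus
 * a page) from that PT, and its low 12 bits are the offset into that page.
 * The DTE, PTE and iova layouts are described below.
 */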
155 | ||
156 | /* | |
157 | * Each DTE has a PT address and a valid bit: | |
158 | * +---------------------+-----------+-+ | |
159 | * | PT address | Reserved |V| | |
160 | * +---------------------+-----------+-+ | |
 * 31:12 - PT address (PTs always start on a 4 KB boundary)
 * 11: 1 - Reserved
 *     0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK    0xfffff000
#define RK_DTE_PT_VALID           BIT(0)

static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}

static inline bool rk_dte_is_pt_valid(u32 dte)
{
	return dte & RK_DTE_PT_VALID;
}

static u32 rk_mk_dte(u32 *pt)
{
	phys_addr_t pt_phys = virt_to_phys(pt);
	return (pt_phys & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}
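
/*
 * Illustrative value (not from the original source): for a page table at
 * physical address 0x12345000, rk_mk_dte() returns 0x12345001, i.e. the
 * PT address in bits 31:12 with the valid bit set.
 */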
183 | ||
184 | /* | |
185 | * Each PTE has a Page address, some flags and a valid bit: | |
186 | * +---------------------+---+-------+-+ | |
187 | * | Page address |Rsv| Flags |V| | |
188 | * +---------------------+---+-------+-+ | |
189 | * 31:12 - Page address (Pages always start on a 4 KB boundary) | |
190 | * 11: 9 - Reserved | |
191 | * 8: 1 - Flags | |
192 | * 8 - Read allocate - allocate cache space on read misses | |
193 | * 7 - Read cache - enable cache & prefetch of data | |
194 | * 6 - Write buffer - enable delaying writes on their way to memory | |
195 | * 5 - Write allocate - allocate cache space on write misses | |
196 | * 4 - Write cache - different writes can be merged together | |
197 | * 3 - Override cache attributes | |
198 | * if 1, bits 4-8 control cache attributes | |
199 | * if 0, the system bus defaults are used | |
200 | * 2 - Writable | |
201 | * 1 - Readable | |
202 | * 0 - 1 if Page @ Page address is valid | |
203 | */ | |
204 | #define RK_PTE_PAGE_ADDRESS_MASK 0xfffff000 | |
205 | #define RK_PTE_PAGE_FLAGS_MASK 0x000001fe | |
206 | #define RK_PTE_PAGE_WRITABLE BIT(2) | |
207 | #define RK_PTE_PAGE_READABLE BIT(1) | |
208 | #define RK_PTE_PAGE_VALID BIT(0) | |
209 | ||
210 | static inline phys_addr_t rk_pte_page_address(u32 pte) | |
211 | { | |
212 | return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK; | |
213 | } | |
214 | ||
215 | static inline bool rk_pte_is_page_valid(u32 pte) | |
216 | { | |
217 | return pte & RK_PTE_PAGE_VALID; | |
218 | } | |
219 | ||
220 | /* TODO: set cache flags per prot IOMMU_CACHE */ | |
221 | static u32 rk_mk_pte(phys_addr_t page, int prot) | |
222 | { | |
223 | u32 flags = 0; | |
224 | flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0; | |
225 | flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0; | |
226 | page &= RK_PTE_PAGE_ADDRESS_MASK; | |
227 | return page | flags | RK_PTE_PAGE_VALID; | |
228 | } | |
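
/*
 * Illustrative value (not from the original source):
 * rk_mk_pte(0x12345000, IOMMU_READ | IOMMU_WRITE) returns 0x12345007,
 * i.e. the page address with the readable, writable and valid bits set.
 */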
229 | ||
230 | static u32 rk_mk_pte_invalid(u32 pte) | |
231 | { | |
232 | return pte & ~RK_PTE_PAGE_VALID; | |
233 | } | |
234 | ||
235 | /* | |
236 | * rk3288 iova (IOMMU Virtual Address) format | |
237 | * 31 22.21 12.11 0 | |
238 | * +-----------+-----------+-------------+ | |
239 | * | DTE index | PTE index | Page offset | | |
240 | * +-----------+-----------+-------------+ | |
241 | * 31:22 - DTE index - index of DTE in DT | |
242 | * 21:12 - PTE index - index of PTE in PT @ DTE.pt_address | |
243 | * 11: 0 - Page offset - offset into page @ PTE.page_address | |
244 | */ | |
245 | #define RK_IOVA_DTE_MASK 0xffc00000 | |
246 | #define RK_IOVA_DTE_SHIFT 22 | |
247 | #define RK_IOVA_PTE_MASK 0x003ff000 | |
248 | #define RK_IOVA_PTE_SHIFT 12 | |
249 | #define RK_IOVA_PAGE_MASK 0x00000fff | |
250 | #define RK_IOVA_PAGE_SHIFT 0 | |
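
/*
 * Worked example (illustrative): iova 0x12345678 decomposes into
 *	dte_index   = (0x12345678 & RK_IOVA_DTE_MASK) >> 22 = 0x048
 *	pte_index   = (0x12345678 & RK_IOVA_PTE_MASK) >> 12 = 0x345
 *	page_offset =  0x12345678 & RK_IOVA_PAGE_MASK       = 0x678
 */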
251 | ||
252 | static u32 rk_iova_dte_index(dma_addr_t iova) | |
253 | { | |
254 | return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT; | |
255 | } | |
256 | ||
257 | static u32 rk_iova_pte_index(dma_addr_t iova) | |
258 | { | |
259 | return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT; | |
260 | } | |
261 | ||
262 | static u32 rk_iova_page_offset(dma_addr_t iova) | |
263 | { | |
264 | return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT; | |
265 | } | |
266 | ||
267 | static u32 rk_iommu_read(struct rk_iommu *iommu, u32 offset) | |
268 | { | |
269 | return readl(iommu->base + offset); | |
270 | } | |
271 | ||
272 | static void rk_iommu_write(struct rk_iommu *iommu, u32 offset, u32 value) | |
273 | { | |
274 | writel(value, iommu->base + offset); | |
275 | } | |
276 | ||
277 | static void rk_iommu_command(struct rk_iommu *iommu, u32 command) | |
278 | { | |
279 | writel(command, iommu->base + RK_MMU_COMMAND); | |
280 | } | |
281 | ||
282 | static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova, | |
283 | size_t size) | |
284 | { | |
285 | dma_addr_t iova_end = iova + size; | |
286 | /* | |
287 | * TODO(djkurtz): Figure out when it is more efficient to shootdown the | |
288 | * entire iotlb rather than iterate over individual iovas. | |
289 | */ | |
290 | for (; iova < iova_end; iova += SPAGE_SIZE) | |
291 | rk_iommu_write(iommu, RK_MMU_ZAP_ONE_LINE, iova); | |
292 | } | |
293 | ||
294 | static bool rk_iommu_is_stall_active(struct rk_iommu *iommu) | |
295 | { | |
296 | return rk_iommu_read(iommu, RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE; | |
297 | } | |
298 | ||
299 | static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu) | |
300 | { | |
301 | return rk_iommu_read(iommu, RK_MMU_STATUS) & | |
302 | RK_MMU_STATUS_PAGING_ENABLED; | |
303 | } | |
304 | ||
305 | static int rk_iommu_enable_stall(struct rk_iommu *iommu) | |
306 | { | |
307 | int ret; | |
308 | ||
309 | if (rk_iommu_is_stall_active(iommu)) | |
310 | return 0; | |
311 | ||
312 | /* Stall can only be enabled if paging is enabled */ | |
313 | if (!rk_iommu_is_paging_enabled(iommu)) | |
314 | return 0; | |
315 | ||
316 | rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL); | |
317 | ||
318 | ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1); | |
319 | if (ret) | |
320 | dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n", | |
321 | rk_iommu_read(iommu, RK_MMU_STATUS)); | |
322 | ||
323 | return ret; | |
324 | } | |
325 | ||
326 | static int rk_iommu_disable_stall(struct rk_iommu *iommu) | |
327 | { | |
328 | int ret; | |
329 | ||
330 | if (!rk_iommu_is_stall_active(iommu)) | |
331 | return 0; | |
332 | ||
333 | rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL); | |
334 | ||
335 | ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1); | |
336 | if (ret) | |
337 | dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n", | |
338 | rk_iommu_read(iommu, RK_MMU_STATUS)); | |
339 | ||
340 | return ret; | |
341 | } | |
342 | ||
343 | static int rk_iommu_enable_paging(struct rk_iommu *iommu) | |
344 | { | |
345 | int ret; | |
346 | ||
347 | if (rk_iommu_is_paging_enabled(iommu)) | |
348 | return 0; | |
349 | ||
350 | rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING); | |
351 | ||
352 | ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1); | |
353 | if (ret) | |
354 | dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n", | |
355 | rk_iommu_read(iommu, RK_MMU_STATUS)); | |
356 | ||
357 | return ret; | |
358 | } | |
359 | ||
360 | static int rk_iommu_disable_paging(struct rk_iommu *iommu) | |
361 | { | |
362 | int ret; | |
363 | ||
364 | if (!rk_iommu_is_paging_enabled(iommu)) | |
365 | return 0; | |
366 | ||
367 | rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING); | |
368 | ||
369 | ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1); | |
370 | if (ret) | |
371 | dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n", | |
372 | rk_iommu_read(iommu, RK_MMU_STATUS)); | |
373 | ||
374 | return ret; | |
375 | } | |
376 | ||
377 | static int rk_iommu_force_reset(struct rk_iommu *iommu) | |
378 | { | |
379 | int ret; | |
380 | u32 dte_addr; | |
381 | ||
382 | /* | |
383 | * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY | |
	 * and verifying that the upper 5 nybbles are read back.
	 */
	rk_iommu_write(iommu, RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);

	dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
	if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
		dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
		return -EFAULT;
	}

	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

	ret = rk_wait_for(rk_iommu_read(iommu, RK_MMU_DTE_ADDR) == 0x00000000,
			  FORCE_RESET_TIMEOUT);
	if (ret)
		dev_err(iommu->dev, "FORCE_RESET command timed out\n");

	return ret;
}

static void log_iova(struct rk_iommu *iommu, dma_addr_t iova)
{
	u32 dte_index, pte_index, page_offset;
	u32 mmu_dte_addr;
	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
	u32 *dte_addr;
	u32 dte;
	phys_addr_t pte_addr_phys = 0;
	u32 *pte_addr = NULL;
	u32 pte = 0;
	phys_addr_t page_addr_phys = 0;
	u32 page_flags = 0;

	dte_index = rk_iova_dte_index(iova);
	pte_index = rk_iova_pte_index(iova);
	page_offset = rk_iova_page_offset(iova);

	mmu_dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
	mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;

	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
	dte_addr = phys_to_virt(dte_addr_phys);
	dte = *dte_addr;

	if (!rk_dte_is_pt_valid(dte))
		goto print_it;

	pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
	pte_addr = phys_to_virt(pte_addr_phys);
	pte = *pte_addr;

	if (!rk_pte_is_page_valid(pte))
		goto print_it;

	page_addr_phys = rk_pte_page_address(pte) + page_offset;
	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;

print_it:
	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
		&iova, dte_index, pte_index, page_offset);
	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
		&mmu_dte_addr_phys, &dte_addr_phys, dte,
		rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
		rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}

static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
	struct rk_iommu *iommu = dev_id;
	u32 status;
	u32 int_status;
	dma_addr_t iova;

	int_status = rk_iommu_read(iommu, RK_MMU_INT_STATUS);
	if (int_status == 0)
		return IRQ_NONE;

	iova = rk_iommu_read(iommu, RK_MMU_PAGE_FAULT_ADDR);

	if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
		int flags;

		status = rk_iommu_read(iommu, RK_MMU_STATUS);
		flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
				IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

		dev_err(iommu->dev, "Page fault at %pad of type %s\n",
			&iova,
			(flags == IOMMU_FAULT_WRITE) ? "write" : "read");

		log_iova(iommu, iova);

		/*
		 * Report page fault to any installed handlers.
		 * Ignore the return code, though, since we always zap cache
		 * and clear the page fault anyway.
		 */
		if (iommu->domain)
			report_iommu_fault(iommu->domain, iommu->dev, iova,
					   flags);
		else
			dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");

		rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
		rk_iommu_command(iommu, RK_MMU_CMD_PAGE_FAULT_DONE);
	}

	if (int_status & RK_MMU_IRQ_BUS_ERROR)
		dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);

	if (int_status & ~RK_MMU_IRQ_MASK)
		dev_err(iommu->dev, "unexpected int_status: %#08x\n",
			int_status);

	rk_iommu_write(iommu, RK_MMU_INT_CLEAR, int_status);

	return IRQ_HANDLED;
}

static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct rk_iommu_domain *rk_domain = domain->priv;
	unsigned long flags;
	phys_addr_t pt_phys, phys = 0;
	u32 dte, pte;
	u32 *page_table;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	if (!rk_dte_is_pt_valid(dte))
		goto out;

	pt_phys = rk_dte_pt_address(dte);
	page_table = (u32 *)phys_to_virt(pt_phys);
	pte = page_table[rk_iova_pte_index(iova)];
	if (!rk_pte_is_page_valid(pte))
		goto out;

	phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
out:
	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return phys;
}

static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
			      dma_addr_t iova, size_t size)
{
	struct list_head *pos;
	unsigned long flags;

	/* shootdown this iova range from all iommus using this domain */
	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_for_each(pos, &rk_domain->iommus) {
		struct rk_iommu *iommu;
		iommu = list_entry(pos, struct rk_iommu, node);
		rk_iommu_zap_lines(iommu, iova, size);
	}
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}

static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
				  dma_addr_t iova)
{
	u32 *page_table, *dte_addr;
	u32 dte;
	phys_addr_t pt_phys;

	assert_spin_locked(&rk_domain->dt_lock);

	dte_addr = &rk_domain->dt[rk_iova_dte_index(iova)];
	dte = *dte_addr;
	if (rk_dte_is_pt_valid(dte))
		goto done;

	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	dte = rk_mk_dte(page_table);
	*dte_addr = dte;

	rk_table_flush(page_table, NUM_PT_ENTRIES);
	rk_table_flush(dte_addr, 1);

	/*
	 * Zap the first iova of the newly allocated page table so the iommu
	 * evicts the old cached value of the new dte from the iotlb.
	 */
	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);

done:
	pt_phys = rk_dte_pt_address(dte);
	return (u32 *)phys_to_virt(pt_phys);
}

static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
				  u32 *pte_addr, dma_addr_t iova, size_t size)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];
		if (!rk_pte_is_page_valid(pte))
			break;

		pte_addr[pte_count] = rk_mk_pte_invalid(pte);
	}

	rk_table_flush(pte_addr, pte_count);

	return pte_count * SPAGE_SIZE;
}

static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
			     dma_addr_t iova, phys_addr_t paddr, size_t size,
			     int prot)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;
	phys_addr_t page_phys;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];

		if (rk_pte_is_page_valid(pte))
			goto unwind;

		pte_addr[pte_count] = rk_mk_pte(paddr, prot);

		paddr += SPAGE_SIZE;
	}

	rk_table_flush(pte_addr, pte_count);

	return 0;
unwind:
	/* Unmap the range of iovas that we just mapped */
	rk_iommu_unmap_iova(rk_domain, pte_addr, iova, pte_count * SPAGE_SIZE);

	iova += pte_count * SPAGE_SIZE;
	page_phys = rk_pte_page_address(pte_addr[pte_count]);
	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
	       &iova, &page_phys, &paddr, prot);

	return -EADDRINUSE;
}

static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct rk_iommu_domain *rk_domain = domain->priv;
	unsigned long flags;
	dma_addr_t iova = (dma_addr_t)_iova;
	u32 *page_table, *pte_addr;
	int ret;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_map() guarantees that both iova and size will be
	 * aligned, we will always only be mapping from a single dte here.
	 */
	page_table = rk_dte_get_page_table(rk_domain, iova);
	if (IS_ERR(page_table)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return PTR_ERR(page_table);
	}

	pte_addr = &page_table[rk_iova_pte_index(iova)];
	ret = rk_iommu_map_iova(rk_domain, pte_addr, iova, paddr, size, prot);
	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return ret;
}

static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
			     size_t size)
{
	struct rk_iommu_domain *rk_domain = domain->priv;
	unsigned long flags;
	dma_addr_t iova = (dma_addr_t)_iova;
	phys_addr_t pt_phys;
	u32 dte;
	u32 *pte_addr;
	size_t unmap_size;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_unmap() guarantees that both iova and size will be
	 * aligned, we will always only be unmapping from a single dte here.
	 */
	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	/* Just return 0 if iova is unmapped */
	if (!rk_dte_is_pt_valid(dte)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return 0;
	}

	pt_phys = rk_dte_pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, iova, size);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	/* Shootdown iotlb entries for iova range that was just unmapped */
	rk_iommu_zap_iova(rk_domain, iova, unmap_size);

	return unmap_size;
}

static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
	struct iommu_group *group;
	struct device *iommu_dev;
	struct rk_iommu *rk_iommu;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;
	iommu_dev = iommu_group_get_iommudata(group);
	rk_iommu = dev_get_drvdata(iommu_dev);
	iommu_group_put(group);

	return rk_iommu;
}

static int rk_iommu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = domain->priv;
	unsigned long flags;
	int ret;
	phys_addr_t dte_addr;

	/*
	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
	 * Such a device does not belong to an iommu group.
	 */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return 0;

	ret = rk_iommu_enable_stall(iommu);
	if (ret)
		return ret;

	ret = rk_iommu_force_reset(iommu);
	if (ret)
		return ret;

	iommu->domain = domain;

	ret = devm_request_irq(dev, iommu->irq, rk_iommu_irq,
			       IRQF_SHARED, dev_name(dev), iommu);
	if (ret)
		return ret;

	dte_addr = virt_to_phys(rk_domain->dt);
	rk_iommu_write(iommu, RK_MMU_DTE_ADDR, dte_addr);
	rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
	rk_iommu_write(iommu, RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);

	ret = rk_iommu_enable_paging(iommu);
	if (ret)
		return ret;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_add_tail(&iommu->node, &rk_domain->iommus);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	dev_info(dev, "Attached to iommu domain\n");

	rk_iommu_disable_stall(iommu);

	return 0;
}

static void rk_iommu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = domain->priv;
	unsigned long flags;

	/* Allow 'virtual devices' (e.g., drm) to detach from domain */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_del_init(&iommu->node);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	/* Ignore error while disabling, just keep going */
	rk_iommu_enable_stall(iommu);
	rk_iommu_disable_paging(iommu);
	rk_iommu_write(iommu, RK_MMU_INT_MASK, 0);
	rk_iommu_write(iommu, RK_MMU_DTE_ADDR, 0);
	rk_iommu_disable_stall(iommu);

	devm_free_irq(dev, iommu->irq, iommu);

	iommu->domain = NULL;

	dev_info(dev, "Detached from iommu domain\n");
}

static int rk_iommu_domain_init(struct iommu_domain *domain)
{
	struct rk_iommu_domain *rk_domain;

	rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
	if (!rk_domain)
		return -ENOMEM;

	/*
	 * rk32xx iommus use a 2-level page table.
	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
	 * Allocate one 4 KiB page for each table.
	 */
	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	if (!rk_domain->dt)
		goto err_dt;

	rk_table_flush(rk_domain->dt, NUM_DT_ENTRIES);

	spin_lock_init(&rk_domain->iommus_lock);
	spin_lock_init(&rk_domain->dt_lock);
	INIT_LIST_HEAD(&rk_domain->iommus);

	domain->priv = rk_domain;

	return 0;
err_dt:
	kfree(rk_domain);
	return -ENOMEM;
}

static void rk_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct rk_iommu_domain *rk_domain = domain->priv;
	int i;

	WARN_ON(!list_empty(&rk_domain->iommus));

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		u32 dte = rk_domain->dt[i];
		if (rk_dte_is_pt_valid(dte)) {
			phys_addr_t pt_phys = rk_dte_pt_address(dte);
			u32 *page_table = phys_to_virt(pt_phys);
			free_page((unsigned long)page_table);
		}
	}

	free_page((unsigned long)rk_domain->dt);
	kfree(domain->priv);
	domain->priv = NULL;
}

static bool rk_iommu_is_dev_iommu_master(struct device *dev)
{
	struct device_node *np = dev->of_node;
	int ret;

	/*
	 * An iommu master has an iommus property containing a list of phandles
	 * to iommu nodes, each with an #iommu-cells property with value 0.
	 */
	ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells");
	return (ret > 0);
}

static int rk_iommu_group_set_iommudata(struct iommu_group *group,
					struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct platform_device *pd;
	int ret;
	struct of_phandle_args args;

	/*
	 * An iommu master has an iommus property containing a list of phandles
	 * to iommu nodes, each with an #iommu-cells property with value 0.
	 */
	ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0,
					 &args);
	if (ret) {
		dev_err(dev, "of_parse_phandle_with_args(%s) => %d\n",
			np->full_name, ret);
		return ret;
	}
	if (args.args_count != 0) {
		dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 0)\n",
			args.np->full_name, args.args_count);
		return -EINVAL;
	}

	pd = of_find_device_by_node(args.np);
	of_node_put(args.np);
	if (!pd) {
		dev_err(dev, "iommu %s not found\n", args.np->full_name);
		return -EPROBE_DEFER;
	}

	/* TODO(djkurtz): handle multiple slave iommus for a single master */
	iommu_group_set_iommudata(group, &pd->dev, NULL);

	return 0;
}

static int rk_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (!rk_iommu_is_dev_iommu_master(dev))
		return -ENODEV;

	group = iommu_group_get(dev);
	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group)) {
			dev_err(dev, "Failed to allocate IOMMU group\n");
			return PTR_ERR(group);
		}
	}

	ret = iommu_group_add_device(group, dev);
	if (ret)
		goto err_put_group;

	ret = rk_iommu_group_set_iommudata(group, dev);
	if (ret)
		goto err_remove_device;

	iommu_group_put(group);

	return 0;

err_remove_device:
	iommu_group_remove_device(dev);
err_put_group:
	iommu_group_put(group);
	return ret;
}

static void rk_iommu_remove_device(struct device *dev)
{
	if (!rk_iommu_is_dev_iommu_master(dev))
		return;

	iommu_group_remove_device(dev);
}

static const struct iommu_ops rk_iommu_ops = {
	.domain_init = rk_iommu_domain_init,
	.domain_destroy = rk_iommu_domain_destroy,
	.attach_dev = rk_iommu_attach_device,
	.detach_dev = rk_iommu_detach_device,
	.map = rk_iommu_map,
	.unmap = rk_iommu_unmap,
	.add_device = rk_iommu_add_device,
	.remove_device = rk_iommu_remove_device,
	.iova_to_phys = rk_iommu_iova_to_phys,
	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
};

static int rk_iommu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rk_iommu *iommu;
	struct resource *res;

	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	platform_set_drvdata(pdev, iommu);
	iommu->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iommu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(iommu->base))
		return PTR_ERR(iommu->base);

	iommu->irq = platform_get_irq(pdev, 0);
	if (iommu->irq < 0) {
		dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq);
		return -ENXIO;
	}

	return 0;
}

static int rk_iommu_remove(struct platform_device *pdev)
{
	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id rk_iommu_dt_ids[] = {
	{ .compatible = "rockchip,iommu" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);
#endif

static struct platform_driver rk_iommu_driver = {
	.probe = rk_iommu_probe,
	.remove = rk_iommu_remove,
	.driver = {
		.name = "rk_iommu",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(rk_iommu_dt_ids),
	},
};

static int __init rk_iommu_init(void)
{
	int ret;

	ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
	if (ret)
		return ret;

	return platform_driver_register(&rk_iommu_driver);
}
static void __exit rk_iommu_exit(void)
{
	platform_driver_unregister(&rk_iommu_driver);
}

subsys_initcall(rk_iommu_init);
module_exit(rk_iommu_exit);

MODULE_DESCRIPTION("IOMMU API for Rockchip");
MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
MODULE_ALIAS("platform:rockchip-iommu");
MODULE_LICENSE("GPL v2");