/*
 * IOMMU API for GART in Tegra20
 *
 * Copyright (c) 2010-2012, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define pr_fmt(fmt)	"%s(): " fmt, __func__

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/of.h>

#include <asm/cacheflush.h>

/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES	(SZ_4K)

#define GART_REG_BASE		0x24
#define GART_CONFIG		(0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR		(0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA		(0x2c - GART_REG_BASE)
#define GART_ENTRY_PHYS_ADDR_VALID	(1 << 31)

#define GART_PAGE_SHIFT		12
#define GART_PAGE_SIZE		(1 << GART_PAGE_SHIFT)
#define GART_PAGE_MASK						\
	(~(GART_PAGE_SIZE - 1) & ~GART_ENTRY_PHYS_ADDR_VALID)

struct gart_client {
	struct device		*dev;
	struct list_head	list;
};

struct gart_device {
	void __iomem		*regs;
	u32			*savedata;
	u32			page_count;	/* total remappable size */
	dma_addr_t		iovmm_base;	/* offset to vmm_area */
	spinlock_t		pte_lock;	/* for pagetable */
	struct list_head	client;
	spinlock_t		client_lock;	/* for client list */
	struct device		*dev;
};

struct gart_domain {
	struct iommu_domain	domain;		/* generic domain handle */
	struct gart_device	*gart;		/* link to gart device */
};

static struct gart_device *gart_handle; /* unique for a system */

#define GART_PTE(_pfn)						\
	(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))

static struct gart_domain *to_gart_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct gart_domain, domain);
}

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back to ensure the APB/AHB bus transaction is
 * complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)	((void)readl((gart)->regs + GART_CONFIG))

#define for_each_gart_pte(gart, iova)					\
	for (iova = gart->iovmm_base;					\
	     iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
	     iova += GART_PAGE_SIZE)

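/*
 * PTEs are accessed indirectly: the IOVA of the entry is written to
 * GART_ENTRY_ADDR and the entry itself is then written or read through
 * GART_ENTRY_DATA.
 */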
static inline void gart_set_pte(struct gart_device *gart,
				unsigned long offs, u32 pte)
{
	writel(offs, gart->regs + GART_ENTRY_ADDR);
	writel(pte, gart->regs + GART_ENTRY_DATA);

	dev_dbg(gart->dev, "%s %08lx:%08x\n",
		pte ? "map" : "unmap", offs, pte & GART_PAGE_MASK);
}

static inline unsigned long gart_read_pte(struct gart_device *gart,
					  unsigned long offs)
{
	unsigned long pte;

	writel(offs, gart->regs + GART_ENTRY_ADDR);
	pte = readl(gart->regs + GART_ENTRY_DATA);

	return pte;
}

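/*
 * Program every PTE in the aperture, either from the supplied save area
 * (as on resume) or with zero (initial setup), then enable translation
 * and flush the register writes.
 */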
static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
	unsigned long iova;

	for_each_gart_pte(gart, iova)
		gart_set_pte(gart, iova, data ? *(data++) : 0);

	writel(1, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);
}

#ifdef DEBUG
static void gart_dump_table(struct gart_device *gart)
{
	unsigned long iova;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova) {
		unsigned long pte;

		pte = gart_read_pte(gart, iova);

		dev_dbg(gart->dev, "%s %08lx:%08lx\n",
			(GART_ENTRY_PHYS_ADDR_VALID & pte) ? "v" : " ",
			iova, pte & GART_PAGE_MASK);
	}
	spin_unlock_irqrestore(&gart->pte_lock, flags);
}
#else
static inline void gart_dump_table(struct gart_device *gart)
{
}
#endif

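/*
 * Check that the requested IOVA range lies entirely within the GART's
 * remappable aperture.
 */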
static inline bool gart_iova_range_valid(struct gart_device *gart,
					 unsigned long iova, size_t bytes)
{
	unsigned long iova_start, iova_end, gart_start, gart_end;

	iova_start = iova;
	iova_end = iova_start + bytes - 1;
	gart_start = gart->iovmm_base;
	gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1;

	if (iova_start < gart_start)
		return false;
	if (iova_end > gart_end)
		return false;
	return true;
}

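/*
 * Attaching only records the device in the GART's client list so that a
 * second attach of the same device can be rejected; no per-device
 * hardware state is programmed here.
 */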
static int gart_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	struct gart_client *client, *c;
	int err = 0;

	client = devm_kzalloc(gart->dev, sizeof(*c), GFP_KERNEL);
	if (!client)
		return -ENOMEM;
	client->dev = dev;

	spin_lock(&gart->client_lock);
	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			dev_err(gart->dev,
				"%s is already attached\n", dev_name(dev));
			err = -EINVAL;
			goto fail;
		}
	}
	list_add(&client->list, &gart->client);
	spin_unlock(&gart->client_lock);
	dev_dbg(gart->dev, "Attached %s\n", dev_name(dev));
	return 0;

fail:
	devm_kfree(gart->dev, client);
	spin_unlock(&gart->client_lock);
	return err;
}

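/*
 * Drop the device from the client list; if it was never attached, the
 * walk falls through and only an error is logged.
 */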
static void gart_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	struct gart_client *c;

	spin_lock(&gart->client_lock);

	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			list_del(&c->list);
			devm_kfree(gart->dev, c);
			dev_dbg(gart->dev, "Detached %s\n", dev_name(dev));
			goto out;
		}
	}
	dev_err(gart->dev, "Couldn't find\n");
out:
	spin_unlock(&gart->client_lock);
}

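/*
 * Allocate an unmanaged domain backed by the single system-wide GART
 * (gart_handle) and expose its fixed aperture through the domain
 * geometry.
 */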
static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{
	struct gart_domain *gart_domain;
	struct gart_device *gart;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	gart = gart_handle;
	if (!gart)
		return NULL;

	gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL);
	if (!gart_domain)
		return NULL;

	gart_domain->gart = gart;
	gart_domain->domain.geometry.aperture_start = gart->iovmm_base;
	gart_domain->domain.geometry.aperture_end = gart->iovmm_base +
					gart->page_count * GART_PAGE_SIZE - 1;
	gart_domain->domain.geometry.force_aperture = true;

	return &gart_domain->domain;
}

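/*
 * Before the domain is freed, detach any clients still attached to it so
 * the GART's client list does not keep stale entries.
 */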
static void gart_iommu_domain_free(struct iommu_domain *domain)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;

	if (gart) {
		spin_lock(&gart->client_lock);
		if (!list_empty(&gart->client)) {
			struct gart_client *c;

			list_for_each_entry(c, &gart->client, list)
				gart_iommu_detach_dev(domain, c->dev);
		}
		spin_unlock(&gart->client_lock);
	}

	kfree(gart_domain);
}

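/*
 * Map one GART page: the IOVA must fall inside the aperture and the
 * physical address must correspond to a valid page frame before the PTE
 * is programmed and the write flushed.
 */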
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long flags;
	unsigned long pfn;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pfn = __phys_to_pfn(pa);
	if (!pfn_valid(pfn)) {
		dev_err(gart->dev, "Invalid page: %pa\n", &pa);
		spin_unlock_irqrestore(&gart->pte_lock, flags);
		return -EINVAL;
	}
	gart_set_pte(gart, iova, GART_PTE(pfn));
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

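/*
 * Unmap one GART page by clearing its PTE and flushing the write, and
 * report the unmapped size back to the IOMMU core.
 */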
static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return 0;

	spin_lock_irqsave(&gart->pte_lock, flags);
	gart_set_pte(gart, iova, 0);
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return bytes;
}

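/*
 * Translate an IOVA back to a physical address by reading its PTE and
 * masking off the valid bit; entries that do not reference a valid page
 * frame are treated as errors.
 */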
static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long pte;
	phys_addr_t pa;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, 0))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pte = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);

	pa = (pte & GART_PAGE_MASK);
	if (!pfn_valid(__phys_to_pfn(pa))) {
		dev_err(gart->dev, "No entry for %08llx:%pa\n",
			(unsigned long long)iova, &pa);
		gart_dump_table(gart);
		return -EINVAL;
	}
	return pa;
}

static bool gart_iommu_capable(enum iommu_cap cap)
{
	return false;
}

static const struct iommu_ops gart_iommu_ops = {
	.capable	= gart_iommu_capable,
	.domain_alloc	= gart_iommu_domain_alloc,
	.domain_free	= gart_iommu_domain_free,
	.attach_dev	= gart_iommu_attach_dev,
	.detach_dev	= gart_iommu_detach_dev,
	.map		= gart_iommu_map,
	.map_sg		= default_iommu_map_sg,
	.unmap		= gart_iommu_unmap,
	.iova_to_phys	= gart_iommu_iova_to_phys,
	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
};

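/*
 * Suspend saves every PTE into gart->savedata so that resume can
 * reprogram the whole table from that buffer via do_gart_setup().
 */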
static int tegra_gart_suspend(struct device *dev)
{
	struct gart_device *gart = dev_get_drvdata(dev);
	unsigned long iova;
	u32 *data = gart->savedata;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova)
		*(data++) = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static int tegra_gart_resume(struct device *dev)
{
	struct gart_device *gart = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	do_gart_setup(gart, gart->savedata);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

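/*
 * Probe expects two memory resources: the GART register window and the
 * remappable aperture. It maps the registers, allocates the PTE save
 * area used by suspend/resume, clears the page table and publishes the
 * device through gart_handle.
 */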
static int tegra_gart_probe(struct platform_device *pdev)
{
	struct gart_device *gart;
	struct resource *res, *res_remap;
	void __iomem *gart_regs;
	struct device *dev = &pdev->dev;

	if (gart_handle)
		return -EIO;

	BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

	/* the GART memory aperture is required */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res_remap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res || !res_remap) {
		dev_err(dev, "GART memory aperture expected\n");
		return -ENXIO;
	}

	gart = devm_kzalloc(dev, sizeof(*gart), GFP_KERNEL);
	if (!gart) {
		dev_err(dev, "failed to allocate gart_device\n");
		return -ENOMEM;
	}

	gart_regs = devm_ioremap(dev, res->start, resource_size(res));
	if (!gart_regs) {
		dev_err(dev, "failed to remap GART registers\n");
		return -ENXIO;
	}

	gart->dev = &pdev->dev;
	spin_lock_init(&gart->pte_lock);
	spin_lock_init(&gart->client_lock);
	INIT_LIST_HEAD(&gart->client);
	gart->regs = gart_regs;
	gart->iovmm_base = (dma_addr_t)res_remap->start;
	gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);

	gart->savedata = vmalloc(sizeof(u32) * gart->page_count);
	if (!gart->savedata) {
		dev_err(dev, "failed to allocate context save area\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, gart);
	do_gart_setup(gart, NULL);

	gart_handle = gart;

	return 0;
}

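/*
 * Remove disables translation, frees the PTE save area and clears
 * gart_handle so that a later probe can succeed again.
 */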
static int tegra_gart_remove(struct platform_device *pdev)
{
	struct gart_device *gart = platform_get_drvdata(pdev);

	writel(0, gart->regs + GART_CONFIG);
	if (gart->savedata)
		vfree(gart->savedata);
	gart_handle = NULL;
	return 0;
}

static const struct dev_pm_ops tegra_gart_pm_ops = {
	.suspend	= tegra_gart_suspend,
	.resume		= tegra_gart_resume,
};

static const struct of_device_id tegra_gart_of_match[] = {
	{ .compatible = "nvidia,tegra20-gart", },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_gart_of_match);

static struct platform_driver tegra_gart_driver = {
	.probe		= tegra_gart_probe,
	.remove		= tegra_gart_remove,
	.driver = {
		.name	= "tegra-gart",
		.pm	= &tegra_gart_pm_ops,
		.of_match_table = tegra_gart_of_match,
	},
};

static int tegra_gart_init(void)
{
	return platform_driver_register(&tegra_gart_driver);
}

static void __exit tegra_gart_exit(void)
{
	platform_driver_unregister(&tegra_gart_driver);
}

subsys_initcall(tegra_gart_init);
module_exit(tegra_gart_exit);

MODULE_DESCRIPTION("IOMMU API for GART in Tegra20");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
MODULE_ALIAS("platform:tegra-gart");
MODULE_LICENSE("GPL v2");