/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 * Author: Varun Sethi <varun.sethi@freescale.com>
 */

#define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__

#include "fsl_pamu_domain.h"

#include <sysdev/fsl_pci.h>

/*
 * Global spinlock that needs to be held while
 * configuring PAMU.
 */
static DEFINE_SPINLOCK(iommu_lock);

static struct kmem_cache *fsl_pamu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static DEFINE_SPINLOCK(device_domain_lock);

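/*
 * Set up the slab caches backing fsl_dma_domain and per-device
 * device_domain_info allocations.
 */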
static int __init iommu_init_mempool(void)
{
        fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
                                                  sizeof(struct fsl_dma_domain),
                                                  0,
                                                  SLAB_HWCACHE_ALIGN,
                                                  NULL);
        if (!fsl_pamu_domain_cache) {
                pr_debug("Couldn't create fsl iommu_domain cache\n");
                return -ENOMEM;
        }

        iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
                                                sizeof(struct device_domain_info),
                                                0,
                                                SLAB_HWCACHE_ALIGN,
                                                NULL);
        if (!iommu_devinfo_cache) {
                pr_debug("Couldn't create devinfo cache\n");
                kmem_cache_destroy(fsl_pamu_domain_cache);
                return -ENOMEM;
        }

        return 0;
}

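/*
 * Translate an IOVA to the physical address programmed into the
 * domain's DMA window(s). A multi-window geometry is split into equal
 * power-of-two subwindows: e.g. a 1 GiB geometry with 16 subwindows
 * gives 64 MiB subwindows, so IOVA 0x5000000 (with aperture_start 0)
 * falls in subwindow 1. Returns 0 if the windows are not configured
 * or the selected window is not valid.
 */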
static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
{
        u32 win_cnt = dma_domain->win_cnt;
        struct dma_window *win_ptr = &dma_domain->win_arr[0];
        struct iommu_domain_geometry *geom;

        geom = &dma_domain->iommu_domain->geometry;

        if (!win_cnt || !dma_domain->geom_size) {
                pr_debug("Number of windows/geometry not configured for the domain\n");
                return 0;
        }

        if (win_cnt > 1) {
                u64 subwin_size;
                dma_addr_t subwin_iova;
                u32 wnd;

                subwin_size = dma_domain->geom_size >> ilog2(win_cnt);
                subwin_iova = iova & ~(subwin_size - 1);
                wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size);
                win_ptr = &dma_domain->win_arr[wnd];
        }

        if (win_ptr->valid)
                return win_ptr->paddr + (iova & (win_ptr->size - 1));

        return 0;
}

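/* Program a SPAACE entry for each valid subwindow of the domain */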
static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
{
        struct dma_window *sub_win_ptr = &dma_domain->win_arr[0];
        int i, ret = 0;    /* no valid subwindows configured is not an error */
        unsigned long rpn, flags;

        for (i = 0; i < dma_domain->win_cnt; i++) {
                if (sub_win_ptr[i].valid) {
                        rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT;
                        spin_lock_irqsave(&iommu_lock, flags);
                        ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
                                                 sub_win_ptr[i].size,
                                                 ~(u32)0,
                                                 rpn,
                                                 dma_domain->snoop_id,
                                                 dma_domain->stash_id,
                                                 (i > 0) ? 1 : 0,
                                                 sub_win_ptr[i].prot);
                        spin_unlock_irqrestore(&iommu_lock, flags);
                        if (ret) {
                                pr_debug("SPAACE configuration failed for liodn %d\n",
                                         liodn);
                                return ret;
                        }
                }
        }

        return ret;
}

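/* Program the primary PAACE entry (single-window case) for the LIODN */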
static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
{
        int ret;
        struct dma_window *wnd = &dma_domain->win_arr[0];
        phys_addr_t wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;
        unsigned long flags;

        spin_lock_irqsave(&iommu_lock, flags);
        ret = pamu_config_ppaace(liodn, wnd_addr,
                                 wnd->size,
                                 ~(u32)0,
                                 wnd->paddr >> PAMU_PAGE_SHIFT,
                                 dma_domain->snoop_id, dma_domain->stash_id,
                                 0, wnd->prot);
        spin_unlock_irqrestore(&iommu_lock, flags);
        if (ret)
                pr_debug("PAACE configuration failed for liodn %d\n", liodn);

        return ret;
}

/* Map the DMA window corresponding to the LIODN */
static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
{
        if (dma_domain->win_cnt > 1)
                return map_subwins(liodn, dma_domain);
        else
                return map_win(liodn, dma_domain);
}

/* Update window/subwindow mapping for the LIODN */
static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
        int ret;
        struct dma_window *wnd = &dma_domain->win_arr[wnd_nr];
        unsigned long flags;

        spin_lock_irqsave(&iommu_lock, flags);
        if (dma_domain->win_cnt > 1) {
                ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr,
                                         wnd->size,
                                         ~(u32)0,
                                         wnd->paddr >> PAMU_PAGE_SHIFT,
                                         dma_domain->snoop_id,
                                         dma_domain->stash_id,
                                         (wnd_nr > 0) ? 1 : 0,
                                         wnd->prot);
                if (ret)
                        pr_debug("Subwindow reconfiguration failed for liodn %d\n",
                                 liodn);
        } else {
                phys_addr_t wnd_addr;

                wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;

                ret = pamu_config_ppaace(liodn, wnd_addr,
                                         wnd->size,
                                         ~(u32)0,
                                         wnd->paddr >> PAMU_PAGE_SHIFT,
                                         dma_domain->snoop_id, dma_domain->stash_id,
                                         0, wnd->prot);
                if (ret)
                        pr_debug("Window reconfiguration failed for liodn %d\n",
                                 liodn);
        }

        spin_unlock_irqrestore(&iommu_lock, flags);

        return ret;
}

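/* Update the stash destination for all (sub)windows of the LIODN */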
static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
                              u32 val)
{
        int ret = 0, i;
        unsigned long flags;

        spin_lock_irqsave(&iommu_lock, flags);
        if (!dma_domain->win_arr) {
                pr_debug("Windows not configured, stash destination update failed for liodn %d\n",
                         liodn);
                spin_unlock_irqrestore(&iommu_lock, flags);
                return -EINVAL;
        }

        for (i = 0; i < dma_domain->win_cnt; i++) {
                ret = pamu_update_paace_stash(liodn, i, val);
                if (ret) {
                        pr_debug("Failed to update SPAACE %d field for liodn %d\n",
                                 i, liodn);
                        spin_unlock_irqrestore(&iommu_lock, flags);
                        return ret;
                }
        }

        spin_unlock_irqrestore(&iommu_lock, flags);

        return ret;
}

/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(int liodn, struct device *dev,
                          struct fsl_dma_domain *dma_domain,
                          struct iommu_domain_geometry *geom_attr,
                          u32 win_cnt)
{
        phys_addr_t window_addr, window_size;
        phys_addr_t subwin_size;
        int ret = 0, i;
        u32 omi_index = ~(u32)0;
        unsigned long flags;

        /*
         * Configure the omi_index at geometry setup time.
         * This is a static value which depends on the type of
         * device and does not change thereafter.
         */
        get_ome_index(&omi_index, dev);

        window_addr = geom_attr->aperture_start;
        window_size = dma_domain->geom_size;

        spin_lock_irqsave(&iommu_lock, flags);
        ret = pamu_disable_liodn(liodn);
        if (!ret)
                ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index,
                                         0, dma_domain->snoop_id,
                                         dma_domain->stash_id, win_cnt, 0);
        spin_unlock_irqrestore(&iommu_lock, flags);
        if (ret) {
                pr_debug("PAACE configuration failed for liodn %d, win_cnt = %d\n",
                         liodn, win_cnt);
                return ret;
        }

        if (win_cnt > 1) {
                subwin_size = window_size >> ilog2(win_cnt);
                for (i = 0; i < win_cnt; i++) {
                        spin_lock_irqsave(&iommu_lock, flags);
                        ret = pamu_disable_spaace(liodn, i);
                        if (!ret)
                                ret = pamu_config_spaace(liodn, win_cnt, i,
                                                         subwin_size, omi_index,
                                                         0, dma_domain->snoop_id,
                                                         dma_domain->stash_id,
                                                         0, 0);
                        spin_unlock_irqrestore(&iommu_lock, flags);
                        if (ret) {
                                pr_debug("SPAACE configuration failed for liodn %d\n",
                                         liodn);
                                return ret;
                        }
                }
        }

        return ret;
}

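/* Validate that a window size/address pair meets PAMU alignment rules */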
static int check_size(u64 size, dma_addr_t iova)
{
        /*
         * Size must be a power of two and at least be equal
         * to PAMU page size.
         */
        if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
                pr_debug("Size too small or not a power of two\n");
                return -EINVAL;
        }

        /* iova must be aligned to the window size */
        if (iova & (size - 1)) {
                pr_debug("Address is not aligned with window size\n");
                return -EINVAL;
        }

        return 0;
}

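/*
 * Allocate a DMA domain with the stash/snoop IDs marked invalid and
 * the window count defaulted to the maximum subwindow count.
 */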
static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
{
        struct fsl_dma_domain *domain;

        domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
        if (!domain)
                return NULL;

        domain->stash_id = ~(u32)0;
        domain->snoop_id = ~(u32)0;
        domain->win_cnt = pamu_get_max_subwin_cnt();
        domain->geom_size = 0;

        INIT_LIST_HEAD(&domain->devices);

        spin_lock_init(&domain->domain_lock);

        return domain;
}

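/*
 * Unlink a device from its domain: free any subwindows, disable the
 * LIODN and clear the device's archdata domain pointer. Called with
 * the domain lock held.
 */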
static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
{
        unsigned long flags;

        list_del(&info->link);
        spin_lock_irqsave(&iommu_lock, flags);
        if (win_cnt > 1)
                pamu_free_subwins(info->liodn);
        pamu_disable_liodn(info->liodn);
        spin_unlock_irqrestore(&iommu_lock, flags);
        spin_lock_irqsave(&device_domain_lock, flags);
        info->dev->archdata.iommu_domain = NULL;
        kmem_cache_free(iommu_devinfo_cache, info);
        spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
        struct device_domain_info *info, *tmp;
        unsigned long flags;

        spin_lock_irqsave(&dma_domain->domain_lock, flags);
        /* Remove the device (all devices if dev == NULL) from the domain list */
        list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
                if (!dev || (info->dev == dev))
                        remove_device_ref(info, dma_domain->win_cnt);
        }
        spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}

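/*
 * Bind a LIODN to the domain. A device already attached to a
 * different domain is detached from it first.
 */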
static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
{
        struct device_domain_info *info, *old_domain_info;
        unsigned long flags;

        spin_lock_irqsave(&device_domain_lock, flags);
        /*
         * Check if the device is already attached to a domain.
         * If it is attached to a different domain, detach it first.
         */
        old_domain_info = dev->archdata.iommu_domain;
        if (old_domain_info && old_domain_info->domain != dma_domain) {
                spin_unlock_irqrestore(&device_domain_lock, flags);
                detach_device(dev, old_domain_info->domain);
                spin_lock_irqsave(&device_domain_lock, flags);
        }

        info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
        if (!info) {
                /* Allocation failure; leave the device unattached */
                spin_unlock_irqrestore(&device_domain_lock, flags);
                return;
        }

        info->dev = dev;
        info->liodn = liodn;
        info->domain = dma_domain;

        list_add(&info->link, &dma_domain->devices);
        /*
         * In case of devices with multiple LIODNs just store
         * the info for the first LIODN as all
         * LIODNs share the same domain
         */
        if (!dev->archdata.iommu_domain)
                dev->archdata.iommu_domain = info;
        spin_unlock_irqrestore(&device_domain_lock, flags);
}

static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
                                         dma_addr_t iova)
{
        struct fsl_dma_domain *dma_domain = domain->priv;

        if (iova < domain->geometry.aperture_start ||
            iova > domain->geometry.aperture_end)
                return 0;

        return get_phys_addr(dma_domain, iova);
}

static bool fsl_pamu_capable(enum iommu_cap cap)
{
        return cap == IOMMU_CAP_CACHE_COHERENCY;
}

static void fsl_pamu_domain_destroy(struct iommu_domain *domain)
{
        struct fsl_dma_domain *dma_domain = domain->priv;

        domain->priv = NULL;

        /* remove all the devices from the device list */
        detach_device(NULL, dma_domain);

        dma_domain->enabled = 0;
        dma_domain->mapped = 0;

        /* free the window array allocated in fsl_pamu_set_windows() */
        kfree(dma_domain->win_arr);

        kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}

static int fsl_pamu_domain_init(struct iommu_domain *domain)
{
        struct fsl_dma_domain *dma_domain;

        dma_domain = iommu_alloc_dma_domain();
        if (!dma_domain) {
                pr_debug("dma_domain allocation failed\n");
                return -ENOMEM;
        }
        domain->priv = dma_domain;
        dma_domain->iommu_domain = domain;
        /* default geometry: 64 GB, i.e. the maximum system address */
        domain->geometry.aperture_start = 0;
        domain->geometry.aperture_end = (1ULL << 36) - 1;
        domain->geometry.force_aperture = true;

        return 0;
}

/* Configure geometry settings for all LIODNs associated with domain */
static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
                                    struct iommu_domain_geometry *geom_attr,
                                    u32 win_cnt)
{
        struct device_domain_info *info;
        int ret = 0;

        list_for_each_entry(info, &dma_domain->devices, link) {
                ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
                                     geom_attr, win_cnt);
                if (ret)
                        break;
        }

        return ret;
}

/* Update stash destination for all LIODNs associated with the domain */
static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
{
        struct device_domain_info *info;
        int ret = 0;

        list_for_each_entry(info, &dma_domain->devices, link) {
                ret = update_liodn_stash(info->liodn, dma_domain, val);
                if (ret)
                        break;
        }

        return ret;
}

/* Update domain mappings for all LIODNs associated with the domain */
static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
        struct device_domain_info *info;
        int ret = 0;

        list_for_each_entry(info, &dma_domain->devices, link) {
                ret = update_liodn(info->liodn, dma_domain, wnd_nr);
                if (ret)
                        break;
        }
        return ret;
}

static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
        struct device_domain_info *info;
        int ret = 0;

        list_for_each_entry(info, &dma_domain->devices, link) {
                if (dma_domain->win_cnt == 1 && dma_domain->enabled) {
                        ret = pamu_disable_liodn(info->liodn);
                        if (!ret)
                                dma_domain->enabled = 0;
                } else {
                        ret = pamu_disable_spaace(info->liodn, wnd_nr);
                }
        }

        return ret;
}

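/* iommu_ops callback: invalidate a DMA window and disable it on every attached LIODN */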
static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
        struct fsl_dma_domain *dma_domain = domain->priv;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dma_domain->domain_lock, flags);
        if (!dma_domain->win_arr) {
                pr_debug("Number of windows not configured\n");
                spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                return;
        }

        if (wnd_nr >= dma_domain->win_cnt) {
                pr_debug("Invalid window index\n");
                spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                return;
        }

        if (dma_domain->win_arr[wnd_nr].valid) {
                ret = disable_domain_win(dma_domain, wnd_nr);
                if (!ret) {
                        dma_domain->win_arr[wnd_nr].valid = 0;
                        dma_domain->mapped--;
                }
        }

        spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}

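/*
 * iommu_ops callback: validate the requested window against the domain
 * geometry and program the mapping on every attached LIODN.
 */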
static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
                                  phys_addr_t paddr, u64 size, int prot)
{
        struct fsl_dma_domain *dma_domain = domain->priv;
        struct dma_window *wnd;
        int pamu_prot = 0;
        int ret;
        unsigned long flags;
        u64 win_size;

        if (prot & IOMMU_READ)
                pamu_prot |= PAACE_AP_PERMS_QUERY;
        if (prot & IOMMU_WRITE)
                pamu_prot |= PAACE_AP_PERMS_UPDATE;

        spin_lock_irqsave(&dma_domain->domain_lock, flags);
        if (!dma_domain->win_arr) {
                pr_debug("Number of windows not configured\n");
                spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                return -ENODEV;
        }

        if (wnd_nr >= dma_domain->win_cnt) {
                pr_debug("Invalid window index\n");
                spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                return -EINVAL;
        }

        win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
        if (size > win_size) {
                pr_debug("Invalid window size\n");
                spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                return -EINVAL;
        }

        if (dma_domain->win_cnt == 1) {
                if (dma_domain->enabled) {
                        pr_debug("Disable the window before updating the mapping\n");
                        spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                        return -EBUSY;
                }

                ret = check_size(size, domain->geometry.aperture_start);
                if (ret) {
                        pr_debug("Aperture start not aligned to the size\n");
                        spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                        return -EINVAL;
                }
        }

        wnd = &dma_domain->win_arr[wnd_nr];
        if (!wnd->valid) {
                wnd->paddr = paddr;
                wnd->size = size;
                wnd->prot = pamu_prot;

                ret = update_domain_mapping(dma_domain, wnd_nr);
                if (!ret) {
                        wnd->valid = 1;
                        dma_domain->mapped++;
                }
        } else {
                pr_debug("Disable the window before updating the mapping\n");
                ret = -EBUSY;
        }

        spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

        return ret;
}

/*
 * Attach the LIODN to the DMA domain and configure the geometry
 * and window mappings.
 */
static int handle_attach_device(struct fsl_dma_domain *dma_domain,
                                struct device *dev, const u32 *liodn,
                                int num)
{
        unsigned long flags;
        struct iommu_domain *domain = dma_domain->iommu_domain;
        int ret = 0;
        int i;

        spin_lock_irqsave(&dma_domain->domain_lock, flags);
        for (i = 0; i < num; i++) {
                /* Ensure that LIODN value is valid */
                if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
                        pr_debug("Invalid liodn %d, attach device failed for %s\n",
                                 liodn[i], dev->of_node->full_name);
                        ret = -EINVAL;
                        break;
                }

                attach_device(dma_domain, liodn[i], dev);
                /*
                 * Check if geometry has already been configured
                 * for the domain. If yes, set the geometry for
                 * the LIODN.
                 */
                if (dma_domain->win_arr) {
                        u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;

                        ret = pamu_set_liodn(liodn[i], dev, dma_domain,
                                             &domain->geometry, win_cnt);
                        if (ret)
                                break;
                        if (dma_domain->mapped) {
                                /*
                                 * Create window/subwindow mapping for
                                 * the LIODN.
                                 */
                                ret = map_liodn(liodn[i], dma_domain);
                                if (ret)
                                        break;
                        }
                }
        }
        spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

        return ret;
}

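/*
 * iommu_ops callback: look up the device's "fsl,liodn" property (or
 * that of its PCI controller) and attach each LIODN to the domain.
 */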
static int fsl_pamu_attach_device(struct iommu_domain *domain,
                                  struct device *dev)
{
        struct fsl_dma_domain *dma_domain = domain->priv;
        const u32 *liodn;
        u32 liodn_cnt;
        int len, ret = 0;
        struct pci_dev *pdev = NULL;
        struct pci_controller *pci_ctl;

        /*
         * Use LIODN of the PCI controller while attaching a
         * PCI device.
         */
        if (dev_is_pci(dev)) {
                pdev = to_pci_dev(dev);
                pci_ctl = pci_bus_to_host(pdev->bus);
                /*
                 * make dev point to pci controller device
                 * so we can get the LIODN programmed by
                 * u-boot.
                 */
                dev = pci_ctl->parent;
        }

        liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
        if (liodn) {
                liodn_cnt = len / sizeof(u32);
                ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt);
        } else {
                pr_debug("missing fsl,liodn property at %s\n",
                         dev->of_node->full_name);
                ret = -EINVAL;
        }

        return ret;
}

static void fsl_pamu_detach_device(struct iommu_domain *domain,
                                   struct device *dev)
{
        struct fsl_dma_domain *dma_domain = domain->priv;
        const u32 *prop;
        int len;
        struct pci_dev *pdev = NULL;
        struct pci_controller *pci_ctl;

        /*
         * Use LIODN of the PCI controller while detaching a
         * PCI device.
         */
        if (dev_is_pci(dev)) {
                pdev = to_pci_dev(dev);
                pci_ctl = pci_bus_to_host(pdev->bus);
                /*
                 * make dev point to pci controller device
                 * so we can get the LIODN programmed by
                 * u-boot.
                 */
                dev = pci_ctl->parent;
        }

        prop = of_get_property(dev->of_node, "fsl,liodn", &len);
        if (prop)
                detach_device(dev, dma_domain);
        else
                pr_debug("missing fsl,liodn property at %s\n",
                         dev->of_node->full_name);
}

static int configure_domain_geometry(struct iommu_domain *domain, void *data)
{
        struct iommu_domain_geometry *geom_attr = data;
        struct fsl_dma_domain *dma_domain = domain->priv;
        dma_addr_t geom_size;
        unsigned long flags;

        geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1;
        /*
         * Sanity check the geometry size. Also, we do not support
         * DMA outside of the geometry.
         */
        if (check_size(geom_size, geom_attr->aperture_start) ||
            !geom_attr->force_aperture) {
                pr_debug("Invalid PAMU geometry attributes\n");
                return -EINVAL;
        }

        spin_lock_irqsave(&dma_domain->domain_lock, flags);
        if (dma_domain->enabled) {
                pr_debug("Can't set geometry attributes as domain is active\n");
                spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                return -EBUSY;
        }

        /* Copy the domain geometry information */
        memcpy(&domain->geometry, geom_attr,
               sizeof(struct iommu_domain_geometry));
        dma_domain->geom_size = geom_size;

        spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

        return 0;
}

/* Set the domain stash attribute */
static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
{
        struct pamu_stash_attribute *stash_attr = data;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dma_domain->domain_lock, flags);

        memcpy(&dma_domain->dma_stash, stash_attr,
               sizeof(struct pamu_stash_attribute));

        dma_domain->stash_id = get_stash_id(stash_attr->cache,
                                            stash_attr->cpu);
        if (dma_domain->stash_id == ~(u32)0) {
                pr_debug("Invalid stash attributes\n");
                spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                return -EINVAL;
        }

        ret = update_domain_stash(dma_domain, dma_domain->stash_id);

        spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

        return ret;
}

/* Configure domain dma state i.e. enable/disable DMA */
static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
{
        struct device_domain_info *info;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dma_domain->domain_lock, flags);

        if (enable && !dma_domain->mapped) {
                pr_debug("Can't enable DMA domain without valid mapping\n");
                spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                return -ENODEV;
        }

        dma_domain->enabled = enable;
        list_for_each_entry(info, &dma_domain->devices, link) {
                ret = (enable) ? pamu_enable_liodn(info->liodn) :
                        pamu_disable_liodn(info->liodn);
                if (ret)
                        pr_debug("Unable to set dma state for liodn %d\n",
                                 info->liodn);
        }
        spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

        return 0;
}

static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
                                    enum iommu_attr attr_type, void *data)
{
        struct fsl_dma_domain *dma_domain = domain->priv;
        int ret = 0;

        switch (attr_type) {
        case DOMAIN_ATTR_GEOMETRY:
                ret = configure_domain_geometry(domain, data);
                break;
        case DOMAIN_ATTR_FSL_PAMU_STASH:
                ret = configure_domain_stash(dma_domain, data);
                break;
        case DOMAIN_ATTR_FSL_PAMU_ENABLE:
                ret = configure_domain_dma_state(dma_domain, *(int *)data);
                break;
        default:
                pr_debug("Unsupported attribute type\n");
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
                                    enum iommu_attr attr_type, void *data)
{
        struct fsl_dma_domain *dma_domain = domain->priv;
        int ret = 0;

        switch (attr_type) {
        case DOMAIN_ATTR_FSL_PAMU_STASH:
                memcpy(data, &dma_domain->dma_stash,
                       sizeof(struct pamu_stash_attribute));
                break;
        case DOMAIN_ATTR_FSL_PAMU_ENABLE:
                *(int *)data = dma_domain->enabled;
                break;
        case DOMAIN_ATTR_FSL_PAMUV1:
                *(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
                break;
        default:
                pr_debug("Unsupported attribute type\n");
                ret = -EINVAL;
                break;
        }

        return ret;
}

static struct iommu_group *get_device_iommu_group(struct device *dev)
{
        struct iommu_group *group;

        group = iommu_group_get(dev);
        if (!group)
                group = iommu_group_alloc();

        return group;
}

static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{
        u32 version;

        /* Check the PCI controller version number by reading the BRR1 register */
        version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
        version &= PCI_FSL_BRR1_VER;
        /* If the PCI controller version is >= 0x204 we can partition endpoints */
        return version >= 0x204;
}

/* Get iommu group information from peer devices or devices on the parent bus */
static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
{
        struct pci_dev *tmp;
        struct iommu_group *group;
        struct pci_bus *bus = pdev->bus;

        /*
         * Traverse the PCI bus device list to get
         * the shared iommu group.
         */
        while (bus) {
                list_for_each_entry(tmp, &bus->devices, bus_list) {
                        if (tmp == pdev)
                                continue;
                        group = iommu_group_get(&tmp->dev);
                        if (group)
                                return group;
                }

                bus = bus->parent;
        }

        return NULL;
}

static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
        struct pci_controller *pci_ctl;
        bool pci_endpt_partioning;
        struct iommu_group *group = NULL;

        pci_ctl = pci_bus_to_host(pdev->bus);
        pci_endpt_partioning = check_pci_ctl_endpt_part(pci_ctl);
        /* We can partition PCIe devices, so assign a device group to the device */
        if (pci_endpt_partioning) {
                group = iommu_group_get_for_dev(&pdev->dev);

                /*
                 * The PCIe controller is not a partitionable entity,
                 * so free the controller device's iommu_group.
                 */
                if (pci_ctl->parent->iommu_group)
                        iommu_group_remove_device(pci_ctl->parent);
        } else {
                /*
                 * All devices connected to the controller will share the
                 * PCI controller's device group. If this is the first
                 * device to be probed for the pci controller, copy the
                 * device group information from the PCI controller device
                 * node and remove the PCI controller iommu group.
                 * For subsequent devices, the iommu group information can
                 * be obtained from sibling devices (i.e. from the bus_devices
                 * link list).
                 */
                if (pci_ctl->parent->iommu_group) {
                        group = get_device_iommu_group(pci_ctl->parent);
                        iommu_group_remove_device(pci_ctl->parent);
                } else {
                        group = get_shared_pci_device_group(pdev);
                }
        }

        if (!group)
                group = ERR_PTR(-ENODEV);

        return group;
}

static int fsl_pamu_add_device(struct device *dev)
{
        struct iommu_group *group = ERR_PTR(-ENODEV);
        struct pci_dev *pdev;
        const u32 *prop;
        int ret = 0, len;

        /*
         * For platform devices we allocate a separate group for
         * each of the devices.
         */
        if (dev_is_pci(dev)) {
                pdev = to_pci_dev(dev);
                /* Don't create device groups for virtual PCI bridges */
                if (pdev->subordinate)
                        return 0;

                group = get_pci_device_group(pdev);

        } else {
                prop = of_get_property(dev->of_node, "fsl,liodn", &len);
                if (prop)
                        group = get_device_iommu_group(dev);
        }

        if (IS_ERR(group))
                return PTR_ERR(group);

        /*
         * Check if device has already been added to an iommu group.
         * Group could have already been created for a PCI device in
         * the iommu_group_get_for_dev path.
         */
        if (!dev->iommu_group)
                ret = iommu_group_add_device(group, dev);

        iommu_group_put(group);
        return ret;
}

static void fsl_pamu_remove_device(struct device *dev)
{
        iommu_group_remove_device(dev);
}

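/*
 * iommu_ops callback: split the domain geometry into w_count
 * power-of-two subwindows and allocate the window array. Requires the
 * geometry to be configured and DMA to be disabled.
 */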
static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
{
        struct fsl_dma_domain *dma_domain = domain->priv;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dma_domain->domain_lock, flags);
        /* Ensure domain is inactive i.e. DMA should be disabled for the domain */
        if (dma_domain->enabled) {
                pr_debug("Can't set geometry attributes as domain is active\n");
                spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                return -EBUSY;
        }

        /* Ensure that the geometry has been set for the domain */
        if (!dma_domain->geom_size) {
                pr_debug("Please configure geometry before setting the number of windows\n");
                spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                return -EINVAL;
        }

        /*
         * Ensure we have valid window count i.e. it should be less than
         * maximum permissible limit and should be a power of two.
         */
        if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
                pr_debug("Invalid window count\n");
                spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                return -EINVAL;
        }

        ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
                                       w_count > 1 ? w_count : 0);
        if (!ret) {
                kfree(dma_domain->win_arr);
                dma_domain->win_arr = kcalloc(w_count,
                                              sizeof(*dma_domain->win_arr),
                                              GFP_ATOMIC);
                if (!dma_domain->win_arr) {
                        spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
                        return -ENOMEM;
                }
                dma_domain->win_cnt = w_count;
        }
        spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

        return ret;
}

static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
{
        struct fsl_dma_domain *dma_domain = domain->priv;

        return dma_domain->win_cnt;
}

static const struct iommu_ops fsl_pamu_ops = {
        .capable                = fsl_pamu_capable,
        .domain_init            = fsl_pamu_domain_init,
        .domain_destroy         = fsl_pamu_domain_destroy,
        .attach_dev             = fsl_pamu_attach_device,
        .detach_dev             = fsl_pamu_detach_device,
        .domain_window_enable   = fsl_pamu_window_enable,
        .domain_window_disable  = fsl_pamu_window_disable,
        .domain_get_windows     = fsl_pamu_get_windows,
        .domain_set_windows     = fsl_pamu_set_windows,
        .iova_to_phys           = fsl_pamu_iova_to_phys,
        .domain_set_attr        = fsl_pamu_set_domain_attr,
        .domain_get_attr        = fsl_pamu_get_domain_attr,
        .add_device             = fsl_pamu_add_device,
        .remove_device          = fsl_pamu_remove_device,
};

int __init pamu_domain_init(void)
{
        int ret = 0;

        ret = iommu_init_mempool();
        if (ret)
                return ret;

        bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
        bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);

        return ret;
}
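
/*
 * Illustrative usage sketch (not part of this file): a caller such as
 * a VFIO backend would typically drive this driver through the generic
 * IOMMU API of this kernel generation. Names below follow that API;
 * the geometry, window count and addresses are made-up examples.
 *
 *	struct iommu_domain_geometry geom = {
 *		.aperture_start = 0,
 *		.aperture_end   = SZ_1G - 1,
 *		.force_aperture = true,
 *	};
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *	u32 wins = 16;
 *	int enable = 1;
 *
 *	iommu_domain_set_attr(dom, DOMAIN_ATTR_GEOMETRY, &geom);
 *	iommu_domain_set_attr(dom, DOMAIN_ATTR_WINDOWS, &wins);
 *	iommu_attach_device(dom, dev);
 *	iommu_domain_window_enable(dom, 0, paddr, SZ_64M,
 *				   IOMMU_READ | IOMMU_WRITE);
 *	iommu_domain_set_attr(dom, DOMAIN_ATTR_FSL_PAMU_ENABLE, &enable);
 */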