/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#define pr_fmt(fmt) "iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static struct ida iommu_group_ida;
static struct mutex iommu_group_mutex;

struct iommu_callback_data {
	const struct iommu_ops *ops;
};

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *domain;
};

struct iommu_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =		\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group);

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	mutex_lock(&iommu_group_mutex);
	ida_remove(&iommu_group_ida, group->id);
	mutex_unlock(&iommu_group_mutex);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the returned
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	mutex_lock(&iommu_group_mutex);

again:
	if (unlikely(0 == ida_pre_get(&iommu_group_ida, GFP_KERNEL))) {
		kfree(group);
		mutex_unlock(&iommu_group_mutex);
		return ERR_PTR(-ENOMEM);
	}

	if (-EAGAIN == ida_get_new(&iommu_group_ida, &group->id))
		goto again;

	mutex_unlock(&iommu_group_mutex);

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		mutex_lock(&iommu_group_mutex);
		ida_remove(&iommu_group_ida, group->id);
		mutex_unlock(&iommu_group_mutex);
		kfree(group);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
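
/*
 * Usage sketch (illustrative only, not called from this file): a
 * hypothetical IOMMU driver allocating a group for a device it just
 * probed.  The function name is made up for the example.
 */
static int __maybe_unused example_driver_make_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = iommu_group_add_device(group, dev);

	/* Drop the allocation reference; the device keeps the group alive */
	iommu_group_put(group);

	return ret;
}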

struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
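
/*
 * Usage sketch (illustrative only): attaching driver-private data to a
 * group together with a release callback, so the data is freed when the
 * last group reference is dropped.  "example_group_priv" is a made-up
 * type standing in for a real driver's per-group state.
 */
struct example_group_priv {
	int example_field;
};

static void example_priv_release(void *iommu_data)
{
	kfree(iommu_data);
}

static void __maybe_unused example_set_priv(struct iommu_group *group)
{
	struct example_group_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (priv)
		iommu_group_set_iommudata(group, priv, example_priv_release);
}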

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_group_create_direct_mappings(struct iommu_group *group,
					      struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_dm_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return 0;

	BUG_ON(!domain->ops->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->ops->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_dm_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;

		start = ALIGN(entry->start, pg_size);
		end   = ALIGN(entry->start + entry->length, pg_size);

		for (addr = start; addr < end; addr += pg_size) {
			phys_addr_t phys_addr;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (phys_addr)
				continue;

			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
			if (ret)
				goto out;
		}
	}

out:
	iommu_put_dm_regions(dev, &mappings);

	return ret;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct iommu_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret) {
		kfree(device);
		return ret;
	}

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		sysfs_remove_link(&dev->kobj, "iommu_group");
		kfree(device);
		return -ENOMEM;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		kfree(device->name);
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}

		sysfs_remove_link(&dev->kobj, "iommu_group");
		kfree(device);
		return ret;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	iommu_group_create_direct_mappings(group, dev);

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain)
		__iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	trace_add_device_to_group(group->id, dev);

	pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct iommu_device *tmp_device, *device = NULL;

	pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
	struct iommu_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct iommu_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}

int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
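
/*
 * Usage sketch (illustrative only): a callback suitable for
 * iommu_group_for_each_dev().  It is invoked for each device in the
 * group in turn; returning non-zero stops the iteration.
 */
static int __maybe_unused example_print_dev(struct device *dev, void *data)
{
	int *count = data;

	dev_info(dev, "member %d of its iommu group\n", (*count)++);
	return 0; /* keep iterating */
}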

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the
 * group reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);
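
/*
 * Usage sketch (illustrative only): a group notifier block that logs
 * device additions.  The notifier actions are defined in
 * include/linux/iommu.h; register with iommu_group_register_notifier().
 */
static int __maybe_unused example_group_notify(struct notifier_block *nb,
					       unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
		dev_info(dev, "joined an iommu group\n");

	return NOTIFY_OK;
}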

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups.  The
 * dma_alias_devfn only supports aliases on the same bus, therefore the
 * search space is quite small (especially since we're really only looking
 * at PCIe devices, and therefore only expect multiple slots on the root
 * complex or downstream switch ports).  It's conceivable though that a
 * pair of multifunction devices could have aliases between them that
 * would cause a loop.  To prevent this, we use a bitmap to track where
 * we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (((pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) &&
		     pdev->dma_alias_devfn == tmp->devfn) ||
		    ((tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) &&
		     tmp->dma_alias_devfn == pdev->devfn)) {

			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev)
{
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	/*
	 * Find the upstream DMA alias for the device.  A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases.  If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any.  No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver.
	 */
	group->default_domain = __iommu_domain_alloc(pdev->dev.bus,
						     IOMMU_DOMAIN_DMA);
	group->domain = group->default_domain;

	return group;
}

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	if (!dev_is_pci(dev))
		return ERR_PTR(-EINVAL);

	group = iommu_group_get_for_pci_dev(to_pci_dev(dev));

	if (IS_ERR(group))
		return group;

	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return ERR_PTR(ret);
	}

	return group;
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}
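
/*
 * Usage sketch (illustrative only): the add_device callback of a
 * hypothetical IOMMU driver using iommu_group_get_for_dev() to join the
 * bus-defined group.
 */
static int __maybe_unused example_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	/* The group now holds the device; drop our lookup reference */
	iommu_group_put(group);
	return 0;
}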

static int add_iommu_group(struct device *dev, void *data)
{
	struct iommu_callback_data *cb = data;
	const struct iommu_ops *ops = cb->ops;

	if (!ops->add_device)
		return 0;

	WARN_ON(dev->iommu_group);

	return ops->add_device(dev);
}

static int remove_iommu_group(struct device *dev, void *data)
{
	struct iommu_callback_data *cb = data;
	const struct iommu_ops *ops = cb->ops;

	if (ops->remove_device && dev->iommu_group)
		ops->remove_device(dev);

	return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	unsigned long group_action = 0;

	/*
	 * ADD/DEL call into iommu driver ops if provided, which may
	 * result in ADD/DEL notifiers to group->notifier
	 */
	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (ops->add_device)
			return ops->add_device(dev);
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		if (ops->remove_device && dev->iommu_group) {
			ops->remove_device(dev);
			return 0;
		}
	}

	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
	group = iommu_group_get(dev);
	if (!group)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}

	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);

	iommu_group_put(group);
	return 0;
}

static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;
	struct notifier_block *nb;
	struct iommu_callback_data cb = {
		.ops = ops,
	};

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = iommu_bus_notifier;

	err = bus_register_notifier(bus, nb);
	if (err)
		goto out_free;

	err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
	if (err)
		goto out_err;

	return 0;

out_err:
	/* Clean up */
	bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
	bus_unregister_notifier(bus, nb);

out_free:
	kfree(nb);

	return err;
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus.  Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up.  With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;

	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	err = iommu_bus_init(bus, ops);
	if (err)
		bus->iommu_ops = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
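
/*
 * Usage sketch (illustrative only): an IOMMU driver registering its ops
 * for the PCI bus at init time.  "example_iommu_ops" is a made-up, empty
 * ops structure standing in for a real driver's callbacks.
 */
static const struct iommu_ops example_iommu_ops __maybe_unused = {
	/* a real driver fills in domain_alloc, attach_dev, map, ... */
};

static int __maybe_unused example_iommu_driver_init(void)
{
	return bus_set_iommu(&pci_bus_type, &example_iommu_ops);
}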

bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	if (!bus->iommu_ops || !bus->iommu_ops->capable)
		return false;

	return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
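
/*
 * Usage sketch (illustrative only): a fault handler matching
 * iommu_fault_handler_t, installed with
 * iommu_set_fault_handler(domain, example_fault_handler, NULL).
 */
static int __maybe_unused example_fault_handler(struct iommu_domain *domain,
						struct device *dev,
						unsigned long iova, int flags,
						void *token)
{
	dev_err(dev, "iommu fault at iova 0x%lx (flags 0x%x)\n", iova, flags);

	return -ENOSYS; /* report the fault as unhandled */
}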

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(type);
	if (!domain)
		return NULL;

	domain->ops  = bus->iommu_ops;
	domain->type = type;

	return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (!ret)
		trace_attach_device_to_domain(dev);
	return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return __iommu_attach_device(domain, dev);

	/*
	 * We have a group - lock it to make sure the device-count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
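
/*
 * Usage sketch (illustrative only): allocating an unmanaged domain and
 * attaching a device to it, as a VFIO-style user of the IOMMU API might.
 */
static struct iommu_domain * __maybe_unused example_attach(struct device *dev)
{
	struct iommu_domain *domain;

	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return NULL;

	if (iommu_attach_device(domain, dev)) {
		iommu_domain_free(domain);
		return NULL;
	}

	return domain;
}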

static void __iommu_detach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
	trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return __iommu_detach_device(domain, dev);

	mutex_lock(&group->mutex);
	if (iommu_group_device_count(group) != 1) {
		WARN_ON(1);
		goto out_unlock;
	}

	__iommu_detach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return NULL;

	domain = group->domain;

	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
{
	int ret;

	if (group->default_domain && group->domain != group->default_domain)
		return -EBUSY;

	ret = __iommu_group_for_each_dev(group, domain,
					 iommu_group_do_attach_device);
	if (ret == 0)
		group->domain = domain;

	return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_attach_group(domain, group);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	__iommu_detach_device(domain, dev);

	return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group)
{
	int ret;

	if (!group->default_domain) {
		__iommu_group_for_each_dev(group, domain,
					   iommu_group_do_detach_device);
		group->domain = NULL;
		return;
	}

	if (group->domain == group->default_domain)
		return;

	/* Detach by re-attaching to the default domain */
	ret = __iommu_group_for_each_dev(group, group->default_domain,
					 iommu_group_do_attach_device);
	if (ret != 0)
		WARN_ON(1);
	else
		group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_detach_group(domain, group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain,
			   unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* Max page size that still fits into 'size' */
	pgsize_idx = __fls(size);

	/* need to consider alignment requirements ? */
	if (likely(addr_merge)) {
		/* Max page size allowed by address */
		unsigned int align_pgsize_idx = __ffs(addr_merge);

		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
	}

	/* build a mask of acceptable page sizes */
	pgsize = (1UL << (pgsize_idx + 1)) - 1;

	/* throw away page sizes not supported by the hardware */
	pgsize &= domain->ops->pgsize_bitmap;

	/* make sure we're still sane */
	BUG_ON(!pgsize);

	/* pick the biggest page */
	pgsize_idx = __fls(pgsize);
	pgsize = 1UL << pgsize_idx;

	return pgsize;
}
1277 | ||
cefc53c7 | 1278 | int iommu_map(struct iommu_domain *domain, unsigned long iova, |
7d3002cc | 1279 | phys_addr_t paddr, size_t size, int prot) |
cefc53c7 | 1280 | { |
7d3002cc OBC |
1281 | unsigned long orig_iova = iova; |
1282 | unsigned int min_pagesz; | |
1283 | size_t orig_size = size; | |
1284 | int ret = 0; | |
cefc53c7 | 1285 | |
9db4ad91 | 1286 | if (unlikely(domain->ops->map == NULL || |
57886518 | 1287 | domain->ops->pgsize_bitmap == 0UL)) |
e5aa7f00 | 1288 | return -ENODEV; |
cefc53c7 | 1289 | |
a10315e5 JR |
1290 | if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) |
1291 | return -EINVAL; | |
1292 | ||
7d3002cc OBC |
1293 | /* find out the minimum page size supported */ |
1294 | min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); | |
1295 | ||
1296 | /* | |
1297 | * both the virtual address and the physical one, as well as | |
1298 | * the size of the mapping, must be aligned (at least) to the | |
1299 | * size of the smallest page supported by the hardware | |
1300 | */ | |
1301 | if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { | |
abedb049 | 1302 | pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n", |
6197ca82 | 1303 | iova, &paddr, size, min_pagesz); |
7d3002cc OBC |
1304 | return -EINVAL; |
1305 | } | |
1306 | ||
abedb049 | 1307 | pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); |
7d3002cc OBC |
1308 | |
1309 | while (size) { | |
bd13969b | 1310 | size_t pgsize = iommu_pgsize(domain, iova | paddr, size); |
7d3002cc | 1311 | |
abedb049 | 1312 | pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n", |
6197ca82 | 1313 | iova, &paddr, pgsize); |
7d3002cc OBC |
1314 | |
1315 | ret = domain->ops->map(domain, iova, paddr, pgsize, prot); | |
1316 | if (ret) | |
1317 | break; | |
1318 | ||
1319 | iova += pgsize; | |
1320 | paddr += pgsize; | |
1321 | size -= pgsize; | |
1322 | } | |
1323 | ||
1324 | /* unroll mapping in case something went wrong */ | |
1325 | if (ret) | |
1326 | iommu_unmap(domain, orig_iova, orig_size - size); | |
e0be7c86 | 1327 | else |
860cd64d | 1328 | trace_map(orig_iova, paddr, orig_size); |
7d3002cc OBC |
1329 | |
1330 | return ret; | |
cefc53c7 JR |
1331 | } |
1332 | EXPORT_SYMBOL_GPL(iommu_map); | |
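
/*
 * Usage sketch (illustrative only): mapping one region into a domain and
 * tearing it down again.  The addresses below are made up; real callers
 * must satisfy the alignment rules implied by the domain's pgsize_bitmap.
 */
static int __maybe_unused example_map_one(struct iommu_domain *domain)
{
	unsigned long iova = 0x100000;	/* hypothetical, page-aligned */
	phys_addr_t paddr = 0x80000000;	/* hypothetical, page-aligned */
	size_t size = 0x200000;		/* 2 MiB */
	int ret;

	ret = iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* ... DMA happens through the mapping ... */

	iommu_unmap(domain, iova, size);
	return 0;
}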

size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	unsigned int min_pagesz;
	unsigned long orig_iova = iova;

	if (unlikely(domain->ops->unmap == NULL ||
		     domain->ops->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	trace_unmap(orig_iova, size, unmapped);
	return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents, int prot)
{
	struct scatterlist *s;
	size_t mapped = 0;
	unsigned int i, min_pagesz;
	int ret;

	if (unlikely(domain->ops->pgsize_bitmap == 0UL))
		return 0;

	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	for_each_sg(sg, s, nents, i) {
		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;

		/*
		 * We are mapping on IOMMU page boundaries, so offset within
		 * the page must be 0.  However, the IOMMU may support pages
		 * smaller than PAGE_SIZE, so s->offset may still represent
		 * an offset of that boundary within the CPU page.
		 */
		if (!IS_ALIGNED(s->offset, min_pagesz))
			goto out_err;

		ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
		if (ret)
			goto out_err;

		mapped += s->length;
	}

	return mapped;

out_err:
	/* undo mappings already done */
	iommu_unmap(domain, iova, mapped);

	return 0;
}
EXPORT_SYMBOL_GPL(default_iommu_map_sg);
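
/*
 * Usage sketch (illustrative only): callers normally reach this routine
 * through the iommu_map_sg() wrapper in linux/iommu.h, which dispatches to
 * the domain's ->map_sg op (drivers typically set that to
 * default_iommu_map_sg).
 */
static bool __maybe_unused example_map_sg(struct iommu_domain *domain,
					  unsigned long iova,
					  struct scatterlist *sg,
					  unsigned int nents)
{
	size_t mapped = iommu_map_sg(domain, iova, sg, nents,
				     IOMMU_READ | IOMMU_WRITE);

	/* iommu_map_sg() returns 0 on failure, bytes mapped on success */
	return mapped != 0;
}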

int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
			       phys_addr_t paddr, u64 size, int prot)
{
	if (unlikely(domain->ops->domain_window_enable == NULL))
		return -ENODEV;

	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
						 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	if (unlikely(domain->ops->domain_window_disable == NULL))
		return;

	return domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	ida_init(&iommu_group_ida);
	mutex_init(&iommu_group_mutex);

	BUG_ON(!iommu_group_kset);

	return 0;
}
core_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	struct iommu_domain_geometry *geometry;
	bool *paging;
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		geometry  = data;
		*geometry = domain->geometry;

		break;
	case DOMAIN_ATTR_PAGING:
		paging  = data;
		*paging = (domain->ops->pgsize_bitmap != 0UL);
		break;
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_get_windows != NULL)
			*count = domain->ops->domain_get_windows(domain);
		else
			ret = -ENODEV;

		break;
	default:
		if (!domain->ops->domain_get_attr)
			return -EINVAL;

		ret = domain->ops->domain_get_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);

int iommu_domain_set_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_set_windows != NULL)
			ret = domain->ops->domain_set_windows(domain, *count);
		else
			ret = -ENODEV;

		break;
	default:
		if (domain->ops->domain_set_attr == NULL)
			return -EINVAL;

		ret = domain->ops->domain_set_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
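
/*
 * Usage sketch (illustrative only): querying a domain's aperture via
 * DOMAIN_ATTR_GEOMETRY, e.g. to learn which IOVA range may be mapped.
 */
static void __maybe_unused example_show_geometry(struct iommu_domain *domain)
{
	struct iommu_domain_geometry geo;

	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo))
		return;

	pr_info("aperture: %pad - %pad\n",
		&geo.aperture_start, &geo.aperture_end);
}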

void iommu_get_dm_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->get_dm_regions)
		ops->get_dm_regions(dev, list);
}

void iommu_put_dm_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->put_dm_regions)
		ops->put_dm_regions(dev, list);
}

/* Request that a device is direct mapped by the IOMMU */
int iommu_request_dm_for_dev(struct device *dev)
{
	struct iommu_domain *dm_domain;
	struct iommu_group *group;
	int ret;

	/* Device must already be in a group before calling this function */
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	mutex_lock(&group->mutex);

	/* Check if the default domain is already direct mapped */
	ret = 0;
	if (group->default_domain &&
	    group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
		goto out;

	/* Don't change mappings of existing devices */
	ret = -EBUSY;
	if (iommu_group_device_count(group) != 1)
		goto out;

	/* Allocate a direct mapped domain */
	ret = -ENOMEM;
	dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
	if (!dm_domain)
		goto out;

	/* Attach the device to the domain */
	ret = __iommu_attach_group(dm_domain, group);
	if (ret) {
		iommu_domain_free(dm_domain);
		goto out;
	}

	/* Make the direct mapped domain the default for this group */
	if (group->default_domain)
		iommu_domain_free(group->default_domain);
	group->default_domain = dm_domain;

	pr_info("Using direct mapping for device %s\n", dev_name(dev));

	ret = 0;
out:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}