/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>

#include "amd_iommu_types.h"
#include "amd_iommu_proto.h"

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");

#define MAX_DEVICES		0x10000
#define PRI_QUEUE_SIZE		512

struct pri_queue {
	atomic_t inflight;
	bool finish;
	int status;
};

struct pasid_state {
	struct list_head list;			/* For global state-list */
	atomic_t count;				/* Reference count */
	unsigned mmu_notifier_count;		/* Counting nested mmu_notifier
						   calls */
	struct mm_struct *mm;			/* mm_struct for the faults */
	struct mmu_notifier mn;			/* mmu_notifier handle */
	struct pri_queue pri[PRI_QUEUE_SIZE];	/* PRI tag states */
	struct device_state *device_state;	/* Link to our device_state */
	int pasid;				/* PASID index */
	bool invalid;				/* Used during setup and
						   teardown of the pasid */
	spinlock_t lock;			/* Protect pri_queues and
						   mmu_notifier_count */
	wait_queue_head_t wq;			/* To wait for count == 0 */
};

struct device_state {
	struct list_head list;
	u16 devid;
	atomic_t count;
	struct pci_dev *pdev;
	struct pasid_state **states;
	struct iommu_domain *domain;
	int pasid_levels;
	int max_pasids;
	amd_iommu_invalid_ppr_cb inv_ppr_cb;
	amd_iommu_invalidate_ctx inv_ctx_cb;
	spinlock_t lock;
	wait_queue_head_t wq;
};

struct fault {
	struct work_struct work;
	struct device_state *dev_state;
	struct pasid_state *state;
	struct mm_struct *mm;
	u64 address;
	u16 devid;
	u16 pasid;
	u16 tag;
	u16 finish;
	u16 flags;
};

static LIST_HEAD(state_list);
static spinlock_t state_lock;

static struct workqueue_struct *iommu_wq;

static void free_pasid_states(struct device_state *dev_state);

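/* Build the 16-bit requestor ID (bus number and devfn) used to look up device state */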
static u16 device_id(struct pci_dev *pdev)
{
	u16 devid;

	devid = pdev->bus->number;
	devid = (devid << 8) | pdev->devfn;

	return devid;
}

static struct device_state *__get_device_state(u16 devid)
{
	struct device_state *dev_state;

	list_for_each_entry(dev_state, &state_list, list) {
		if (dev_state->devid == devid)
			return dev_state;
	}

	return NULL;
}

static struct device_state *get_device_state(u16 devid)
{
	struct device_state *dev_state;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	dev_state = __get_device_state(devid);
	if (dev_state != NULL)
		atomic_inc(&dev_state->count);
	spin_unlock_irqrestore(&state_lock, flags);

	return dev_state;
}

static void free_device_state(struct device_state *dev_state)
{
	struct iommu_group *group;

	/*
	 * First detach device from domain - No more PRI requests will arrive
	 * from that device after it is unbound from the IOMMUv2 domain.
	 */
	group = iommu_group_get(&dev_state->pdev->dev);
	if (WARN_ON(!group))
		return;

	iommu_detach_group(dev_state->domain, group);

	iommu_group_put(group);

	/* Everything is down now, free the IOMMUv2 domain */
	iommu_domain_free(dev_state->domain);

	/* Finally get rid of the device-state */
	kfree(dev_state);
}

static void put_device_state(struct device_state *dev_state)
{
	if (atomic_dec_and_test(&dev_state->count))
		wake_up(&dev_state->wq);
}

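/*
 * pasid_state pointers are kept in a sparse table with 512 entries per
 * level; 9 bits of the PASID are consumed per level (see pasid_levels).
 */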
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
						  int pasid, bool alloc)
{
	struct pasid_state **root, **ptr;
	int level, index;

	level = dev_state->pasid_levels;
	root  = dev_state->states;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		ptr   = &root[index];

		if (level == 0)
			break;

		if (*ptr == NULL) {
			if (!alloc)
				return NULL;

			*ptr = (void *)get_zeroed_page(GFP_ATOMIC);
			if (*ptr == NULL)
				return NULL;
		}

		root   = (struct pasid_state **)*ptr;
		level -= 1;
	}

	return ptr;
}

static int set_pasid_state(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	ret = -ENOMEM;
	if (ptr == NULL)
		goto out_unlock;

	ret = -ENOMEM;
	if (*ptr != NULL)
		goto out_unlock;

	*ptr = pasid_state;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void clear_pasid_state(struct device_state *dev_state, int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	if (ptr == NULL)
		goto out_unlock;

	*ptr = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);
}

static struct pasid_state *get_pasid_state(struct device_state *dev_state,
					   int pasid)
{
	struct pasid_state **ptr, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, false);

	if (ptr == NULL)
		goto out_unlock;

	ret = *ptr;
	if (ret)
		atomic_inc(&ret->count);

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void free_pasid_state(struct pasid_state *pasid_state)
{
	kfree(pasid_state);
}

static void put_pasid_state(struct pasid_state *pasid_state)
{
	if (atomic_dec_and_test(&pasid_state->count))
		wake_up(&pasid_state->wq);
}

static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
	atomic_dec(&pasid_state->count);
	wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
	free_pasid_state(pasid_state);
}

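/*
 * Detach a PASID from the device: mark the state invalid, clear the GCR3
 * table entry and drain the fault workqueue so no stale work remains.
 */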
static void unbind_pasid(struct pasid_state *pasid_state)
{
	struct iommu_domain *domain;

	domain = pasid_state->device_state->domain;

	/*
	 * Mark pasid_state as invalid, no more faults will be added to the
	 * work queue after this is visible everywhere.
	 */
	pasid_state->invalid = true;

	/* Make sure this is visible */
	smp_wmb();

	/* After this the device/pasid can't access the mm anymore */
	amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);

	/* Make sure no more pending faults are in the queue */
	flush_workqueue(iommu_wq);
}

static void free_pasid_states_level1(struct pasid_state **tbl)
{
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		free_page((unsigned long)tbl[i]);
	}
}

static void free_pasid_states_level2(struct pasid_state **tbl)
{
	struct pasid_state **ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		ptr = (struct pasid_state **)tbl[i];
		free_pasid_states_level1(ptr);
	}
}

static void free_pasid_states(struct device_state *dev_state)
{
	struct pasid_state *pasid_state;
	int i;

	for (i = 0; i < dev_state->max_pasids; ++i) {
		pasid_state = get_pasid_state(dev_state, i);
		if (pasid_state == NULL)
			continue;

		put_pasid_state(pasid_state);

		/*
		 * This will call the mn_release function and
		 * unbind the PASID
		 */
		mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

		put_pasid_state_wait(pasid_state); /* Reference taken in
						      amd_iommu_bind_pasid */

		/* Drop reference taken in amd_iommu_bind_pasid */
		put_device_state(dev_state);
	}

	if (dev_state->pasid_levels == 2)
		free_pasid_states_level2(dev_state->states);
	else if (dev_state->pasid_levels == 1)
		free_pasid_states_level1(dev_state->states);
	else
		BUG_ON(dev_state->pasid_levels != 0);

	free_page((unsigned long)dev_state->states);
}

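/*
 * MMU notifier callbacks: keep the IOMMU TLB for this PASID in sync with
 * changes to the CPU page tables of the bound mm_struct.
 */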
static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
	return container_of(mn, struct pasid_state, mn);
}

static void __mn_flush_page(struct mmu_notifier *mn,
			    unsigned long address)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
}

static int mn_clear_flush_young(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start,
				unsigned long end)
{
	for (; start < end; start += PAGE_SIZE)
		__mn_flush_page(mn, start);

	return 0;
}

static void mn_invalidate_page(struct mmu_notifier *mn,
			       struct mm_struct *mm,
			       unsigned long address)
{
	__mn_flush_page(mn, address);
}

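/* Flush a single page if the range fits within one page, otherwise the whole TLB */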
static void mn_invalidate_range(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	if ((start ^ (end - 1)) < PAGE_SIZE)
		amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
				     start);
	else
		amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
}

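/*
 * Called when the mm_struct goes away (e.g. on process exit): notify the
 * device driver via inv_ctx_cb and unbind the PASID.
 */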
static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	bool run_inv_ctx_cb;

	might_sleep();

	pasid_state    = mn_to_state(mn);
	dev_state      = pasid_state->device_state;
	run_inv_ctx_cb = !pasid_state->invalid;

	if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

	unbind_pasid(pasid_state);
}

static const struct mmu_notifier_ops iommu_mn = {
	.release		= mn_release,
	.clear_flush_young	= mn_clear_flush_young,
	.invalidate_page	= mn_invalidate_page,
	.invalidate_range	= mn_invalidate_range,
};

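/*
 * Record the PPR response status for a tag; finish_pri_tag() sends the
 * response once the last in-flight fault for that tag has completed.
 */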
static void set_pri_tag_status(struct pasid_state *pasid_state,
			       u16 tag, int status)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->pri[tag].status = status;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void finish_pri_tag(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   u16 tag)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
	    pasid_state->pri[tag].finish) {
		amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
				       pasid_state->pri[tag].status, tag);
		pasid_state->pri[tag].finish = false;
		pasid_state->pri[tag].status = PPR_SUCCESS;
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

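/*
 * The page fault could not be handled by the kernel; let the device driver
 * decide the PPR response via its registered inv_ppr_cb, or send INVALID.
 */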
static void handle_fault_error(struct fault *fault)
{
	int status;

	if (!fault->dev_state->inv_ppr_cb) {
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		return;
	}

	status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
					      fault->pasid,
					      fault->address,
					      fault->flags);
	switch (status) {
	case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
		set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
		break;
	case AMD_IOMMU_INV_PRI_RSP_INVALID:
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		break;
	case AMD_IOMMU_INV_PRI_RSP_FAIL:
		set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
		break;
	default:
		BUG();
	}
}

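/* Check the access requested by the fault against the permissions of the vma */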
static bool access_error(struct vm_area_struct *vma, struct fault *fault)
{
	unsigned long requested = 0;

	if (fault->flags & PPR_FAULT_EXEC)
		requested |= VM_EXEC;

	if (fault->flags & PPR_FAULT_READ)
		requested |= VM_READ;

	if (fault->flags & PPR_FAULT_WRITE)
		requested |= VM_WRITE;

	return (requested & ~vma->vm_flags) != 0;
}

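/*
 * Workqueue handler for a queued PPR fault: resolve it with handle_mm_fault()
 * on the bound mm and complete the PRI tag afterwards.
 */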
static void do_fault(struct work_struct *work)
{
	struct fault *fault = container_of(work, struct fault, work);
	struct vm_area_struct *vma;
	int ret = VM_FAULT_ERROR;
	unsigned int flags = 0;
	struct mm_struct *mm;
	u64 address;

	mm = fault->state->mm;
	address = fault->address;

	if (fault->flags & PPR_FAULT_USER)
		flags |= FAULT_FLAG_USER;
	if (fault->flags & PPR_FAULT_WRITE)
		flags |= FAULT_FLAG_WRITE;
	flags |= FAULT_FLAG_REMOTE;

	down_read(&mm->mmap_sem);
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		/* failed to get a vma in the right range */
		goto out;

	/* Check if we have the right permissions on the vma */
	if (access_error(vma, fault))
		goto out;

	ret = handle_mm_fault(mm, vma, address, flags);

out:
	up_read(&mm->mmap_sem);

	if (ret & VM_FAULT_ERROR)
		/* failed to service fault */
		handle_fault_error(fault);

	finish_pri_tag(fault->dev_state, fault->state, fault->tag);

	put_pasid_state(fault->state);

	kfree(fault);
}

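/*
 * Notifier called for each PPR entry logged by the IOMMU: the low 9 bits of
 * the tag field carry the PRI tag, bit 9 indicates that a response must be
 * sent. Faults are queued to iommu_wq and handled in do_fault().
 */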
static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
	struct amd_iommu_fault *iommu_fault;
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;
	struct fault *fault;
	bool finish;
	u16 tag;
	int ret;

	iommu_fault = data;
	tag         = iommu_fault->tag & 0x1ff;
	finish      = (iommu_fault->tag >> 9) & 1;

	ret = NOTIFY_DONE;
	dev_state = get_device_state(iommu_fault->device_id);
	if (dev_state == NULL)
		goto out;

	pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
	if (pasid_state == NULL || pasid_state->invalid) {
		/* We know the device but not the PASID -> send INVALID */
		amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out_drop_state;
	}

	spin_lock_irqsave(&pasid_state->lock, flags);
	atomic_inc(&pasid_state->pri[tag].inflight);
	if (finish)
		pasid_state->pri[tag].finish = true;
	spin_unlock_irqrestore(&pasid_state->lock, flags);

	fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
	if (fault == NULL) {
		/* We are OOM - send success and let the device re-fault */
		finish_pri_tag(dev_state, pasid_state, tag);
		goto out_drop_state;
	}

	fault->dev_state = dev_state;
	fault->address   = iommu_fault->address;
	fault->state     = pasid_state;
	fault->tag       = tag;
	fault->finish    = finish;
	fault->pasid     = iommu_fault->pasid;
	fault->flags     = iommu_fault->flags;
	INIT_WORK(&fault->work, do_fault);

	queue_work(iommu_wq, &fault->work);

	ret = NOTIFY_OK;

out_drop_state:

	if (ret != NOTIFY_OK && pasid_state)
		put_pasid_state(pasid_state);

	put_device_state(dev_state);

out:
	return ret;
}

static struct notifier_block ppr_nb = {
	.notifier_call = ppr_notifier,
};

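/*
 * Bind a PASID of the device to the address space of the given task: the
 * mm's page-table root is programmed into the GCR3 table for this PASID and
 * an mmu_notifier keeps the IOMMU in sync until the mm goes away or the
 * PASID is unbound again.
 */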
int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
			 struct task_struct *task)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	struct mm_struct *mm;
	u16 devid;
	int ret;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid     = device_id(pdev);
	dev_state = get_device_state(devid);

	if (dev_state == NULL)
		return -EINVAL;

	ret = -EINVAL;
	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	ret = -ENOMEM;
	pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
	if (pasid_state == NULL)
		goto out;

	atomic_set(&pasid_state->count, 1);
	init_waitqueue_head(&pasid_state->wq);
	spin_lock_init(&pasid_state->lock);

	mm                        = get_task_mm(task);
	pasid_state->mm           = mm;
	pasid_state->device_state = dev_state;
	pasid_state->pasid        = pasid;
	pasid_state->invalid      = true; /* Mark as valid only if we are
					     done with setting up the pasid */
	pasid_state->mn.ops       = &iommu_mn;

	if (pasid_state->mm == NULL)
		goto out_free;

	mmu_notifier_register(&pasid_state->mn, mm);

	ret = set_pasid_state(dev_state, pasid_state, pasid);
	if (ret)
		goto out_unregister;

	ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
					__pa(pasid_state->mm->pgd));
	if (ret)
		goto out_clear_state;

	/* Now we are ready to handle faults */
	pasid_state->invalid = false;

	/*
	 * Drop the reference to the mm_struct here. We rely on the
	 * mmu_notifier release call-back to inform us when the mm
	 * is going away.
	 */
	mmput(mm);

	return 0;

out_clear_state:
	clear_pasid_state(dev_state, pasid);

out_unregister:
	mmu_notifier_unregister(&pasid_state->mn, mm);

out_free:
	mmput(mm);
	free_pasid_state(pasid_state);

out:
	put_device_state(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);

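/*
 * Undo amd_iommu_bind_pasid: drop the references taken there, clear the
 * pasid state and unregister the MMU notifier.
 */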
void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);
	dev_state = get_device_state(devid);
	if (dev_state == NULL)
		return;

	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	pasid_state = get_pasid_state(dev_state, pasid);
	if (pasid_state == NULL)
		goto out;
	/*
	 * Drop reference taken here. We are safe because we still hold
	 * the reference taken in the amd_iommu_bind_pasid function.
	 */
	put_pasid_state(pasid_state);

	/* Clear the pasid state so that the pasid can be re-used */
	clear_pasid_state(dev_state, pasid_state->pasid);

	/*
	 * Call mmu_notifier_unregister to drop our reference
	 * to pasid_state->mm
	 */
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

	put_pasid_state_wait(pasid_state); /* Reference taken in
					      amd_iommu_bind_pasid */
out:
	/* Drop reference taken in this function */
	put_device_state(dev_state);

	/* Drop reference taken in amd_iommu_bind_pasid */
	put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);

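/*
 * Set up IOMMUv2 (PASID/PRI) support for a device: allocate the device
 * state, create a direct-mapped v2-enabled domain, attach the device's
 * IOMMU group to it and register the state in the global list.
 */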
int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
	struct device_state *dev_state;
	struct iommu_group *group;
	unsigned long flags;
	int ret, tmp;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	if (pasids <= 0 || pasids > (PASID_MASK + 1))
		return -EINVAL;

	devid = device_id(pdev);

	dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
	if (dev_state == NULL)
		return -ENOMEM;

	spin_lock_init(&dev_state->lock);
	init_waitqueue_head(&dev_state->wq);
	dev_state->pdev  = pdev;
	dev_state->devid = devid;

	tmp = pasids;
	for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
		dev_state->pasid_levels += 1;

	atomic_set(&dev_state->count, 1);
	dev_state->max_pasids = pasids;

	ret = -ENOMEM;
	dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
	if (dev_state->states == NULL)
		goto out_free_dev_state;

	dev_state->domain = iommu_domain_alloc(&pci_bus_type);
	if (dev_state->domain == NULL)
		goto out_free_states;

	amd_iommu_domain_direct_map(dev_state->domain);

	ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
	if (ret)
		goto out_free_domain;

	group = iommu_group_get(&pdev->dev);
	if (!group)
		goto out_free_domain;

	ret = iommu_attach_group(dev_state->domain, group);
	if (ret != 0)
		goto out_drop_group;

	iommu_group_put(group);

	spin_lock_irqsave(&state_lock, flags);

	if (__get_device_state(devid) != NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		ret = -EBUSY;
		goto out_free_domain;
	}

	list_add_tail(&dev_state->list, &state_list);

	spin_unlock_irqrestore(&state_lock, flags);

	return 0;

out_drop_group:
	iommu_group_put(group);

out_free_domain:
	iommu_domain_free(dev_state->domain);

out_free_states:
	free_page((unsigned long)dev_state->states);

out_free_dev_state:
	kfree(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);

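/*
 * Tear down IOMMUv2 support for a device: remove it from the global list,
 * free all remaining pasid states and wait for the last reference to the
 * device state to be dropped before freeing it.
 */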
void amd_iommu_free_device(struct pci_dev *pdev)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	dev_state = __get_device_state(devid);
	if (dev_state == NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}

	list_del(&dev_state->list);

	spin_unlock_irqrestore(&state_lock, flags);

	/* Get rid of any remaining pasid states */
	free_pasid_states(dev_state);

	put_device_state(dev_state);
	/*
	 * Wait until the last reference is dropped before freeing
	 * the device state.
	 */
	wait_event(dev_state->wq, !atomic_read(&dev_state->count));
	free_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);

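/* Register a driver callback that decides the PPR response for unhandled faults */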
int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
				 amd_iommu_invalid_ppr_cb cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ppr_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);

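/* Register a driver callback invoked when a bound address space goes away */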
int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
				    amd_iommu_invalidate_ctx cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ctx_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);

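/* Module init: create the fault workqueue and register for PPR notifications */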
static int __init amd_iommu_v2_init(void)
{
	int ret;

	pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n");

	if (!amd_iommu_v2_supported()) {
		pr_info("AMD IOMMUv2 functionality not available on this system\n");
		/*
		 * Load anyway to provide the symbols to other modules
		 * which may use AMD IOMMUv2 optionally.
		 */
		return 0;
	}

	spin_lock_init(&state_lock);

	ret = -ENOMEM;
	iommu_wq = create_workqueue("amd_iommu_v2");
	if (iommu_wq == NULL)
		goto out;

	amd_iommu_register_ppr_notifier(&ppr_nb);

	return 0;

out:
	return ret;
}

static void __exit amd_iommu_v2_exit(void)
{
	struct device_state *dev_state;
	int i;

	if (!amd_iommu_v2_supported())
		return;

	amd_iommu_unregister_ppr_notifier(&ppr_nb);

	flush_workqueue(iommu_wq);

	/*
	 * The loop below might call flush_workqueue(), so call
	 * destroy_workqueue() after it
	 */
	for (i = 0; i < MAX_DEVICES; ++i) {
		dev_state = get_device_state(i);

		if (dev_state == NULL)
			continue;

		WARN_ON_ONCE(1);

		put_device_state(dev_state);
		amd_iommu_free_device(dev_state->pdev);
	}

	destroy_workqueue(iommu_wq);
}

module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);