/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies.  All rights reserved.
 * Copyright (c) 2013 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
34 | ||
35 | #include <linux/mm.h> | |
36 | #include <linux/dma-mapping.h> | |
37 | #include <linux/sched.h> | |
38 | #include <linux/hugetlb.h> | |
39 | #include <linux/dma-attrs.h> | |
40 | #include <linux/iommu.h> | |
41 | #include <linux/workqueue.h> | |
42 | #include <linux/list.h> | |
43 | #include <linux/pci.h> | |
44 | ||
45 | #include "usnic_log.h" | |
46 | #include "usnic_uiom.h" | |
47 | #include "usnic_uiom_interval_tree.h" | |
48 | ||
static struct workqueue_struct *usnic_uiom_wq;

#define USNIC_UIOM_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /	\
	((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] -	\
	 (void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))

static void usnic_uiom_reg_account(struct work_struct *work)
{
	struct usnic_uiom_reg *umem = container_of(work,
						struct usnic_uiom_reg, work);

	down_write(&umem->mm->mmap_sem);
	umem->mm->locked_vm -= umem->diff;
	up_write(&umem->mm->mmap_sem);
	mmput(umem->mm);
	kfree(umem);
}

static int usnic_uiom_dma_fault(struct iommu_domain *domain,
				struct device *dev,
				unsigned long iova, int flags,
				void *token)
{
	usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
		dev_name(dev),
		domain, iova, flags);
	return -ENOSYS;
}

static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
{
	struct usnic_uiom_chunk *chunk, *tmp;
	struct page *page;
	int i;
	dma_addr_t pa;

	list_for_each_entry_safe(chunk, tmp, chunk_list, list) {
		for (i = 0; i < chunk->nents; i++) {
			page = sg_page(&chunk->page_list[i]);
			pa = sg_phys(&chunk->page_list[i]);
			if (dirty)
				set_page_dirty_lock(page);
			put_page(page);
			usnic_dbg("pa: %pa\n", &pa);
		}
		kfree(chunk);
	}
}

static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
				int dmasync, struct list_head *chunk_list)
{
	struct page **page_list;
	struct usnic_uiom_chunk *chunk;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int off;
	int i;
	int flags;
	dma_addr_t pa;
	DEFINE_DMA_ATTRS(attrs);

	if (dmasync)
		dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);

	if (!can_do_mlock())
		return -EPERM;

	INIT_LIST_HEAD(chunk_list);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);

	locked = npages + current->mm->locked_vm;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	flags = IOMMU_READ | IOMMU_CACHE;
	flags |= (writable) ? IOMMU_WRITE : 0;
	cur_base = addr & PAGE_MASK;
	ret = 0;

	while (npages) {
		ret = get_user_pages(current, current->mm, cur_base,
					min_t(unsigned long, npages,
					PAGE_SIZE / sizeof(struct page *)),
					1, !writable, page_list, NULL);

		if (ret < 0)
			goto out;

		npages -= ret;
		off = 0;

		while (ret) {
			chunk = kmalloc(sizeof(*chunk) +
					sizeof(struct scatterlist) *
					min_t(int, ret, USNIC_UIOM_PAGE_CHUNK),
					GFP_KERNEL);
			if (!chunk) {
				ret = -ENOMEM;
				goto out;
			}

			chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);
			sg_init_table(chunk->page_list, chunk->nents);
			for (i = 0; i < chunk->nents; ++i) {
				sg_set_page(&chunk->page_list[i],
						page_list[i + off],
						PAGE_SIZE, 0);
				pa = sg_phys(&chunk->page_list[i]);
				usnic_dbg("va: 0x%lx pa: %pa\n",
						cur_base + i*PAGE_SIZE, &pa);
			}
			cur_base += chunk->nents * PAGE_SIZE;
			ret -= chunk->nents;
			off += chunk->nents;
			list_add_tail(&chunk->list, chunk_list);
		}

		ret = 0;
	}

out:
	if (ret < 0)
		usnic_uiom_put_pages(chunk_list, 0);
	else
		current->mm->locked_vm = locked;

	up_write(&current->mm->mmap_sem);
	free_page((unsigned long) page_list);
	return ret;
}

static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	long unsigned va, size;

	list_for_each_entry_safe(interval, tmp, intervals, link) {
		va = interval->start << PAGE_SHIFT;
		size = ((interval->last - interval->start) + 1) << PAGE_SHIFT;
		while (size > 0) {
			/* Workaround for RH 970401 */
			usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);
			iommu_unmap(pd->domain, va, PAGE_SIZE);
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}

static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
					struct usnic_uiom_reg *uiomr,
					int dirty)
{
	int npages;
	unsigned long vpn_start, vpn_last;
	struct usnic_uiom_interval_node *interval, *tmp;
	int writable = 0;
	LIST_HEAD(rm_intervals);

	npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
	vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	spin_lock(&pd->lock);
	usnic_uiom_remove_interval(&pd->rb_root, vpn_start,
					vpn_last, &rm_intervals);
	usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);

	list_for_each_entry_safe(interval, tmp, &rm_intervals, link) {
		if (interval->flags & IOMMU_WRITE)
			writable = 1;
		list_del(&interval->link);
		kfree(interval);
	}

	usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable);
	spin_unlock(&pd->lock);
}

static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_reg *uiomr)
{
	int i, err;
	size_t size;
	struct usnic_uiom_chunk *chunk;
	struct usnic_uiom_interval_node *interval_node;
	dma_addr_t pa;
	dma_addr_t pa_start = 0;
	dma_addr_t pa_end = 0;
	long int va_start = -EINVAL;
	struct usnic_uiom_pd *pd = uiomr->pd;
	long int va = uiomr->va & PAGE_MASK;
	int flags = IOMMU_READ | IOMMU_CACHE;

	flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
	chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
					list);
	list_for_each_entry(interval_node, intervals, link) {
iter_chunk:
		for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
			pa = sg_phys(&chunk->page_list[i]);
			if ((va >> PAGE_SHIFT) < interval_node->start)
				continue;

			if ((va >> PAGE_SHIFT) == interval_node->start) {
				/* First page of the interval */
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			WARN_ON(va_start == -EINVAL);

			if ((pa_end + PAGE_SIZE != pa) &&
					(pa != pa_start)) {
				/* PAs are not contiguous */
				size = pa_end - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			if ((va >> PAGE_SHIFT) == interval_node->last) {
				/* Last page of the interval */
				size = pa - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				break;
			}

			if (pa != pa_start)
				pa_end += PAGE_SIZE;
		}

		if (i == chunk->nents) {
			/*
			 * Hit last entry of the chunk,
			 * hence advance to next chunk
			 */
			chunk = list_first_entry(&chunk->list,
						struct usnic_uiom_chunk,
						list);
			goto iter_chunk;
		}
	}

	return 0;

err_out:
	usnic_uiom_unmap_sorted_intervals(intervals, pd);
	return err;
}

struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
						unsigned long addr, size_t size,
						int writable, int dmasync)
{
	struct usnic_uiom_reg *uiomr;
	unsigned long va_base, vpn_start, vpn_last;
	unsigned long npages;
	int offset, err;
	LIST_HEAD(sorted_diff_intervals);

	/*
	 * Intel IOMMU map throws an error if a translation entry is
	 * changed from read to write. This module may not unmap
	 * and then remap the entry after fixing the permission
	 * because that opens up a small window where hw DMA may page fault.
	 * Hence, make all entries writable.
	 */
	writable = 1;

	va_base = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
	vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL);
	if (!uiomr)
		return ERR_PTR(-ENOMEM);

	uiomr->va = va_base;
	uiomr->offset = offset;
	uiomr->length = size;
	uiomr->writable = writable;
	uiomr->pd = pd;

	err = usnic_uiom_get_pages(addr, size, writable, dmasync,
					&uiomr->chunk_list);
	if (err) {
		usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_free_uiomr;
	}

	spin_lock(&pd->lock);
	err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
						(writable) ? IOMMU_WRITE : 0,
						IOMMU_WRITE,
						&pd->rb_root,
						&sorted_diff_intervals);
	if (err) {
		usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_put_pages;
	}

	err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
	if (err) {
		usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_put_intervals;
	}

	err = usnic_uiom_insert_interval(&pd->rb_root, vpn_start, vpn_last,
					(writable) ? IOMMU_WRITE : 0);
	if (err) {
		usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_unmap_intervals;
	}

	usnic_uiom_put_interval_set(&sorted_diff_intervals);
	spin_unlock(&pd->lock);

	return uiomr;

out_unmap_intervals:
	usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
out_put_intervals:
	usnic_uiom_put_interval_set(&sorted_diff_intervals);
out_put_pages:
	usnic_uiom_put_pages(&uiomr->chunk_list, 0);
	spin_unlock(&pd->lock);
out_free_uiomr:
	kfree(uiomr);
	return ERR_PTR(err);
}

void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
{
	struct mm_struct *mm;
	unsigned long diff;

	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);

	mm = get_task_mm(current);
	if (!mm) {
		kfree(uiomr);
		return;
	}

	diff = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;

	/*
	 * We may be called with the mm's mmap_sem already held. This
	 * can happen when a userspace munmap() is the call that drops
	 * the last reference to our file and calls our release
	 * method. If there are memory regions to destroy, we'll end
	 * up here and not be able to take the mmap_sem. In that case
	 * we defer the vm_locked accounting to the system workqueue.
	 */
	if (closing) {
		if (!down_write_trylock(&mm->mmap_sem)) {
			INIT_WORK(&uiomr->work, usnic_uiom_reg_account);
			uiomr->mm = mm;
			uiomr->diff = diff;

			queue_work(usnic_uiom_wq, &uiomr->work);
			return;
		}
	} else
		down_write(&mm->mmap_sem);

	current->mm->locked_vm -= diff;
	up_write(&mm->mmap_sem);
	mmput(mm);
	kfree(uiomr);
}

struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
{
	struct usnic_uiom_pd *pd;
	void *domain;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->domain = domain = iommu_domain_alloc(&pci_bus_type);
	if (IS_ERR_OR_NULL(domain)) {
		usnic_err("Failed to allocate IOMMU domain with err %ld\n",
				PTR_ERR(pd->domain));
		kfree(pd);
		return ERR_PTR(domain ? PTR_ERR(domain) : -ENOMEM);
	}

	iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);

	spin_lock_init(&pd->lock);
	INIT_LIST_HEAD(&pd->devs);

	return pd;
}

void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)
{
	iommu_domain_free(pd->domain);
	kfree(pd);
}

int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int err;

	uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_KERNEL);
	if (!uiom_dev)
		return -ENOMEM;
	uiom_dev->dev = dev;

	err = iommu_attach_device(pd->domain, dev);
	if (err)
		goto out_free_dev;

	if (!iommu_domain_has_cap(pd->domain, IOMMU_CAP_CACHE_COHERENCY)) {
		usnic_err("IOMMU of %s does not support cache coherency\n",
				dev_name(dev));
		err = -EINVAL;
		goto out_detach_device;
	}

	spin_lock(&pd->lock);
	list_add_tail(&uiom_dev->link, &pd->devs);
	pd->dev_cnt++;
	spin_unlock(&pd->lock);

	return 0;

out_detach_device:
	iommu_detach_device(pd->domain, dev);
out_free_dev:
	kfree(uiom_dev);
	return err;
}

void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int found = 0;

	spin_lock(&pd->lock);
	list_for_each_entry(uiom_dev, &pd->devs, link) {
		if (uiom_dev->dev == dev) {
			found = 1;
			break;
		}
	}

	if (!found) {
		usnic_err("Unable to free dev %s - not found\n",
				dev_name(dev));
		spin_unlock(&pd->lock);
		return;
	}

	list_del(&uiom_dev->link);
	pd->dev_cnt--;
	spin_unlock(&pd->lock);

	return iommu_detach_device(pd->domain, dev);
}

struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_dev *uiom_dev;
	struct device **devs;
	int i = 0;

	spin_lock(&pd->lock);
	devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
	if (!devs) {
		devs = ERR_PTR(-ENOMEM);
		goto out;
	}

	list_for_each_entry(uiom_dev, &pd->devs, link) {
		devs[i++] = uiom_dev->dev;
	}
out:
	spin_unlock(&pd->lock);
	return devs;
}

void usnic_uiom_free_dev_list(struct device **devs)
{
	kfree(devs);
}

int usnic_uiom_init(char *drv_name)
{
	if (!iommu_present(&pci_bus_type)) {
		usnic_err("IOMMU required but not present or enabled.  USNIC QPs will not function w/o enabling IOMMU\n");
		return -EPERM;
	}

	usnic_uiom_wq = create_workqueue(drv_name);
	if (!usnic_uiom_wq) {
		usnic_err("Unable to alloc wq for drv %s\n", drv_name);
		return -ENOMEM;
	}

	return 0;
}

void usnic_uiom_fini(void)
{
	flush_workqueue(usnic_uiom_wq);
	destroy_workqueue(usnic_uiom_wq);
}