/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies.  All rights reserved.
 * Copyright (c) 2013 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/pci.h>

#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_uiom_interval_tree.h"

static struct workqueue_struct *usnic_uiom_wq;

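/*
 * Number of scatterlist entries in page_list[] that fit in one page
 * alongside the usnic_uiom_chunk header; the element size is derived
 * via pointer arithmetic on a NULL-based chunk rather than sizeof.
 */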
#define USNIC_UIOM_PAGE_CHUNK \
	((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) / \
	 ((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] - \
	  (void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))

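/*
 * Deferred locked_vm accounting: undo the pinned-page charge for a
 * registration whose release path could not take mmap_sem directly
 * (see usnic_uiom_reg_release()).
 */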
static void usnic_uiom_reg_account(struct work_struct *work)
{
	struct usnic_uiom_reg *umem = container_of(work,
						struct usnic_uiom_reg, work);

	down_write(&umem->mm->mmap_sem);
	umem->mm->locked_vm -= umem->diff;
	up_write(&umem->mm->mmap_sem);
	mmput(umem->mm);
	kfree(umem);
}

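/*
 * IOMMU fault handler for the PD's domain: log the faulting device,
 * domain, IOVA and flags.  The fault itself is not handled.
 */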
static int usnic_uiom_dma_fault(struct iommu_domain *domain,
				struct device *dev,
				unsigned long iova, int flags,
				void *token)
{
	usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
		dev_name(dev),
		domain, iova, flags);
	return -ENOSYS;
}

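/*
 * Release the pages referenced by a chunk list: optionally mark them
 * dirty, drop the page references, and free each chunk.
 */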
static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
{
	struct usnic_uiom_chunk *chunk, *tmp;
	struct page *page;
	struct scatterlist *sg;
	int i;
	dma_addr_t pa;

	list_for_each_entry_safe(chunk, tmp, chunk_list, list) {
		for_each_sg(chunk->page_list, sg, chunk->nents, i) {
			page = sg_page(sg);
			pa = sg_phys(sg);
			if (dirty)
				set_page_dirty_lock(page);
			put_page(page);
			usnic_dbg("pa: %pa\n", &pa);
		}
		kfree(chunk);
	}
}

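/*
 * Pin the user pages backing [addr, addr + size), charge them against the
 * RLIMIT_MEMLOCK locked_vm accounting, and gather them into scatterlist
 * chunks appended to chunk_list.  Returns 0 on success or a negative errno.
 */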
static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
				int dmasync, struct list_head *chunk_list)
{
	struct page **page_list;
	struct scatterlist *sg;
	struct usnic_uiom_chunk *chunk;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int off;
	int i;
	int flags;
	dma_addr_t pa;

	if (!can_do_mlock())
		return -EPERM;

	INIT_LIST_HEAD(chunk_list);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);

	locked = npages + current->mm->locked_vm;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	flags = IOMMU_READ | IOMMU_CACHE;
	flags |= (writable) ? IOMMU_WRITE : 0;
	cur_base = addr & PAGE_MASK;
	ret = 0;

	while (npages) {
		ret = get_user_pages(cur_base,
					min_t(unsigned long, npages,
					PAGE_SIZE / sizeof(struct page *)),
					1, !writable, page_list, NULL);

		if (ret < 0)
			goto out;

		npages -= ret;
		off = 0;

		while (ret) {
			chunk = kmalloc(sizeof(*chunk) +
					sizeof(struct scatterlist) *
					min_t(int, ret, USNIC_UIOM_PAGE_CHUNK),
					GFP_KERNEL);
			if (!chunk) {
				ret = -ENOMEM;
				goto out;
			}

			chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);
			sg_init_table(chunk->page_list, chunk->nents);
			for_each_sg(chunk->page_list, sg, chunk->nents, i) {
				sg_set_page(sg, page_list[i + off],
						PAGE_SIZE, 0);
				pa = sg_phys(sg);
				usnic_dbg("va: 0x%lx pa: %pa\n",
						cur_base + i*PAGE_SIZE, &pa);
			}
			cur_base += chunk->nents * PAGE_SIZE;
			ret -= chunk->nents;
			off += chunk->nents;
			list_add_tail(&chunk->list, chunk_list);
		}

		ret = 0;
	}

out:
	if (ret < 0)
		usnic_uiom_put_pages(chunk_list, 0);
	else
		current->mm->locked_vm = locked;

	up_write(&current->mm->mmap_sem);
	free_page((unsigned long) page_list);
	return ret;
}

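/*
 * Unmap every interval in the sorted list from the PD's IOMMU domain,
 * one page at a time (see the RH 970401 workaround note in the loop).
 */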
static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	long unsigned va, size;

	list_for_each_entry_safe(interval, tmp, intervals, link) {
		va = interval->start << PAGE_SHIFT;
		size = ((interval->last - interval->start) + 1) << PAGE_SHIFT;
		while (size > 0) {
			/* Workaround for RH 970401 */
			usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);
			iommu_unmap(pd->domain, va, PAGE_SIZE);
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}

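/*
 * Tear down a registration under pd->lock: remove its VPN intervals from
 * the PD's interval tree, unmap them from the IOMMU domain, and release
 * the pinned pages, dirtying them only if the mapping was writable.
 */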
static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
					struct usnic_uiom_reg *uiomr,
					int dirty)
{
	int npages;
	unsigned long vpn_start, vpn_last;
	struct usnic_uiom_interval_node *interval, *tmp;
	int writable = 0;
	LIST_HEAD(rm_intervals);

	npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
	vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	spin_lock(&pd->lock);
	usnic_uiom_remove_interval(&pd->rb_root, vpn_start,
					vpn_last, &rm_intervals);
	usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);

	list_for_each_entry_safe(interval, tmp, &rm_intervals, link) {
		if (interval->flags & IOMMU_WRITE)
			writable = 1;
		list_del(&interval->link);
		kfree(interval);
	}

	usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable);
	spin_unlock(&pd->lock);
}

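/*
 * Walk the registration's scatterlist chunks and IOMMU-map each interval,
 * merging physically contiguous pages into a single iommu_map() call.
 * On failure, everything mapped so far is unmapped.
 */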
static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_reg *uiomr)
{
	int i, err;
	size_t size;
	struct usnic_uiom_chunk *chunk;
	struct usnic_uiom_interval_node *interval_node;
	dma_addr_t pa;
	dma_addr_t pa_start = 0;
	dma_addr_t pa_end = 0;
	long int va_start = -EINVAL;
	struct usnic_uiom_pd *pd = uiomr->pd;
	long int va = uiomr->va & PAGE_MASK;
	int flags = IOMMU_READ | IOMMU_CACHE;

	flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
	chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
					list);
	list_for_each_entry(interval_node, intervals, link) {
iter_chunk:
		for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
			pa = sg_phys(&chunk->page_list[i]);
			if ((va >> PAGE_SHIFT) < interval_node->start)
				continue;

			if ((va >> PAGE_SHIFT) == interval_node->start) {
				/* First page of the interval */
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			WARN_ON(va_start == -EINVAL);

			if ((pa_end + PAGE_SIZE != pa) &&
					(pa != pa_start)) {
				/* PAs are not contiguous */
				size = pa_end - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
							size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			if ((va >> PAGE_SHIFT) == interval_node->last) {
				/* Last page of the interval */
				size = pa - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				break;
			}

			if (pa != pa_start)
				pa_end += PAGE_SIZE;
		}

		if (i == chunk->nents) {
			/*
			 * Hit last entry of the chunk,
			 * hence advance to next chunk
			 */
			chunk = list_first_entry(&chunk->list,
						struct usnic_uiom_chunk,
						list);
			goto iter_chunk;
		}
	}

	return 0;

err_out:
	usnic_uiom_unmap_sorted_intervals(intervals, pd);
	return err;
}

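/*
 * Register a user memory region with the PD: pin the pages, compute which
 * VPNs are not already mapped, map those into the IOMMU domain, and insert
 * the new interval into the PD's interval tree.
 */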
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
						unsigned long addr, size_t size,
						int writable, int dmasync)
{
	struct usnic_uiom_reg *uiomr;
	unsigned long va_base, vpn_start, vpn_last;
	unsigned long npages;
	int offset, err;
	LIST_HEAD(sorted_diff_intervals);

	/*
	 * The Intel IOMMU map operation throws an error if a translation
	 * entry is changed from read-only to writable.  This module cannot
	 * unmap and then remap the entry after fixing the permission,
	 * because that would open a small window in which hardware DMA may
	 * page fault.  Hence, make all entries writable.
	 */
	writable = 1;

	va_base = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
	vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL);
	if (!uiomr)
		return ERR_PTR(-ENOMEM);

	uiomr->va = va_base;
	uiomr->offset = offset;
	uiomr->length = size;
	uiomr->writable = writable;
	uiomr->pd = pd;

	err = usnic_uiom_get_pages(addr, size, writable, dmasync,
					&uiomr->chunk_list);
	if (err) {
		usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_free_uiomr;
	}

	spin_lock(&pd->lock);
	err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
						(writable) ? IOMMU_WRITE : 0,
						IOMMU_WRITE,
						&pd->rb_root,
						&sorted_diff_intervals);
	if (err) {
		usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_put_pages;
	}

	err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
	if (err) {
		usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_put_intervals;
	}

	err = usnic_uiom_insert_interval(&pd->rb_root, vpn_start, vpn_last,
					(writable) ? IOMMU_WRITE : 0);
	if (err) {
		usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_unmap_intervals;
	}

	usnic_uiom_put_interval_set(&sorted_diff_intervals);
	spin_unlock(&pd->lock);

	return uiomr;

out_unmap_intervals:
	usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
out_put_intervals:
	usnic_uiom_put_interval_set(&sorted_diff_intervals);
out_put_pages:
	usnic_uiom_put_pages(&uiomr->chunk_list, 0);
	spin_unlock(&pd->lock);
out_free_uiomr:
	kfree(uiomr);
	return ERR_PTR(err);
}

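/*
 * Release a registration and return its pages to the caller's locked_vm
 * accounting.  When called from the file-release path (closing), mmap_sem
 * may already be held, so the accounting may be deferred to the usnic
 * workqueue.
 */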
void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
{
	struct mm_struct *mm;
	unsigned long diff;

	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);

	mm = get_task_mm(current);
	if (!mm) {
		kfree(uiomr);
		return;
	}

	diff = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;

	/*
	 * We may be called with the mm's mmap_sem already held.  This
	 * can happen when a userspace munmap() is the call that drops
	 * the last reference to our file and calls our release
	 * method.  If there are memory regions to destroy, we'll end
	 * up here and not be able to take the mmap_sem.  In that case
	 * we defer the vm_locked accounting to the system workqueue.
	 */
	if (closing) {
		if (!down_write_trylock(&mm->mmap_sem)) {
			INIT_WORK(&uiomr->work, usnic_uiom_reg_account);
			uiomr->mm = mm;
			uiomr->diff = diff;

			queue_work(usnic_uiom_wq, &uiomr->work);
			return;
		}
	} else
		down_write(&mm->mmap_sem);

	current->mm->locked_vm -= diff;
	up_write(&mm->mmap_sem);
	mmput(mm);
	kfree(uiomr);
}

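/*
 * Allocate a protection domain backed by a fresh IOMMU domain on the PCI
 * bus and install the usnic fault handler.
 */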
struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
{
	struct usnic_uiom_pd *pd;
	void *domain;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->domain = domain = iommu_domain_alloc(&pci_bus_type);
	if (!domain) {
		usnic_err("Failed to allocate IOMMU domain");
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);

	spin_lock_init(&pd->lock);
	INIT_LIST_HEAD(&pd->devs);

	return pd;
}

void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)
{
	iommu_domain_free(pd->domain);
	kfree(pd);
}

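/*
 * Attach a device to the PD's IOMMU domain and add it to the PD's device
 * list.  The IOMMU must support cache-coherent DMA; otherwise the attach
 * is rolled back.
 */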
int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int err;

	uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_ATOMIC);
	if (!uiom_dev)
		return -ENOMEM;
	uiom_dev->dev = dev;

	err = iommu_attach_device(pd->domain, dev);
	if (err)
		goto out_free_dev;

	if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
		usnic_err("IOMMU of %s does not support cache coherency\n",
				dev_name(dev));
		err = -EINVAL;
		goto out_detach_device;
	}

	spin_lock(&pd->lock);
	list_add_tail(&uiom_dev->link, &pd->devs);
	pd->dev_cnt++;
	spin_unlock(&pd->lock);

	return 0;

out_detach_device:
	iommu_detach_device(pd->domain, dev);
out_free_dev:
	kfree(uiom_dev);
	return err;
}

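/*
 * Remove a device from the PD's device list and detach it from the IOMMU
 * domain.
 */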
void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int found = 0;

	spin_lock(&pd->lock);
	list_for_each_entry(uiom_dev, &pd->devs, link) {
		if (uiom_dev->dev == dev) {
			found = 1;
			break;
		}
	}

	if (!found) {
		usnic_err("Unable to free dev %s - not found\n",
				dev_name(dev));
		spin_unlock(&pd->lock);
		return;
	}

	list_del(&uiom_dev->link);
	kfree(uiom_dev);
	pd->dev_cnt--;
	spin_unlock(&pd->lock);

	iommu_detach_device(pd->domain, dev);
}

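/*
 * Return a NULL-terminated array of the devices currently attached to the
 * PD; the caller releases it with usnic_uiom_free_dev_list().
 */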
struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_dev *uiom_dev;
	struct device **devs;
	int i = 0;

	spin_lock(&pd->lock);
	devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
	if (!devs) {
		devs = ERR_PTR(-ENOMEM);
		goto out;
	}

	list_for_each_entry(uiom_dev, &pd->devs, link)
		devs[i++] = uiom_dev->dev;
out:
	spin_unlock(&pd->lock);
	return devs;
}

void usnic_uiom_free_dev_list(struct device **devs)
{
	kfree(devs);
}

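/*
 * Module setup: refuse to load without an IOMMU on the PCI bus, then create
 * the workqueue used for deferred locked_vm accounting.
 */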
int usnic_uiom_init(char *drv_name)
{
	if (!iommu_present(&pci_bus_type)) {
		usnic_err("IOMMU required but not present or enabled.  USNIC QPs will not function w/o enabling IOMMU\n");
		return -EPERM;
	}

	usnic_uiom_wq = create_workqueue(drv_name);
	if (!usnic_uiom_wq) {
		usnic_err("Unable to alloc wq for drv %s\n", drv_name);
		return -ENOMEM;
	}

	return 0;
}

void usnic_uiom_fini(void)
{
	flush_workqueue(usnic_uiom_wq);
	destroy_workqueue(usnic_uiom_wq);
}