/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */
#include "drmP.h"
#include "drm.h"

#include "radeon.h"
#include "radeon_drm.h"

#include <linux/dma-buf.h>

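/*
 * Called when an importing device maps the attached buffer: build an
 * sg_table from the buffer's backing TTM pages and DMA-map it for the
 * attaching device.
 */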
static struct sg_table *radeon_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					       enum dma_data_direction dir)
{
	struct radeon_bo *bo = attachment->dmabuf->priv;
	struct drm_device *dev = bo->rdev->ddev;
	int npages = bo->tbo.num_pages;
	struct sg_table *sg;
	int nents;

	mutex_lock(&dev->struct_mutex);
	sg = drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
	mutex_unlock(&dev->struct_mutex);
	return sg;
}

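/*
 * Undo radeon_gem_map_dma_buf(): DMA-unmap the scatterlist and free the
 * sg_table allocated for this attachment.
 */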
static void radeon_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				     struct sg_table *sg, enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);
}

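/*
 * Called when the last reference to the dma-buf is dropped; drop the GEM
 * reference that was taken when the buffer was exported.
 */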
static void radeon_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct radeon_bo *bo = dma_buf->priv;

	if (bo->gem_base.export_dma_buf == dma_buf) {
		DRM_ERROR("unreference dmabuf %p\n", &bo->gem_base);
		bo->gem_base.export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(&bo->gem_base);
	}
}

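/*
 * Per-page CPU kmap access through the dma-buf interface is not
 * supported; these stubs only keep the dma_buf_ops table complete.
 */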
static void *radeon_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void radeon_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *radeon_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

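/* CPU mmap of the exported buffer is not supported. */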
static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

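/*
 * Kernel virtual mapping of the whole buffer.  The mapping is created on
 * first use via ttm_bo_kmap() and reference-counted with vmapping_count
 * so that concurrent users share a single vmap; dev->struct_mutex
 * serializes the count and the map/unmap.
 */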
static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
{
	struct radeon_bo *bo = dma_buf->priv;
	struct drm_device *dev = bo->rdev->ddev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	if (bo->vmapping_count) {
		bo->vmapping_count++;
		goto out_unlock;
	}

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ERR_PTR(ret);
	}
	bo->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return bo->dma_buf_vmap.virtual;
}

static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct radeon_bo *bo = dma_buf->priv;
	struct drm_device *dev = bo->rdev->ddev;

	mutex_lock(&dev->struct_mutex);
	bo->vmapping_count--;
	if (bo->vmapping_count == 0)
		ttm_bo_kunmap(&bo->dma_buf_vmap);
	mutex_unlock(&dev->struct_mutex);
}

static const struct dma_buf_ops radeon_dmabuf_ops = {
	.map_dma_buf = radeon_gem_map_dma_buf,
	.unmap_dma_buf = radeon_gem_unmap_dma_buf,
	.release = radeon_gem_dmabuf_release,
	.kmap = radeon_gem_kmap,
	.kmap_atomic = radeon_gem_kmap_atomic,
	.kunmap = radeon_gem_kunmap,
	.kunmap_atomic = radeon_gem_kunmap_atomic,
	.mmap = radeon_gem_prime_mmap,
	.vmap = radeon_gem_prime_vmap,
	.vunmap = radeon_gem_prime_vunmap,
};

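/*
 * Create a GTT-domain BO backed by an imported sg_table and track it on
 * the device's GEM object list.
 */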
static int radeon_prime_create(struct drm_device *dev,
			       size_t size,
			       struct sg_table *sg,
			       struct radeon_bo **pbo)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *bo;
	int ret;

	ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
			       RADEON_GEM_DOMAIN_GTT, sg, pbo);
	if (ret)
		return ret;
	bo = *pbo;
	bo->gem_base.driver_private = bo;

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&bo->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

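/*
 * Export a GEM object as a dma-buf.  The BO is pinned into GTT so its
 * pages stay resident while external devices hold mappings of it.
 */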
struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *obj,
					int flags)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	int ret = 0;

	ret = radeon_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ERR_PTR(ret);

	/* pin buffer into GTT */
	ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
	if (ret) {
		radeon_bo_unreserve(bo);
		return ERR_PTR(ret);
	}
	radeon_bo_unreserve(bo);
	return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
}

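/*
 * Import a dma-buf as a GEM object.  If the dma-buf was exported by this
 * driver for this device, just take another reference on the existing
 * object; otherwise attach to it, map it, and wrap the resulting
 * sg_table in a new GTT BO.
 */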
struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct radeon_bo *bo;
	int ret;

	if (dma_buf->ops == &radeon_dmabuf_ops) {
		bo = dma_buf->priv;
		if (bo->gem_base.dev == dev) {
			drm_gem_object_reference(&bo->gem_base);
			return &bo->gem_base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
	if (ret)
		goto fail_unmap;

	bo->gem_base.import_attach = attach;

	return &bo->gem_base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}