drm/nouveau: port all engines to new engine module format
drivers/gpu/drm/nouveau/nouveau_prime.c
/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include <nouveau_drm.h>

#include <linux/dma-buf.h>

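/* Exporter callback: build a scatterlist from the buffer's TTM pages
 * and DMA-map it for the importing device.
 */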
static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
                                                enum dma_data_direction dir)
{
        struct nouveau_bo *nvbo = attachment->dmabuf->priv;
        struct drm_device *dev = nvbo->gem->dev;
        int npages = nvbo->bo.num_pages;
        struct sg_table *sg;
        int nents;

        mutex_lock(&dev->struct_mutex);
        sg = drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
        nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
        mutex_unlock(&dev->struct_mutex);
        return sg;
}

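/* Exporter callback: undo nouveau_gem_map_dma_buf() once the importer
 * is done with the scatterlist.
 */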
static void nouveau_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                      struct sg_table *sg, enum dma_data_direction dir)
{
        dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
        sg_free_table(sg);
        kfree(sg);
}

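/* Called when the last reference to the dma-buf goes away: drop the
 * GEM reference taken at export time.
 */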
static void nouveau_gem_dmabuf_release(struct dma_buf *dma_buf)
{
        struct nouveau_bo *nvbo = dma_buf->priv;

        if (nvbo->gem->export_dma_buf == dma_buf) {
                nvbo->gem->export_dma_buf = NULL;
                drm_gem_object_unreference_unlocked(nvbo->gem);
        }
}

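/* Per-page CPU access is not implemented; these stubs only satisfy the
 * mandatory entries in dma_buf_ops.
 */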
static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
        return NULL;
}

static void nouveau_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

static void *nouveau_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
        return NULL;
}

static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

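/* Userspace mmap of the dma-buf fd is not supported. */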
static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
        return -EINVAL;
}

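/* Kernel-space vmap of the whole buffer.  The mapping is reference
 * counted under dev->struct_mutex, so nested vmap/vunmap pairs share a
 * single ttm_bo_kmap() mapping.
 */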
static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
{
        struct nouveau_bo *nvbo = dma_buf->priv;
        struct drm_device *dev = nvbo->gem->dev;
        int ret;

        mutex_lock(&dev->struct_mutex);
        if (nvbo->vmapping_count) {
                nvbo->vmapping_count++;
                goto out_unlock;
        }

        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
                          &nvbo->dma_buf_vmap);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ERR_PTR(ret);
        }
        nvbo->vmapping_count = 1;
out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return nvbo->dma_buf_vmap.virtual;
}

static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
        struct nouveau_bo *nvbo = dma_buf->priv;
        struct drm_device *dev = nvbo->gem->dev;

        mutex_lock(&dev->struct_mutex);
        nvbo->vmapping_count--;
        if (nvbo->vmapping_count == 0)
                ttm_bo_kunmap(&nvbo->dma_buf_vmap);
        mutex_unlock(&dev->struct_mutex);
}

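/* dma-buf operations used for every buffer exported by nouveau. */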
static const struct dma_buf_ops nouveau_dmabuf_ops = {
        .map_dma_buf = nouveau_gem_map_dma_buf,
        .unmap_dma_buf = nouveau_gem_unmap_dma_buf,
        .release = nouveau_gem_dmabuf_release,
        .kmap = nouveau_gem_kmap,
        .kmap_atomic = nouveau_gem_kmap_atomic,
        .kunmap = nouveau_gem_kunmap,
        .kunmap_atomic = nouveau_gem_kunmap_atomic,
        .mmap = nouveau_gem_prime_mmap,
        .vmap = nouveau_gem_prime_vmap,
        .vunmap = nouveau_gem_prime_vunmap,
};

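/* Wrap an imported scatterlist in a new GART-only nouveau_bo and
 * allocate the GEM object that exposes it to the rest of the driver.
 */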
static int
nouveau_prime_new(struct drm_device *dev,
                  size_t size,
                  struct sg_table *sg,
                  struct nouveau_bo **pnvbo)
{
        struct nouveau_bo *nvbo;
        u32 flags = 0;
        int ret;

        flags = TTM_PL_FLAG_TT;

        ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
                             sg, pnvbo);
        if (ret)
                return ret;
        nvbo = *pnvbo;

        /* we restrict allowed domains on nv50+ to only the types
         * that were requested at creation time.  not possible on
         * earlier chips without busting the ABI.
         */
        nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
        nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
        if (!nvbo->gem) {
                nouveau_bo_ref(NULL, pnvbo);
                return -ENOMEM;
        }

        nvbo->gem->driver_private = nvbo;
        return 0;
}

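/* Export a GEM object as a dma-buf.  The buffer is pinned into GART so
 * its backing pages stay resident while importers use it.
 */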
struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
                                         struct drm_gem_object *obj, int flags)
{
        struct nouveau_bo *nvbo = nouveau_gem_object(obj);
        int ret = 0;

        /* pin buffer into GTT */
        ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
        if (ret)
                return ERR_PTR(-EINVAL);

        return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
}

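/* Import a dma-buf as a GEM object.  A buffer that nouveau itself
 * exported to the same device is short-circuited back to its existing
 * GEM object; anything else gets attached and mapped.
 */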
struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
                                                struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sg;
        struct nouveau_bo *nvbo;
        int ret;

        if (dma_buf->ops == &nouveau_dmabuf_ops) {
                nvbo = dma_buf->priv;
                if (nvbo->gem) {
                        if (nvbo->gem->dev == dev) {
                                drm_gem_object_reference(nvbo->gem);
                                return nvbo->gem;
                        }
                }
        }
        /* need to attach */
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_PTR(PTR_ERR(attach));

        sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
                goto fail_detach;
        }

        ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
        if (ret)
                goto fail_unmap;

        nvbo->gem->import_attach = attach;

        return nvbo->gem;

fail_unmap:
        dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        return ERR_PTR(ret);
}