/*
 * Copyright (C) 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */

#include "drmP.h"
#include "udl_drv.h"
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

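/* Allocate a udl GEM object of @size bytes and initialize its embedded
 * drm_gem_object. Returns NULL on allocation or init failure. */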
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
					    size_t size)
{
	struct udl_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}

	return obj;
}

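/* Create a GEM object of page-aligned @size and return a handle for it in
 * @handle_p. The local reference is dropped once the handle has been
 * created, so on success the handle holds the only reference. */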
static int
udl_gem_create(struct drm_file *file,
	       struct drm_device *dev,
	       uint64_t size,
	       uint32_t *handle_p)
{
	struct udl_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);

	obj = udl_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		kfree(obj);
		return ret;
	}

	drm_gem_object_unreference(&obj->base);
	*handle_p = handle;
	return 0;
}

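/* Dumb-buffer allocation entry point: derive pitch and size from the
 * requested width/height/bpp, then create a backing GEM object. */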
int udl_dumb_create(struct drm_file *file,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	/* round bytes-per-pixel up so sub-byte bpp values get a non-zero pitch */
	args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	args->size = args->pitch * args->height;
	return udl_gem_create(file, dev,
			      args->size, &args->handle);
}

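/*
 * For illustration, a minimal userspace sketch of the dumb-buffer flow
 * these entry points serve (error handling omitted; assumes the udl
 * device is card0):
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 *	...
 *	struct drm_mode_destroy_dumb destroy = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy);
 */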
int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
		     uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

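/* fops->mmap hook: let the DRM core set up the mapping, then switch the
 * VMA from a PFN map to a mixed map so udl_gem_fault can insert
 * individual shmem pages with vm_insert_page(). */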
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	return ret;
}

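/* Page-fault handler for mmapped GEM objects: look up the backing page
 * for the faulting address and insert it into the VMA. */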
int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
	struct page *page;
	unsigned int page_offset;
	int ret = 0;

	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	if (!obj->pages)
		return VM_FAULT_SIGBUS;

	page = obj->pages[page_offset];
	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (ret) {
	case -EAGAIN:
		set_need_resched();
		/* fall through */
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

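/* udl allocates its GEM objects via udl_gem_alloc_object(), never through
 * the generic init hook, so reaching this is a driver bug. */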
int udl_gem_init_object(struct drm_gem_object *obj)
{
	BUG();

	return 0;
}

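/* Pin the object's backing pages by reading them in from the shmem
 * mapping. A no-op if the page array is already populated. */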
static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
{
	int page_count, i;
	struct page *page;
	struct inode *inode;
	struct address_space *mapping;

	if (obj->pages)
		return 0;

	page_count = obj->base.size / PAGE_SIZE;
	BUG_ON(obj->pages != NULL);
	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
	if (obj->pages == NULL)
		return -ENOMEM;

	inode = obj->base.filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(page))
			goto err_pages;
		obj->pages[i] = page;
	}

	return 0;
err_pages:
	while (i--)
		page_cache_release(obj->pages[i]);
	drm_free_large(obj->pages);
	obj->pages = NULL;
	return PTR_ERR(page);
}

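/* Release the page array. Pages of imported (PRIME) objects are owned by
 * the exporter, so only the array itself is freed for those. */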
static void udl_gem_put_pages(struct udl_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int i;

	if (obj->base.import_attach) {
		drm_free_large(obj->pages);
		obj->pages = NULL;
		return;
	}

	for (i = 0; i < page_count; i++)
		page_cache_release(obj->pages[i]);

	drm_free_large(obj->pages);
	obj->pages = NULL;
}

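/* Map the object into kernel address space: via dma_buf_vmap() for
 * imported buffers, otherwise by pinning the shmem pages and vmap()ing
 * them. */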
int udl_gem_vmap(struct udl_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int ret;

	if (obj->base.import_attach) {
		ret = dma_buf_begin_cpu_access(obj->base.import_attach->dmabuf,
					       0, obj->base.size, DMA_BIDIRECTIONAL);
		if (ret)
			return -EINVAL;

		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
		if (!obj->vmapping)
			return -ENOMEM;
		return 0;
	}

	ret = udl_gem_get_pages(obj, GFP_KERNEL);
	if (ret)
		return ret;

	obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
	if (!obj->vmapping)
		return -ENOMEM;
	return 0;
}

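/* Undo udl_gem_vmap(): release the dma-buf vmap and end CPU access for
 * imported buffers, otherwise vunmap() and drop the pinned pages. */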
void udl_gem_vunmap(struct udl_gem_object *obj)
{
	if (obj->base.import_attach) {
		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
		dma_buf_end_cpu_access(obj->base.import_attach->dmabuf, 0,
				       obj->base.size, DMA_BIDIRECTIONAL);
		return;
	}

	if (obj->vmapping)
		vunmap(obj->vmapping);

	udl_gem_put_pages(obj);
}

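/* GEM free callback: tear down any vmapping, PRIME attachment, pinned
 * pages, and mmap offset the object still holds. */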
void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct udl_gem_object *obj = to_udl_bo(gem_obj);

	if (obj->vmapping)
		udl_gem_vunmap(obj);

	if (gem_obj->import_attach)
		drm_prime_gem_destroy(gem_obj, obj->sg);

	if (obj->pages)
		udl_gem_put_pages(obj);

	if (gem_obj->map_list.map)
		drm_gem_free_mmap_offset(gem_obj);
}

/* The dumb interface doesn't work with the GEM straight mmap
 * interface; it expects to do mmap on the drm fd, like normal. */
int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
		 uint32_t handle, uint64_t *offset)
{
	struct udl_gem_object *gobj;
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	gobj = to_udl_bo(obj);

	ret = udl_gem_get_pages(gobj, GFP_KERNEL);
	if (ret)
		goto out;
	if (!gobj->base.map_list.map) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	*offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT;

out:
	drm_gem_object_unreference(&gobj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

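/* Wrap an imported sg_table in a new udl GEM object and build its page
 * array from the scatterlist. */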
static int udl_prime_create(struct drm_device *dev,
			    size_t size,
			    struct sg_table *sg,
			    struct udl_gem_object **obj_p)
{
	struct udl_gem_object *obj;
	int npages;

	npages = size / PAGE_SIZE;

	*obj_p = NULL;
	obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
	if (!obj)
		return -ENOMEM;

	obj->sg = sg;
	obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (obj->pages == NULL) {
		DRM_ERROR("obj pages is NULL %d\n", npages);
		/* don't leak the GEM object on failure */
		drm_gem_object_release(&obj->base);
		kfree(obj);
		return -ENOMEM;
	}

	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);

	*obj_p = obj;
	return 0;
}

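/* PRIME import: attach to the dma-buf, map it into an sg_table, and wrap
 * the result in a udl GEM object. */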
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct udl_gem_object *uobj;
	int ret;

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
	if (ret)
		goto fail_unmap;

	uobj->base.import_attach = attach;

	return &uobj->base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}