/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
	} while (0)

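/*
 * Note: module_param() above publishes the debug level with mode 0644, so
 * besides being set at load time it can be changed at runtime through
 * /sys/module/videobuf2_dma_sg/parameters/debug.
 */
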
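/*
 * struct vb2_dma_sg_buf - bookkeeping for one scatter/gather buffer
 * @vaddr:	kernel mapping, created on demand by vb2_dma_sg_vaddr()
 * @pages:	array of pages backing the buffer
 * @write:	nonzero if the userspace pages were got for writing
 * @offset:	offset of a USERPTR buffer within its first page
 * @sg_desc:	scatterlist descriptor handed out via the cookie op
 * @refcount:	users of this buffer (each mmap mapping holds a reference)
 * @handler:	common vm_area helper used to drop @refcount on unmap
 */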
struct vb2_dma_sg_buf {
	void				*vaddr;
	struct page			**pages;
	int				write;
	int				offset;
	struct vb2_dma_sg_desc		sg_desc;
	atomic_t			refcount;
	struct vb2_vmarea_handler	handler;
};

static void vb2_dma_sg_put(void *buf_priv);

static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
			      gfp_t gfp_flags)
{
	struct vb2_dma_sg_buf *buf;
	int i;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = 0;
	buf->offset = 0;
	buf->sg_desc.size = size;
	buf->sg_desc.num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages *
				      sizeof(*buf->sg_desc.sglist));
	if (!buf->sg_desc.sglist)
		goto fail_sglist_alloc;
	sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);

	buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	for (i = 0; i < buf->sg_desc.num_pages; ++i) {
		buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO |
					   __GFP_NOWARN | gfp_flags);
		if (NULL == buf->pages[i])
			goto fail_pages_alloc;
		sg_set_page(&buf->sg_desc.sglist[i],
			    buf->pages[i], PAGE_SIZE, 0);
	}

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->sg_desc.num_pages);
	return buf;

fail_pages_alloc:
	while (--i >= 0)
		__free_page(buf->pages[i]);
	kfree(buf->pages);

fail_pages_array_alloc:
	vfree(buf->sg_desc.sglist);

fail_sglist_alloc:
	kfree(buf);
	return NULL;
}

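/*
 * Note: the gfp_flags argument to the alloc op above is passed down by the
 * vb2 core from the queue's gfp_flags field, so a driver can, for example,
 * request GFP_DMA pages for hardware with addressing restrictions.
 */
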
static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->sg_desc.num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->sg_desc.num_pages);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
		vfree(buf->sg_desc.sglist);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		kfree(buf);
	}
}

static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size, int write)
{
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user, i;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = write;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->sg_desc.size = size;

	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->sg_desc.num_pages = last - first + 1;

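	/*
	 * Worked example (assuming 4 KiB pages): for vaddr = 0x10100 and
	 * size = 8192, first = 0x10 and last = 0x12, so num_pages is 3:
	 * the buffer starts 0x100 bytes into its first page and therefore
	 * spills 0x100 bytes into a third page. The first sg entry below
	 * gets length PAGE_SIZE - 0x100 at offset 0x100, the second a
	 * full PAGE_SIZE, and the last the remaining 0x100 bytes.
	 */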
	buf->sg_desc.sglist = vzalloc(
		buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
	if (!buf->sg_desc.sglist)
		goto userptr_fail_sglist_alloc;

	sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);

	buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_pages_array_alloc;

	num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->sg_desc.num_pages,
					     write,
					     1, /* force */
					     buf->pages,
					     NULL);

	if (num_pages_from_user != buf->sg_desc.num_pages)
		goto userptr_fail_get_user_pages;

	sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0],
		    PAGE_SIZE - buf->offset, buf->offset);
	size -= PAGE_SIZE - buf->offset;
	for (i = 1; i < buf->sg_desc.num_pages; ++i) {
		sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i],
			    min_t(size_t, PAGE_SIZE, size), 0);
		size -= min_t(size_t, PAGE_SIZE, size);
	}
	return buf;

userptr_fail_get_user_pages:
	dprintk(1, "get_user_pages requested/got: %d/%d\n",
		buf->sg_desc.num_pages, num_pages_from_user);
	while (--num_pages_from_user >= 0)
		put_page(buf->pages[num_pages_from_user]);
	kfree(buf->pages);

userptr_fail_pages_array_alloc:
	vfree(buf->sg_desc.sglist);

userptr_fail_sglist_alloc:
	kfree(buf);
	return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->sg_desc.num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->sg_desc.num_pages);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
	while (--i >= 0) {
		if (buf->write)
			set_page_dirty_lock(buf->pages[i]);
		put_page(buf->pages[i]);
	}
	vfree(buf->sg_desc.sglist);
	kfree(buf->pages);
	kfree(buf);
}

static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr)
		buf->vaddr = vm_map_ram(buf->pages,
					buf->sg_desc.num_pages,
					-1,
					PAGE_KERNEL);

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr + buf->offset;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return &buf->sg_desc;
}

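/*
 * Illustrative sketch of how a driver consumes the cookie: the
 * vb2_dma_sg_plane_desc() helper from <media/videobuf2-dma-sg.h> wraps
 * vb2_plane_cookie() and returns the vb2_dma_sg_desc set up above, whose
 * sglist can then be mapped for DMA. The device pointer and error handling
 * here are hypothetical driver code.
 *
 *	struct vb2_dma_sg_desc *desc = vb2_dma_sg_plane_desc(vb, 0);
 *	int nents;
 *
 *	nents = dma_map_sg(dev, desc->sglist, desc->num_pages,
 *			   DMA_FROM_DEVICE);
 *	if (!nents)
 *		return -EIO;
 */
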
const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

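/*
 * Example of selecting these ops in a driver (a minimal sketch; the my_*
 * names are hypothetical, the vb2_queue fields are the standard ones):
 *
 *	struct vb2_queue *q = &my_dev->queue;
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR;
 *	q->drv_priv = my_dev;
 *	q->buf_struct_size = sizeof(struct my_buffer);
 *	q->ops = &my_qops;
 *	q->mem_ops = &vb2_dma_sg_memops;
 *	ret = vb2_queue_init(q);
 */
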
MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");