untangling process_vm_..., part 2
[deliverable/linux.git] mm/process_vm_access.c
/*
 * linux/mm/process_vm_access.c
 *
 * Copyright (C) 2010-2011 Christopher Yeoh <cyeoh@au1.ibm.com>, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif

/**
 * process_vm_rw_pages - read/write pages from task specified
 * @task: task to read/write from
 * @mm: mm for task
 * @process_pages: array that can store at least
 *	nr_pages_to_copy struct page pointers
 * @pa: address of page in task to start copying from/to
 * @start_offset: offset in page to start copying from/to
 * @len: number of bytes to copy
 * @iovp: pointer into the local iovec array specifying where to copy
 *	to/from; updated to the current iovec on return
 * @lvec_cnt: number of elements in iovec array
 * @lvec_current: index in iovec array we are up to
 * @lvec_offset: offset in bytes from current iovec iov_base we are up to
 * @vm_write: 0 means copy from, 1 means copy to
 * @nr_pages_to_copy: number of pages to copy
 * @bytes_copied: returns number of bytes successfully copied
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_pages(struct task_struct *task,
			       struct mm_struct *mm,
			       struct page **process_pages,
			       unsigned long pa,
			       unsigned long start_offset,
			       unsigned long len,
			       const struct iovec **iovp,
			       unsigned long lvec_cnt,
			       unsigned long *lvec_current,
			       size_t *lvec_offset,
			       int vm_write,
			       unsigned int nr_pages_to_copy,
			       ssize_t *bytes_copied)
{
	int pages_pinned;
	void *target_kaddr;
	int pgs_copied = 0;
	int j;
	int ret;
	ssize_t bytes_to_copy;
	ssize_t rc = 0;
	const struct iovec *iov = *iovp;

	*bytes_copied = 0;

	/* Get the pages we're interested in */
	down_read(&mm->mmap_sem);
	pages_pinned = get_user_pages(task, mm, pa,
				      nr_pages_to_copy,
				      vm_write, 0, process_pages, NULL);
	up_read(&mm->mmap_sem);

	if (pages_pinned != nr_pages_to_copy) {
		rc = -EFAULT;
		goto end;
	}

	/* Do the copy for each page */
	for (pgs_copied = 0;
	     (pgs_copied < nr_pages_to_copy) && (*lvec_current < lvec_cnt);
	     pgs_copied++) {
		/* Make sure we have a non zero length iovec */
		while (*lvec_current < lvec_cnt
		       && iov->iov_len == 0) {
			iov++;
			(*lvec_current)++;
		}
		if (*lvec_current == lvec_cnt)
			break;

		/*
		 * Will copy smallest of:
		 * - bytes remaining in page
		 * - bytes remaining in destination iovec
		 */
		bytes_to_copy = min_t(ssize_t, PAGE_SIZE - start_offset,
				      len - *bytes_copied);
		bytes_to_copy = min_t(ssize_t, bytes_to_copy,
				      iov->iov_len
				      - *lvec_offset);

		target_kaddr = kmap(process_pages[pgs_copied]) + start_offset;

		if (vm_write)
			ret = copy_from_user(target_kaddr,
					     iov->iov_base
					     + *lvec_offset,
					     bytes_to_copy);
		else
			ret = copy_to_user(iov->iov_base
					   + *lvec_offset,
					   target_kaddr, bytes_to_copy);
		kunmap(process_pages[pgs_copied]);
		if (ret) {
			*bytes_copied += bytes_to_copy - ret;
			pgs_copied++;
			rc = -EFAULT;
			goto end;
		}
		*bytes_copied += bytes_to_copy;
		*lvec_offset += bytes_to_copy;
		if (*lvec_offset == iov->iov_len) {
			/*
			 * Need to copy remaining part of page into the
			 * next iovec if there are any bytes left in page
			 */
			(*lvec_current)++;
			iov++;
			*lvec_offset = 0;
			start_offset = (start_offset + bytes_to_copy)
				       % PAGE_SIZE;
			if (start_offset)
				pgs_copied--;
		} else {
			start_offset = 0;
		}
	}

end:
	if (vm_write) {
		for (j = 0; j < pages_pinned; j++) {
			if (j < pgs_copied)
				set_page_dirty_lock(process_pages[j]);
			put_page(process_pages[j]);
		}
	} else {
		for (j = 0; j < pages_pinned; j++)
			put_page(process_pages[j]);
	}

	*iovp = iov;
	return rc;
}

/* Maximum number of bytes (two pages' worth) kmalloc'd to hold
   struct page pointers during the copy */
#define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)
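/*
 * With a typical 4 KiB PAGE_SIZE and 8-byte pointers this buffer holds
 * roughly 1024 struct page pointers, i.e. up to about 4 MiB of the remote
 * address space can be pinned per get_user_pages() batch; other
 * configurations will differ.
 */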

/**
 * process_vm_rw_single_vec - read/write pages from task specified
 * @addr: start memory address of target process
 * @len: size of area to copy to/from
 * @lvec: iovec array specifying where to copy to/from locally
 * @lvec_cnt: number of elements in iovec array
 * @lvec_current: index in iovec array we are up to
 * @lvec_offset: offset in bytes from current iovec iov_base we are up to
 * @process_pages: array that can store at least
 *	nr_pages_to_copy struct page pointers
 * @mm: mm for task
 * @task: task to read/write from
 * @vm_write: 0 means copy from, 1 means copy to
 * @bytes_copied: returns number of bytes successfully copied
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_single_vec(unsigned long addr,
				    unsigned long len,
				    const struct iovec *lvec,
				    unsigned long lvec_cnt,
				    unsigned long *lvec_current,
				    size_t *lvec_offset,
				    struct page **process_pages,
				    struct mm_struct *mm,
				    struct task_struct *task,
				    int vm_write,
				    ssize_t *bytes_copied)
{
	unsigned long pa = addr & PAGE_MASK;
	unsigned long start_offset = addr - pa;
	unsigned long nr_pages;
	ssize_t bytes_copied_loop;
	ssize_t rc = 0;
	unsigned long nr_pages_copied = 0;
	unsigned long nr_pages_to_copy;
	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
					   / sizeof(struct pages *);
	const struct iovec *iov = lvec + *lvec_current;

	*bytes_copied = 0;

	/* Work out address and page range required */
	if (len == 0)
		return 0;
	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
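	/*
	 * For example, with 4 KiB pages, addr = 0x10ff0 and len = 0x20 end
	 * at 0x1100f, touching pages 0x10 and 0x11, so
	 * nr_pages = 0x11 - 0x10 + 1 = 2.
	 */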

	while ((nr_pages_copied < nr_pages) && (*lvec_current < lvec_cnt)) {
		nr_pages_to_copy = min(nr_pages - nr_pages_copied,
				       max_pages_per_loop);

		rc = process_vm_rw_pages(task, mm, process_pages, pa,
					 start_offset, len,
					 &iov, lvec_cnt,
					 lvec_current, lvec_offset,
					 vm_write, nr_pages_to_copy,
					 &bytes_copied_loop);
		start_offset = 0;
		*bytes_copied += bytes_copied_loop;

		if (rc < 0) {
			return rc;
		} else {
			len -= bytes_copied_loop;
			nr_pages_copied += nr_pages_to_copy;
			pa += nr_pages_to_copy * PAGE_SIZE;
		}
	}

	return rc;
}

/* Maximum number of entries for process pages array
   which lives on stack */
#define PVM_MAX_PP_ARRAY_COUNT 16
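
/*
 * When no single remote iovec spans more than 16 pages, the on-stack
 * pp_stack array in process_vm_rw_core() below is used and no kmalloc()
 * is needed at all.
 */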

/**
 * process_vm_rw_core - core of reading/writing pages from task specified
 * @pid: PID of process to read/write from/to
 * @lvec: iovec array specifying where to copy to/from locally
 * @liovcnt: size of lvec array
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 * Returns the number of bytes read/written or error code. May
 * return fewer bytes than expected if an error occurs during the copying
 * process.
 */
static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
				  unsigned long liovcnt,
				  const struct iovec *rvec,
				  unsigned long riovcnt,
				  unsigned long flags, int vm_write)
{
	struct task_struct *task;
	struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
	struct page **process_pages = pp_stack;
	struct mm_struct *mm;
	unsigned long i;
	ssize_t rc = 0;
	ssize_t bytes_copied_loop;
	ssize_t bytes_copied = 0;
	unsigned long nr_pages = 0;
	unsigned long nr_pages_iov;
	unsigned long iov_l_curr_idx = 0;
	size_t iov_l_curr_offset = 0;
	ssize_t iov_len;

	/*
	 * Work out how many pages of struct pages we're going to need
	 * when eventually calling get_user_pages
	 */
	for (i = 0; i < riovcnt; i++) {
		iov_len = rvec[i].iov_len;
		if (iov_len > 0) {
			nr_pages_iov = ((unsigned long)rvec[i].iov_base
					+ iov_len)
					/ PAGE_SIZE - (unsigned long)rvec[i].iov_base
					/ PAGE_SIZE + 1;
			nr_pages = max(nr_pages, nr_pages_iov);
		}
	}
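	/*
	 * nr_pages ends up as the page span of the largest single remote
	 * iovec, not the sum over all of them: each rvec entry gets its own
	 * process_vm_rw_single_vec() call, so the pages array only has to
	 * cover one entry at a time.
	 */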

	if (nr_pages == 0)
		return 0;

	if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
		/* For reliability don't try to kmalloc more than
		   2 pages worth */
		process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
					      sizeof(struct pages *)*nr_pages),
					GFP_KERNEL);

		if (!process_pages)
			return -ENOMEM;
	}

	/* Get process information */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task) {
		rc = -ESRCH;
		goto free_proc_pages;
	}

	mm = mm_access(task, PTRACE_MODE_ATTACH);
	if (!mm || IS_ERR(mm)) {
		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		/*
		 * Explicitly map EACCES to EPERM as EPERM is a more
		 * appropriate error code for process_vm_readv/writev
		 */
		if (rc == -EACCES)
			rc = -EPERM;
		goto put_task_struct;
	}

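	/*
	 * From here on we hold a reference on both the task and its mm;
	 * they are dropped at the put_task_struct and put_mm labels below.
	 */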
	for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
		rc = process_vm_rw_single_vec(
			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
			lvec, liovcnt, &iov_l_curr_idx, &iov_l_curr_offset,
			process_pages, mm, task, vm_write, &bytes_copied_loop);
		bytes_copied += bytes_copied_loop;
		if (rc != 0) {
			/* If we have managed to copy any data at all then
			   we return the number of bytes copied. Otherwise
			   we return the error code */
			if (bytes_copied)
				rc = bytes_copied;
			goto put_mm;
		}
	}

	rc = bytes_copied;
put_mm:
	mmput(mm);

put_task_struct:
	put_task_struct(task);

free_proc_pages:
	if (process_pages != pp_stack)
		kfree(process_pages);
	return rc;
}

/**
 * process_vm_rw - check iovecs before calling core routine
 * @pid: PID of process to read/write from/to
 * @lvec: iovec array specifying where to copy to/from locally
 * @liovcnt: size of lvec array
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 * Returns the number of bytes read/written or error code. May
 * return fewer bytes than expected if an error occurs during the copying
 * process.
 */
static ssize_t process_vm_rw(pid_t pid,
			     const struct iovec __user *lvec,
			     unsigned long liovcnt,
			     const struct iovec __user *rvec,
			     unsigned long riovcnt,
			     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	ssize_t rc;

	if (flags != 0)
		return -EINVAL;

	/* Check iovecs */
	if (vm_write)
		rc = rw_copy_check_uvector(WRITE, lvec, liovcnt, UIO_FASTIOV,
					   iovstack_l, &iov_l);
	else
		rc = rw_copy_check_uvector(READ, lvec, liovcnt, UIO_FASTIOV,
					   iovstack_l, &iov_l);
	if (rc <= 0)
		goto free_iovecs;

	rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
				   iovstack_r, &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, iov_l, liovcnt, iov_r, riovcnt, flags,
				vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	if (iov_l != iovstack_l)
		kfree(iov_l);

	return rc;
}

SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0);
}

SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
		const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1);
}

#ifdef CONFIG_COMPAT

asmlinkage ssize_t
compat_process_vm_rw(compat_pid_t pid,
		     const struct compat_iovec __user *lvec,
		     unsigned long liovcnt,
		     const struct compat_iovec __user *rvec,
		     unsigned long riovcnt,
		     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	ssize_t rc = -EFAULT;

	if (flags != 0)
		return -EINVAL;

	if (vm_write)
		rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt,
						  UIO_FASTIOV, iovstack_l,
						  &iov_l);
	else
		rc = compat_rw_copy_check_uvector(READ, lvec, liovcnt,
						  UIO_FASTIOV, iovstack_l,
						  &iov_l);
	if (rc <= 0)
		goto free_iovecs;
	rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
					  UIO_FASTIOV, iovstack_r,
					  &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, iov_l, liovcnt, iov_r, riovcnt, flags,
				vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	if (iov_l != iovstack_l)
		kfree(iov_l);
	return rc;
}

asmlinkage ssize_t
compat_sys_process_vm_readv(compat_pid_t pid,
			    const struct compat_iovec __user *lvec,
			    unsigned long liovcnt,
			    const struct compat_iovec __user *rvec,
			    unsigned long riovcnt,
			    unsigned long flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 0);
}

asmlinkage ssize_t
compat_sys_process_vm_writev(compat_pid_t pid,
			     const struct compat_iovec __user *lvec,
			     unsigned long liovcnt,
			     const struct compat_iovec __user *rvec,
			     unsigned long riovcnt,
			     unsigned long flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 1);
}

#endif
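
For reference, a minimal userspace sketch of how these syscalls are normally
driven from the other side. It is an illustration, not part of the kernel
file: it assumes the glibc process_vm_readv() wrapper from <sys/uio.h>
(glibc 2.15 or later) and ptrace permissions that allow a process to read
its own child (the default Yama setting); otherwise CAP_SYS_PTRACE may be
required.

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <unistd.h>

static char buf[64];

int main(void)
{
	int sync_pipe[2];
	pid_t child;
	char tmp;

	pipe(sync_pipe);
	child = fork();
	if (child == 0) {
		/* Child: fill its private copy of buf, tell the parent,
		 * then wait around to be read from. */
		snprintf(buf, sizeof(buf), "written by pid %d", (int)getpid());
		write(sync_pipe[1], "x", 1);
		pause();
		_exit(0);
	}

	read(sync_pipe[0], &tmp, 1);	/* wait until the child has written */

	char out[64] = "";
	struct iovec local  = { .iov_base = out, .iov_len = sizeof(out) };
	struct iovec remote = { .iov_base = buf, .iov_len = sizeof(buf) };

	/* After fork() the child's buf sits at the same virtual address,
	 * so the parent can pass its own &buf[0] as the remote address. */
	ssize_t n = process_vm_readv(child, &local, 1, &remote, 1, 0);
	if (n < 0)
		perror("process_vm_readv");
	else
		printf("read %zd bytes from child: %s\n", n, out);

	kill(child, SIGKILL);
	waitpid(child, NULL, 0);
	return 0;
}

process_vm_writev() is driven the same way, with the local iovecs acting as
the source and the remote iovecs as the destination.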