/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * Changes by Acxiom Corporation to add protocol version to kernel
 * communication, Copyright Acxiom Corporation, 2005.
 *
 * See COPYING in top-level directory.
 */

#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-dev-proto.h"
#include "orangefs-bufmap.h"

#include <linux/debugfs.h>
#include <linux/slab.h>

/* this file implements the /dev/pvfs2-req device node */
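/*
 * The userspace client-core daemon drives the filesystem through this
 * node: read() hands it the next pending upcall (protocol version,
 * magic, tag, then the upcall struct), and write_iter() delivers the
 * matching downcall (version, magic, tag, downcall struct, plus an
 * optional trailer for READDIR) back to the op that is waiting on it.
 */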

static int open_access_count;

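/*
 * Emitted from orangefs_devreq_open() when a second process tries to
 * open the device while the client-core already has it open.
 */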
#define DUMP_DEVICE_ERROR() \
do { \
	gossip_err("*****************************************************\n"); \
	gossip_err("ORANGEFS Device Error: You cannot open the device file "); \
	gossip_err("\n/dev/%s more than once. Please make sure that\nthere " \
		   "are no ", ORANGEFS_REQDEVICE_NAME); \
	gossip_err("instances of a program using this device\ncurrently " \
		   "running. (You must verify this!)\n"); \
	gossip_err("For example, you can use the lsof program as follows:\n"); \
	gossip_err("'lsof | grep %s' (run this as root)\n", \
		   ORANGEFS_REQDEVICE_NAME); \
	gossip_err(" open_access_count = %d\n", open_access_count); \
	gossip_err("*****************************************************\n"); \
} while (0)

static int hash_func(__u64 tag, int table_size)
{
	return do_div(tag, (unsigned int)table_size);
}

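/*
 * Ops handed to the client-core move from orangefs_request_list onto
 * htable_ops_in_progress, hashed by tag, until the matching downcall
 * arrives through orangefs_devreq_write_iter().
 */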
static void orangefs_devreq_add_op(struct orangefs_kernel_op_s *op)
{
	int index = hash_func(op->tag, hash_table_size);

	list_add_tail(&op->list, &htable_ops_in_progress[index]);
}

static struct orangefs_kernel_op_s *orangefs_devreq_remove_op(__u64 tag)
{
	struct orangefs_kernel_op_s *op, *next;
	int index;

	index = hash_func(tag, hash_table_size);

	spin_lock(&htable_ops_in_progress_lock);
	list_for_each_entry_safe(op,
				 next,
				 &htable_ops_in_progress[index],
				 list) {
		if (op->tag == tag && !op_state_purged(op)) {
			list_del_init(&op->list);
			get_op(op); /* increase ref count. */
			spin_unlock(&htable_ops_in_progress_lock);
			return op;
		}
	}

	spin_unlock(&htable_ops_in_progress_lock);
	return NULL;
}

static int orangefs_devreq_open(struct inode *inode, struct file *file)
{
	int ret = -EINVAL;

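	/*
	 * The client-core must open the device non-blocking: when no ops
	 * are pending, reads return -EAGAIN rather than sleeping.
	 */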
	if (!(file->f_flags & O_NONBLOCK)) {
		gossip_err("%s: device cannot be opened in blocking mode\n",
			   __func__);
		goto out;
	}
	ret = -EACCES;
	gossip_debug(GOSSIP_DEV_DEBUG, "client-core: opening device\n");
	mutex_lock(&devreq_mutex);

	if (open_access_count == 0) {
		open_access_count = 1;
		ret = 0;
	} else {
		DUMP_DEVICE_ERROR();
	}
	mutex_unlock(&devreq_mutex);

out:

	gossip_debug(GOSSIP_DEV_DEBUG,
		     "pvfs2-client-core: open device complete (ret = %d)\n",
		     ret);
	return ret;
}

/* Function for read() callers into the device */
static ssize_t orangefs_devreq_read(struct file *file,
				    char __user *buf,
				    size_t count, loff_t *offset)
{
	struct orangefs_kernel_op_s *op, *temp;
	__s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION;
	static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
	struct orangefs_kernel_op_s *cur_op = NULL;
	unsigned long ret;

	/* We do not support blocking IO. */
	if (!(file->f_flags & O_NONBLOCK)) {
		gossip_err("%s: blocking read from client-core.\n",
			   __func__);
		return -EINVAL;
	}

	/*
	 * The client will do an ioctl to find MAX_DEV_REQ_UPSIZE, then
	 * always read with that size buffer.
	 */
	if (count != MAX_DEV_REQ_UPSIZE) {
		gossip_err("orangefs: client-core tried to read wrong size\n");
		return -EINVAL;
	}

restart:
	/* Get next op (if any) from top of list. */
	spin_lock(&orangefs_request_list_lock);
	list_for_each_entry_safe(op, temp, &orangefs_request_list, list) {
		__s32 fsid;
		/* This lock is held past the end of the loop when we break. */
		spin_lock(&op->lock);
		if (unlikely(op_state_purged(op))) {
			spin_unlock(&op->lock);
			continue;
		}

		fsid = fsid_of_op(op);
		if (fsid != ORANGEFS_FS_ID_NULL) {
			int ret;
			/* Skip ops whose filesystem needs to be mounted. */
			ret = fs_mount_pending(fsid);
			if (ret == 1) {
				gossip_debug(GOSSIP_DEV_DEBUG,
					     "orangefs: skipping op tag %llu %s\n",
					     llu(op->tag), get_opname_string(op));
				spin_unlock(&op->lock);
				continue;
			/*
			 * Skip ops whose filesystem we don't know about unless
			 * it is being mounted.
			 */
			/* XXX: is there a better way to detect this? */
			} else if (ret == -1 &&
				   !(op->upcall.type ==
					ORANGEFS_VFS_OP_FS_MOUNT ||
				     op->upcall.type ==
					ORANGEFS_VFS_OP_GETATTR)) {
				gossip_debug(GOSSIP_DEV_DEBUG,
					     "orangefs: skipping op tag %llu %s\n",
					     llu(op->tag), get_opname_string(op));
				gossip_err(
					"orangefs: ERROR: fs_mount_pending %d\n",
					fsid);
				spin_unlock(&op->lock);
				continue;
			}
		}
		/*
		 * Either this op does not pertain to a filesystem, is mounting
		 * a filesystem, or pertains to a mounted filesystem. Let it
		 * through.
		 */
		cur_op = op;
		break;
	}

	/*
	 * At this point we either have a valid op and can continue or have not
	 * found an op and must ask the client to try again later.
	 */
	if (!cur_op) {
		spin_unlock(&orangefs_request_list_lock);
		return -EAGAIN;
	}

	gossip_debug(GOSSIP_DEV_DEBUG, "orangefs: reading op tag %llu %s\n",
		     llu(cur_op->tag), get_opname_string(cur_op));

	/*
	 * Such an op should never be on the list in the first place. If so, we
	 * will abort.
	 */
	if (op_state_in_progress(cur_op) || op_state_serviced(cur_op)) {
		gossip_err("orangefs: ERROR: Current op already queued.\n");
		list_del(&cur_op->list);
		spin_unlock(&cur_op->lock);
		spin_unlock(&orangefs_request_list_lock);
		return -EAGAIN;
	}
	list_del_init(&cur_op->list);
	get_op(op);
	spin_unlock(&orangefs_request_list_lock);

	spin_unlock(&cur_op->lock);

	/* Push the upcall out. */
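	/*
	 * Layout handed to userspace: proto_ver (__s32), magic (__s32),
	 * tag (__u64), then the upcall struct itself.
	 */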
	ret = copy_to_user(buf, &proto_ver, sizeof(__s32));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf + sizeof(__s32), &magic, sizeof(__s32));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf + 2 * sizeof(__s32), &cur_op->tag, sizeof(__u64));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf + 2 * sizeof(__s32) + sizeof(__u64),
			   &cur_op->upcall,
			   sizeof(struct orangefs_upcall_s));
	if (ret != 0)
		goto error;

	spin_lock(&htable_ops_in_progress_lock);
	spin_lock(&cur_op->lock);
	if (unlikely(op_state_given_up(cur_op))) {
		spin_unlock(&cur_op->lock);
		spin_unlock(&htable_ops_in_progress_lock);
		op_release(cur_op);
		goto restart;
	}

	/*
	 * Set the operation to be in progress and move it between lists since
	 * it has been sent to the client.
	 */
	set_op_state_inprogress(cur_op);
	orangefs_devreq_add_op(cur_op);
	spin_unlock(&cur_op->lock);
	spin_unlock(&htable_ops_in_progress_lock);
	op_release(cur_op);

	/* The client only asks to read one size buffer. */
	return MAX_DEV_REQ_UPSIZE;
error:
	/*
	 * We were unable to copy the op data to the client. Put the op back
	 * in the list. If the client has crashed, the op will be purged
	 * later when the device is released.
	 */
	gossip_err("orangefs: Failed to copy data to user space\n");
	spin_lock(&orangefs_request_list_lock);
	spin_lock(&cur_op->lock);
	if (likely(!op_state_given_up(cur_op))) {
		set_op_state_waiting(cur_op);
		list_add(&cur_op->list, &orangefs_request_list);
	}
	spin_unlock(&cur_op->lock);
	spin_unlock(&orangefs_request_list_lock);
	op_release(cur_op);
	return -EFAULT;
}

/*
 * Function for writev() callers into the device.
 *
 * Userspace should have written:
 *  - __u32 version
 *  - __u32 magic
 *  - __u64 tag
 *  - struct orangefs_downcall_s
 *  - trailer buffer (in the case of READDIR operations)
 */
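/*
 * The tag in the header identifies which in-progress op this downcall
 * answers: the op is looked up by tag, the downcall (and any READDIR
 * trailer) is copied in, and set_op_state_serviced() wakes the VFS
 * caller that queued it.
 */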
static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
					  struct iov_iter *iter)
{
	ssize_t ret;
	struct orangefs_kernel_op_s *op = NULL;
	struct {
		__u32 version;
		__u32 magic;
		__u64 tag;
	} head;
	int total = ret = iov_iter_count(iter);
	int n;
	int downcall_size = sizeof(struct orangefs_downcall_s);
	int head_size = sizeof(head);

	gossip_debug(GOSSIP_DEV_DEBUG, "%s: total:%d: ret:%zd:\n",
		     __func__,
		     total,
		     ret);

	if (total < MAX_DEV_REQ_DOWNSIZE) {
		gossip_err("%s: total:%d: must be at least:%u:\n",
			   __func__,
			   total,
			   (unsigned int) MAX_DEV_REQ_DOWNSIZE);
		return -EFAULT;
	}

	n = copy_from_iter(&head, head_size, iter);
	if (n < head_size) {
		gossip_err("%s: failed to copy head.\n", __func__);
		return -EFAULT;
	}

	if (head.version < ORANGEFS_MINIMUM_USERSPACE_VERSION) {
		gossip_err("%s: userspace claims version %d, minimum version required: %d.\n",
			   __func__,
			   head.version,
			   ORANGEFS_MINIMUM_USERSPACE_VERSION);
		return -EPROTO;
	}

	if (head.magic != ORANGEFS_DEVREQ_MAGIC) {
		gossip_err("Error: Device magic number does not match.\n");
		return -EPROTO;
	}

	op = orangefs_devreq_remove_op(head.tag);
	if (!op) {
		gossip_err("WARNING: No one's waiting for tag %llu\n",
			   llu(head.tag));
		return ret;
	}

	n = copy_from_iter(&op->downcall, downcall_size, iter);
	if (n != downcall_size) {
		gossip_err("%s: failed to copy downcall.\n", __func__);
		ret = -EFAULT;
		goto Broken;
	}

	if (op->downcall.status)
		goto wakeup;

	/*
	 * We've successfully peeled off the head and the downcall.
	 * Something has gone awry if total doesn't equal the
	 * sum of head_size, downcall_size and trailer_size.
	 */
	if ((head_size + downcall_size + op->downcall.trailer_size) != total) {
		gossip_err("%s: funky write, head_size:%d"
			   ": downcall_size:%d: trailer_size:%lld"
			   ": total size:%d:\n",
			   __func__,
			   head_size,
			   downcall_size,
			   op->downcall.trailer_size,
			   total);
		ret = -EFAULT;
		goto Broken;
	}

	/* Only READDIR operations should have trailers. */
	if ((op->downcall.type != ORANGEFS_VFS_OP_READDIR) &&
	    (op->downcall.trailer_size != 0)) {
		gossip_err("%s: %x operation with trailer.",
			   __func__,
			   op->downcall.type);
		ret = -EFAULT;
		goto Broken;
	}

	/* READDIR operations should always have trailers. */
	if ((op->downcall.type == ORANGEFS_VFS_OP_READDIR) &&
	    (op->downcall.trailer_size == 0)) {
		gossip_err("%s: %x operation with no trailer.",
			   __func__,
			   op->downcall.type);
		ret = -EFAULT;
		goto Broken;
	}

	if (op->downcall.type != ORANGEFS_VFS_OP_READDIR)
		goto wakeup;

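	/*
	 * The READDIR payload arrives as a trailer; copy it into a
	 * kernel buffer for the waiting readdir op to consume.
	 */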
	op->downcall.trailer_buf =
		vmalloc(op->downcall.trailer_size);
	if (op->downcall.trailer_buf == NULL) {
		gossip_err("%s: failed trailer vmalloc.\n",
			   __func__);
		ret = -ENOMEM;
		goto Broken;
	}
	memset(op->downcall.trailer_buf, 0, op->downcall.trailer_size);
	n = copy_from_iter(op->downcall.trailer_buf,
			   op->downcall.trailer_size,
			   iter);
	if (n != op->downcall.trailer_size) {
		gossip_err("%s: failed to copy trailer.\n", __func__);
		vfree(op->downcall.trailer_buf);
		ret = -EFAULT;
		goto Broken;
	}

wakeup:
	/*
	 * tell the vfs op waiting on a waitqueue
	 * that this op is done
	 */
	spin_lock(&op->lock);
	if (unlikely(op_state_given_up(op))) {
		spin_unlock(&op->lock);
		goto out;
	}
	set_op_state_serviced(op);
	spin_unlock(&op->lock);

	/*
	 * If this operation is an I/O operation we need to wait
	 * for all data to be copied before we can return to avoid
	 * buffer corruption and races that can pull the buffers
	 * out from under us.
	 *
	 * Essentially we're synchronizing with other parts of the
	 * vfs implicitly by not allowing the user space
	 * application reading/writing this device to return until
	 * the buffers are done being used.
	 */
	if (op->downcall.type == ORANGEFS_VFS_OP_FILE_IO) {
		long n = wait_for_completion_interruptible_timeout(&op->done,
								   op_timeout_secs * HZ);
		if (unlikely(n < 0)) {
			gossip_debug(GOSSIP_DEV_DEBUG,
				     "%s: signal on I/O wait, aborting\n",
				     __func__);
		} else if (unlikely(n == 0)) {
			gossip_debug(GOSSIP_DEV_DEBUG,
				     "%s: timed out.\n",
				     __func__);
		}
	}
out:
	op_release(op);
	return ret;

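	/*
	 * The downcall could not be consumed. Mark the op serviced with an
	 * error status so the VFS caller waiting on it sees the failure.
	 */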
Broken:
	spin_lock(&op->lock);
	if (!op_state_given_up(op)) {
		op->downcall.status = ret;
		set_op_state_serviced(op);
	}
	spin_unlock(&op->lock);
	goto out;
}

/*
 * Mark every mounted file system as needing a remount.
 * Returns 1 if no file systems were mounted, 0 otherwise.
 */
static int mark_all_pending_mounts(void)
{
	int unmounted = 1;
	struct orangefs_sb_info_s *orangefs_sb = NULL;

	spin_lock(&orangefs_superblocks_lock);
	list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
		/* All of these file systems require a remount */
		orangefs_sb->mount_pending = 1;
		unmounted = 0;
	}
	spin_unlock(&orangefs_superblocks_lock);
	return unmounted;
}

/*
 * Determine if a given file system needs to be remounted or not
 *  Returns -1 on error
 *           0 if already mounted
 *           1 if needs remount
 */
int fs_mount_pending(__s32 fsid)
{
	int mount_pending = -1;
	struct orangefs_sb_info_s *orangefs_sb = NULL;

	spin_lock(&orangefs_superblocks_lock);
	list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
		if (orangefs_sb->fs_id == fsid) {
			mount_pending = orangefs_sb->mount_pending;
			break;
		}
	}
	spin_unlock(&orangefs_superblocks_lock);
	return mount_pending;
}

/*
 * NOTE: gets called when the last reference to this device is dropped.
 * Using the open_access_count variable, we enforce a reference count
 * on this file so that it can be opened by only one process at a time.
 * The devreq_mutex is used to make sure all I/O has completed before
 * we call orangefs_bufmap_finalize, and to handle similar tricky
 * situations.
 */
static int orangefs_devreq_release(struct inode *inode, struct file *file)
{
	int unmounted = 0;

	gossip_debug(GOSSIP_DEV_DEBUG,
		     "%s:pvfs2-client-core: exiting, closing device\n",
		     __func__);

	mutex_lock(&devreq_mutex);
	if (orangefs_get_bufmap_init())
		orangefs_bufmap_finalize();

	open_access_count = -1;

	unmounted = mark_all_pending_mounts();
	gossip_debug(GOSSIP_DEV_DEBUG, "ORANGEFS Device Close: Filesystem(s) %s\n",
		     (unmounted ? "UNMOUNTED" : "MOUNTED"));

	/*
	 * Walk through the list of ops in the request list, mark them
	 * as purged and wake them up.
	 */
	purge_waiting_ops();
	/*
	 * Walk through the hash table of in-progress operations; mark
	 * them as purged and wake them up.
	 */
	purge_inprogress_ops();
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "pvfs2-client-core: device close complete\n");
	open_access_count = 0;
	mutex_unlock(&devreq_mutex);
	return 0;
}

int is_daemon_in_service(void)
{
	int in_service;

	/*
	 * Check whether the client-core is alive, based on the access
	 * count we maintain on the device.
	 */
	mutex_lock(&devreq_mutex);
	in_service = open_access_count == 1 ? 0 : -EIO;
	mutex_unlock(&devreq_mutex);
	return in_service;
}

static inline long check_ioctl_command(unsigned int command)
{
	/* Check for valid ioctl codes */
	if (_IOC_TYPE(command) != ORANGEFS_DEV_MAGIC) {
		gossip_err("device ioctl magic numbers don't match! Did you rebuild pvfs2-client-core/libpvfs2? [cmd %x, magic %x != %x]\n",
			   command,
			   _IOC_TYPE(command),
			   ORANGEFS_DEV_MAGIC);
		return -EINVAL;
	}
	/* and valid ioctl commands */
	if (_IOC_NR(command) >= ORANGEFS_DEV_MAXNR || _IOC_NR(command) <= 0) {
		gossip_err("Invalid ioctl command number [%d >= %d]\n",
			   _IOC_NR(command), ORANGEFS_DEV_MAXNR);
		return -ENOIOCTLCMD;
	}
	return 0;
}

static long dispatch_ioctl_command(unsigned int command, unsigned long arg)
{
	static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
	static __s32 max_up_size = MAX_DEV_REQ_UPSIZE;
	static __s32 max_down_size = MAX_DEV_REQ_DOWNSIZE;
	struct ORANGEFS_dev_map_desc user_desc;
	int ret = 0;
	struct dev_mask_info_s mask_info = { 0 };
	struct dev_mask2_info_s mask2_info = { 0, 0 };
	int upstream_kmod = 1;
	struct list_head *tmp = NULL;
	struct orangefs_sb_info_s *orangefs_sb = NULL;

	/* mtmoore: add locking here */

	switch (command) {
	case ORANGEFS_DEV_GET_MAGIC:
		return ((put_user(magic, (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	case ORANGEFS_DEV_GET_MAX_UPSIZE:
		return ((put_user(max_up_size,
				  (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	case ORANGEFS_DEV_GET_MAX_DOWNSIZE:
		return ((put_user(max_down_size,
				  (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
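	/*
	 * user_desc describes the userspace region the client-core sets
	 * aside for the shared I/O buffers; hand it to the bufmap code.
	 */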
	case ORANGEFS_DEV_MAP:
		ret = copy_from_user(&user_desc,
				     (struct ORANGEFS_dev_map_desc __user *)
				     arg,
				     sizeof(struct ORANGEFS_dev_map_desc));
		if (orangefs_get_bufmap_init()) {
			return -EINVAL;
		} else {
			return ret ?
			       -EIO :
			       orangefs_bufmap_initialize(&user_desc);
		}
	case ORANGEFS_DEV_REMOUNT_ALL:
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: got ORANGEFS_DEV_REMOUNT_ALL\n",
			     __func__);

		/*
		 * remount all mounted orangefs volumes to regain the lost
		 * dynamic mount tables (if any) -- NOTE: this is done
		 * without keeping the superblock list locked due to the
		 * upcall/downcall waiting. also, the request semaphore is
		 * used to ensure that no operations will be serviced until
		 * all of the remounts are serviced (so that ops issued
		 * between mounts do not fail)
		 */
		ret = mutex_lock_interruptible(&request_mutex);
		if (ret < 0)
			return ret;
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: priority remount in progress\n",
			     __func__);
		list_for_each(tmp, &orangefs_superblocks) {
			orangefs_sb =
				list_entry(tmp,
					   struct orangefs_sb_info_s,
					   list);
			if (orangefs_sb && (orangefs_sb->sb)) {
				gossip_debug(GOSSIP_DEV_DEBUG,
					     "%s: Remounting SB %p\n",
					     __func__,
					     orangefs_sb);

				ret = orangefs_remount(orangefs_sb->sb);
				if (ret) {
					gossip_debug(GOSSIP_DEV_DEBUG,
						     "SB %p remount failed\n",
						     orangefs_sb);
					break;
				}
			}
		}
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: priority remount complete\n",
			     __func__);
		mutex_unlock(&request_mutex);
		return ret;

	case ORANGEFS_DEV_UPSTREAM:
		ret = copy_to_user((void __user *)arg,
				   &upstream_kmod,
				   sizeof(upstream_kmod));

		if (ret != 0)
			return -EIO;
		else
			return ret;

	case ORANGEFS_DEV_CLIENT_MASK:
		ret = copy_from_user(&mask2_info,
				     (void __user *)arg,
				     sizeof(struct dev_mask2_info_s));

		if (ret != 0)
			return -EIO;

		client_debug_mask.mask1 = mask2_info.mask1_value;
		client_debug_mask.mask2 = mask2_info.mask2_value;

		pr_info("%s: client debug mask has been received "
			":%llx: :%llx:\n",
			__func__,
			(unsigned long long)client_debug_mask.mask1,
			(unsigned long long)client_debug_mask.mask2);

		return ret;

	case ORANGEFS_DEV_CLIENT_STRING:
		ret = copy_from_user(&client_debug_array_string,
				     (void __user *)arg,
				     ORANGEFS_MAX_DEBUG_STRING_LEN);
		if (ret != 0) {
			pr_info("%s: CLIENT_STRING: copy_from_user failed\n",
				__func__);
			return -EIO;
		}

		pr_info("%s: client debug array string has been received.\n",
			__func__);

		if (!help_string_initialized) {

			/* Free the "we don't know yet" default string... */
			kfree(debug_help_string);

			/* build a proper debug help string */
			if (orangefs_prepare_debugfs_help_string(0)) {
				gossip_err("%s: no debug help string\n",
					   __func__);
				return -EIO;
			}

			/* Replace the boilerplate boot-time debug-help file. */
			debugfs_remove(help_file_dentry);

			help_file_dentry =
				debugfs_create_file(
					ORANGEFS_KMOD_DEBUG_HELP_FILE,
					0444,
					debug_dir,
					debug_help_string,
					&debug_help_fops);

			if (!help_file_dentry) {
				gossip_err("%s: debugfs_create_file failed for"
					   " :%s:!\n",
					   __func__,
					   ORANGEFS_KMOD_DEBUG_HELP_FILE);
				return -EIO;
			}
		}

		debug_mask_to_string(&client_debug_mask, 1);

		debugfs_remove(client_debug_dentry);

		orangefs_client_debug_init();

		help_string_initialized++;

		return ret;

	case ORANGEFS_DEV_DEBUG:
		ret = copy_from_user(&mask_info,
				     (void __user *)arg,
				     sizeof(mask_info));

		if (ret != 0)
			return -EIO;

		if (mask_info.mask_type == KERNEL_MASK) {
			if ((mask_info.mask_value == 0)
			    && (kernel_mask_set_mod_init)) {
				/*
				 * the kernel debug mask was set when the
				 * kernel module was loaded; don't override
				 * it if the client-core was started without
				 * a value for ORANGEFS_KMODMASK.
				 */
				return 0;
			}
			debug_mask_to_string(&mask_info.mask_value,
					     mask_info.mask_type);
			gossip_debug_mask = mask_info.mask_value;
			pr_info("%s: kernel debug mask has been modified to "
				":%s: :%llx:\n",
				__func__,
				kernel_debug_string,
				(unsigned long long)gossip_debug_mask);
		} else if (mask_info.mask_type == CLIENT_MASK) {
			debug_mask_to_string(&mask_info.mask_value,
					     mask_info.mask_type);
			pr_info("%s: client debug mask has been modified to "
				":%s: :%llx:\n",
				__func__,
				client_debug_string,
				llu(mask_info.mask_value));
		} else {
			gossip_lerr("Invalid mask type....\n");
			return -EINVAL;
		}

		return ret;

	default:
		return -ENOIOCTLCMD;
	}
	return -ENOIOCTLCMD;
}

static long orangefs_devreq_ioctl(struct file *file,
				  unsigned int command, unsigned long arg)
{
	long ret;

	/* Check for properly constructed commands */
	ret = check_ioctl_command(command);
	if (ret < 0)
		return (int)ret;

	return (int)dispatch_ioctl_command(command, arg);
}

#ifdef CONFIG_COMPAT		/* CONFIG_COMPAT is in .config */

/* Compat structure for the ORANGEFS_DEV_MAP ioctl */
struct ORANGEFS_dev_map_desc32 {
	compat_uptr_t ptr;
	__s32 total_size;
	__s32 size;
	__s32 count;
};

static unsigned long translate_dev_map26(unsigned long args, long *error)
{
	struct ORANGEFS_dev_map_desc32 __user *p32 = (void __user *)args;
	/*
	 * Depending on the architecture, allocate some space on the
	 * user-call-stack based on our expected layout.
	 */
	struct ORANGEFS_dev_map_desc __user *p =
		compat_alloc_user_space(sizeof(*p));
	compat_uptr_t addr;

	*error = 0;
	/* get the ptr from the 32 bit user-space */
	if (get_user(addr, &p32->ptr))
		goto err;
	/* try to put that into a 64-bit layout */
	if (put_user(compat_ptr(addr), &p->ptr))
		goto err;
	/* copy the remaining fields */
	if (copy_in_user(&p->total_size, &p32->total_size, sizeof(__s32)))
		goto err;
	if (copy_in_user(&p->size, &p32->size, sizeof(__s32)))
		goto err;
	if (copy_in_user(&p->count, &p32->count, sizeof(__s32)))
		goto err;
	return (unsigned long)p;
err:
	*error = -EFAULT;
	return 0;
}

/*
 * 32-bit user-space apps' ioctl handlers when the kernel module
 * is compiled as 64-bit
 */
static long orangefs_devreq_compat_ioctl(struct file *filp, unsigned int cmd,
					 unsigned long args)
{
	long ret;
	unsigned long arg = args;

	/* Check for properly constructed commands */
	ret = check_ioctl_command(cmd);
	if (ret < 0)
		return ret;
	if (cmd == ORANGEFS_DEV_MAP) {
		/*
		 * convert the arguments to what we expect internally
		 * in kernel space
		 */
		arg = translate_dev_map26(args, &ret);
		if (ret < 0) {
			gossip_err("Could not translate dev map\n");
			return ret;
		}
	}
	/* no other ioctl requires translation */
	return dispatch_ioctl_command(cmd, arg);
}

#endif /* CONFIG_COMPAT is in .config */

/* the assigned character device major number */
static int orangefs_dev_major;

/*
 * Initialize orangefs device specific state:
 * Must be called at module load time only
 */
int orangefs_dev_init(void)
{
	/* register orangefs-req device */
	orangefs_dev_major = register_chrdev(0,
					     ORANGEFS_REQDEVICE_NAME,
					     &orangefs_devreq_file_operations);
	if (orangefs_dev_major < 0) {
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "Failed to register /dev/%s (error %d)\n",
			     ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
		return orangefs_dev_major;
	}

	gossip_debug(GOSSIP_DEV_DEBUG,
		     "*** /dev/%s character device registered ***\n",
		     ORANGEFS_REQDEVICE_NAME);
	gossip_debug(GOSSIP_DEV_DEBUG, "'mknod /dev/%s c %d 0'.\n",
		     ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
	return 0;
}

void orangefs_dev_cleanup(void)
{
	unregister_chrdev(orangefs_dev_major, ORANGEFS_REQDEVICE_NAME);
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "*** /dev/%s character device unregistered ***\n",
		     ORANGEFS_REQDEVICE_NAME);
}

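/*
 * poll reports the device readable whenever there is at least one
 * upcall waiting on orangefs_request_list for the client-core to read.
 */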
static unsigned int orangefs_devreq_poll(struct file *file,
					 struct poll_table_struct *poll_table)
{
	int poll_revent_mask = 0;

	poll_wait(file, &orangefs_request_list_waitq, poll_table);

	if (!list_empty(&orangefs_request_list))
		poll_revent_mask |= POLLIN;
	return poll_revent_mask;
}

const struct file_operations orangefs_devreq_file_operations = {
	.owner = THIS_MODULE,
	.read = orangefs_devreq_read,
	.write_iter = orangefs_devreq_write_iter,
	.open = orangefs_devreq_open,
	.release = orangefs_devreq_release,
	.unlocked_ioctl = orangefs_devreq_ioctl,

#ifdef CONFIG_COMPAT		/* CONFIG_COMPAT is in .config */
	.compat_ioctl = orangefs_devreq_compat_ioctl,
#endif
	.poll = orangefs_devreq_poll
};