/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * Changes by Acxiom Corporation to add protocol version to kernel
 * communication, Copyright Acxiom Corporation, 2005.
 *
 * See COPYING in top-level directory.
 */
11 #include "orangefs-kernel.h"
12 #include "orangefs-dev-proto.h"
13 #include "orangefs-bufmap.h"
15 #include <linux/debugfs.h>
16 #include <linux/slab.h>
18 /* this file implements the /dev/pvfs2-req device node */
20 static int open_access_count
;
/*
 * Print a loud, multi-line diagnostic when a second process tries to open
 * the request device (only one opener is allowed at a time).
 * Wrapped in do { } while (0) so it behaves as a single statement after
 * an unbraced `if`/`else`.
 */
#define DUMP_DEVICE_ERROR()                                                   \
do {                                                                          \
	gossip_err("*****************************************************\n");\
	gossip_err("ORANGEFS Device Error:  You cannot open the device file ");  \
	gossip_err("\n/dev/%s more than once.  Please make sure that\nthere " \
		   "are no ", ORANGEFS_REQDEVICE_NAME);                          \
	gossip_err("instances of a program using this device\ncurrently "     \
		   "running. (You must verify this!)\n");                     \
	gossip_err("For example, you can use the lsof program as follows:\n");\
	gossip_err("'lsof | grep %s' (run this as root)\n",                   \
		   ORANGEFS_REQDEVICE_NAME);                                     \
	gossip_err("  open_access_count = %d\n", open_access_count);          \
	gossip_err("*****************************************************\n");\
} while (0)
37 static int hash_func(__u64 tag
, int table_size
)
39 return do_div(tag
, (unsigned int)table_size
);
42 static void orangefs_devreq_add_op(struct orangefs_kernel_op_s
*op
)
44 int index
= hash_func(op
->tag
, hash_table_size
);
46 spin_lock(&htable_ops_in_progress_lock
);
47 list_add_tail(&op
->list
, &htable_ops_in_progress
[index
]);
48 spin_unlock(&htable_ops_in_progress_lock
);
51 static struct orangefs_kernel_op_s
*orangefs_devreq_remove_op(__u64 tag
)
53 struct orangefs_kernel_op_s
*op
, *next
;
56 index
= hash_func(tag
, hash_table_size
);
58 spin_lock(&htable_ops_in_progress_lock
);
59 list_for_each_entry_safe(op
,
61 &htable_ops_in_progress
[index
],
65 spin_unlock(&htable_ops_in_progress_lock
);
70 spin_unlock(&htable_ops_in_progress_lock
);
74 static int orangefs_devreq_open(struct inode
*inode
, struct file
*file
)
78 if (!(file
->f_flags
& O_NONBLOCK
)) {
79 gossip_err("%s: device cannot be opened in blocking mode\n",
84 gossip_debug(GOSSIP_DEV_DEBUG
, "client-core: opening device\n");
85 mutex_lock(&devreq_mutex
);
87 if (open_access_count
== 0) {
88 ret
= generic_file_open(inode
, file
);
94 mutex_unlock(&devreq_mutex
);
98 gossip_debug(GOSSIP_DEV_DEBUG
,
99 "pvfs2-client-core: open device complete (ret = %d)\n",
104 /* Function for read() callers into the device */
105 static ssize_t
orangefs_devreq_read(struct file
*file
,
107 size_t count
, loff_t
*offset
)
109 struct orangefs_kernel_op_s
*op
, *temp
;
110 __s32 proto_ver
= ORANGEFS_KERNEL_PROTO_VERSION
;
111 static __s32 magic
= ORANGEFS_DEVREQ_MAGIC
;
112 struct orangefs_kernel_op_s
*cur_op
= NULL
;
115 /* We do not support blocking IO. */
116 if (!(file
->f_flags
& O_NONBLOCK
)) {
117 gossip_err("%s: blocking read from client-core.\n",
123 * The client will do an ioctl to find MAX_DEV_REQ_UPSIZE, then
124 * always read with that size buffer.
126 if (count
!= MAX_DEV_REQ_UPSIZE
) {
127 gossip_err("orangefs: client-core tried to read wrong size\n");
131 /* Get next op (if any) from top of list. */
132 spin_lock(&orangefs_request_list_lock
);
133 list_for_each_entry_safe(op
, temp
, &orangefs_request_list
, list
) {
135 /* This lock is held past the end of the loop when we break. */
136 spin_lock(&op
->lock
);
138 fsid
= fsid_of_op(op
);
139 if (fsid
!= ORANGEFS_FS_ID_NULL
) {
141 /* Skip ops whose filesystem needs to be mounted. */
142 ret
= fs_mount_pending(fsid
);
144 gossip_debug(GOSSIP_DEV_DEBUG
,
145 "orangefs: skipping op tag %llu %s\n",
146 llu(op
->tag
), get_opname_string(op
));
147 spin_unlock(&op
->lock
);
150 * Skip ops whose filesystem we don't know about unless
151 * it is being mounted.
153 /* XXX: is there a better way to detect this? */
154 } else if (ret
== -1 &&
156 ORANGEFS_VFS_OP_FS_MOUNT
||
158 ORANGEFS_VFS_OP_GETATTR
)) {
159 gossip_debug(GOSSIP_DEV_DEBUG
,
160 "orangefs: skipping op tag %llu %s\n",
161 llu(op
->tag
), get_opname_string(op
));
163 "orangefs: ERROR: fs_mount_pending %d\n",
165 spin_unlock(&op
->lock
);
170 * Either this op does not pertain to a filesystem, is mounting
171 * a filesystem, or pertains to a mounted filesystem. Let it
179 * At this point we either have a valid op and can continue or have not
180 * found an op and must ask the client to try again later.
183 spin_unlock(&orangefs_request_list_lock
);
187 gossip_debug(GOSSIP_DEV_DEBUG
, "orangefs: reading op tag %llu %s\n",
188 llu(cur_op
->tag
), get_opname_string(cur_op
));
191 * Such an op should never be on the list in the first place. If so, we
194 if (op_state_in_progress(cur_op
) || op_state_serviced(cur_op
)) {
195 gossip_err("orangefs: ERROR: Current op already queued.\n");
196 list_del(&cur_op
->list
);
197 spin_unlock(&cur_op
->lock
);
198 spin_unlock(&orangefs_request_list_lock
);
203 * Set the operation to be in progress and move it between lists since
204 * it has been sent to the client.
206 set_op_state_inprogress(cur_op
);
208 list_del(&cur_op
->list
);
209 spin_unlock(&orangefs_request_list_lock
);
210 orangefs_devreq_add_op(cur_op
);
211 spin_unlock(&cur_op
->lock
);
213 /* Push the upcall out. */
214 ret
= copy_to_user(buf
, &proto_ver
, sizeof(__s32
));
217 ret
= copy_to_user(buf
+sizeof(__s32
), &magic
, sizeof(__s32
));
220 ret
= copy_to_user(buf
+2 * sizeof(__s32
), &cur_op
->tag
, sizeof(__u64
));
223 ret
= copy_to_user(buf
+2*sizeof(__s32
)+sizeof(__u64
), &cur_op
->upcall
,
224 sizeof(struct orangefs_upcall_s
));
228 /* The client only asks to read one size buffer. */
229 return MAX_DEV_REQ_UPSIZE
;
232 * We were unable to copy the op data to the client. Put the op back in
233 * list. If client has crashed, the op will be purged later when the
234 * device is released.
236 gossip_err("orangefs: Failed to copy data to user space\n");
237 spin_lock(&orangefs_request_list_lock
);
238 spin_lock(&cur_op
->lock
);
239 set_op_state_waiting(cur_op
);
240 orangefs_devreq_remove_op(cur_op
->tag
);
241 list_add(&cur_op
->list
, &orangefs_request_list
);
242 spin_unlock(&cur_op
->lock
);
243 spin_unlock(&orangefs_request_list_lock
);
248 * Function for writev() callers into the device.
250 * Userspace should have written:
254 * - struct orangefs_downcall_s
255 * - trailer buffer (in the case of READDIR operations)
257 static ssize_t
orangefs_devreq_write_iter(struct kiocb
*iocb
,
258 struct iov_iter
*iter
)
261 struct orangefs_kernel_op_s
*op
= NULL
;
267 int total
= ret
= iov_iter_count(iter
);
269 int downcall_size
= sizeof(struct orangefs_downcall_s
);
270 int head_size
= sizeof(head
);
272 gossip_debug(GOSSIP_DEV_DEBUG
, "%s: total:%d: ret:%zd:\n",
277 if (total
< MAX_DEV_REQ_DOWNSIZE
) {
278 gossip_err("%s: total:%d: must be at least:%u:\n",
281 (unsigned int) MAX_DEV_REQ_DOWNSIZE
);
286 n
= copy_from_iter(&head
, head_size
, iter
);
288 gossip_err("%s: failed to copy head.\n", __func__
);
293 if (head
.version
< ORANGEFS_MINIMUM_USERSPACE_VERSION
) {
294 gossip_err("%s: userspace claims version"
295 "%d, minimum version required: %d.\n",
298 ORANGEFS_MINIMUM_USERSPACE_VERSION
);
303 if (head
.magic
!= ORANGEFS_DEVREQ_MAGIC
) {
304 gossip_err("Error: Device magic number does not match.\n");
309 op
= orangefs_devreq_remove_op(head
.tag
);
311 gossip_err("WARNING: No one's waiting for tag %llu\n",
316 get_op(op
); /* increase ref count. */
318 n
= copy_from_iter(&op
->downcall
, downcall_size
, iter
);
319 if (n
!= downcall_size
) {
320 gossip_err("%s: failed to copy downcall.\n", __func__
);
326 if (op
->downcall
.status
)
330 * We've successfully peeled off the head and the downcall.
331 * Something has gone awry if total doesn't equal the
332 * sum of head_size, downcall_size and trailer_size.
334 if ((head_size
+ downcall_size
+ op
->downcall
.trailer_size
) != total
) {
335 gossip_err("%s: funky write, head_size:%d"
336 ": downcall_size:%d: trailer_size:%lld"
337 ": total size:%d:\n",
341 op
->downcall
.trailer_size
,
348 /* Only READDIR operations should have trailers. */
349 if ((op
->downcall
.type
!= ORANGEFS_VFS_OP_READDIR
) &&
350 (op
->downcall
.trailer_size
!= 0)) {
351 gossip_err("%s: %x operation with trailer.",
359 /* READDIR operations should always have trailers. */
360 if ((op
->downcall
.type
== ORANGEFS_VFS_OP_READDIR
) &&
361 (op
->downcall
.trailer_size
== 0)) {
362 gossip_err("%s: %x operation with no trailer.",
370 if (op
->downcall
.type
!= ORANGEFS_VFS_OP_READDIR
)
373 op
->downcall
.trailer_buf
=
374 vmalloc(op
->downcall
.trailer_size
);
375 if (op
->downcall
.trailer_buf
== NULL
) {
376 gossip_err("%s: failed trailer vmalloc.\n",
382 memset(op
->downcall
.trailer_buf
, 0, op
->downcall
.trailer_size
);
383 n
= copy_from_iter(op
->downcall
.trailer_buf
,
384 op
->downcall
.trailer_size
,
386 if (n
!= op
->downcall
.trailer_size
) {
387 gossip_err("%s: failed to copy trailer.\n", __func__
);
388 vfree(op
->downcall
.trailer_buf
);
397 * If this operation is an I/O operation we need to wait
398 * for all data to be copied before we can return to avoid
399 * buffer corruption and races that can pull the buffers
402 * Essentially we're synchronizing with other parts of the
403 * vfs implicitly by not allowing the user space
404 * application reading/writing this device to return until
405 * the buffers are done being used.
407 if (op
->downcall
.type
== ORANGEFS_VFS_OP_FILE_IO
) {
409 DEFINE_WAIT(wait_entry
);
412 * tell the vfs op waiting on a waitqueue
413 * that this op is done
415 spin_lock(&op
->lock
);
416 set_op_state_serviced(op
);
417 spin_unlock(&op
->lock
);
419 wake_up_interruptible(&op
->waitq
);
422 spin_lock(&op
->lock
);
423 prepare_to_wait_exclusive(
424 &op
->io_completion_waitq
,
427 if (op
->io_completed
) {
428 spin_unlock(&op
->lock
);
431 spin_unlock(&op
->lock
);
433 if (!signal_pending(current
)) {
435 MSECS_TO_JIFFIES(1000 *
437 if (!schedule_timeout(timeout
)) {
438 gossip_debug(GOSSIP_DEV_DEBUG
,
447 gossip_debug(GOSSIP_DEV_DEBUG
,
448 "%s: signal on I/O wait, aborting\n",
453 spin_lock(&op
->lock
);
454 finish_wait(&op
->io_completion_waitq
, &wait_entry
);
455 spin_unlock(&op
->lock
);
457 /* NOTE: for I/O operations we handle releasing the op
458 * object except in the case of timeout. the reason we
459 * can't free the op in timeout cases is that the op
460 * service logic in the vfs retries operations using
461 * the same op ptr, thus it can't be freed.
467 * tell the vfs op waiting on a waitqueue that
470 spin_lock(&op
->lock
);
471 set_op_state_serviced(op
);
472 spin_unlock(&op
->lock
);
474 * for every other operation (i.e. non-I/O), we need to
475 * wake up the callers for downcall completion
478 wake_up_interruptible(&op
->waitq
);
484 /* Returns whether any FS are still pending remounted */
485 static int mark_all_pending_mounts(void)
488 struct orangefs_sb_info_s
*orangefs_sb
= NULL
;
490 spin_lock(&orangefs_superblocks_lock
);
491 list_for_each_entry(orangefs_sb
, &orangefs_superblocks
, list
) {
492 /* All of these file system require a remount */
493 orangefs_sb
->mount_pending
= 1;
496 spin_unlock(&orangefs_superblocks_lock
);
501 * Determine if a given file system needs to be remounted or not
502 * Returns -1 on error
503 * 0 if already mounted
506 int fs_mount_pending(__s32 fsid
)
508 int mount_pending
= -1;
509 struct orangefs_sb_info_s
*orangefs_sb
= NULL
;
511 spin_lock(&orangefs_superblocks_lock
);
512 list_for_each_entry(orangefs_sb
, &orangefs_superblocks
, list
) {
513 if (orangefs_sb
->fs_id
== fsid
) {
514 mount_pending
= orangefs_sb
->mount_pending
;
518 spin_unlock(&orangefs_superblocks_lock
);
519 return mount_pending
;
523 * NOTE: gets called when the last reference to this device is dropped.
524 * Using the open_access_count variable, we enforce a reference count
525 * on this file so that it can be opened by only one process at a time.
526 * the devreq_mutex is used to make sure all i/o has completed
527 * before we call orangefs_bufmap_finalize, and similar such tricky
530 static int orangefs_devreq_release(struct inode
*inode
, struct file
*file
)
534 gossip_debug(GOSSIP_DEV_DEBUG
,
535 "%s:pvfs2-client-core: exiting, closing device\n",
538 mutex_lock(&devreq_mutex
);
539 if (orangefs_get_bufmap_init())
540 orangefs_bufmap_finalize();
544 unmounted
= mark_all_pending_mounts();
545 gossip_debug(GOSSIP_DEV_DEBUG
, "ORANGEFS Device Close: Filesystem(s) %s\n",
546 (unmounted
? "UNMOUNTED" : "MOUNTED"));
547 mutex_unlock(&devreq_mutex
);
550 * Walk through the list of ops in the request list, mark them
551 * as purged and wake them up.
555 * Walk through the hash table of in progress operations; mark
556 * them as purged and wake them up
558 purge_inprogress_ops();
559 gossip_debug(GOSSIP_DEV_DEBUG
,
560 "pvfs2-client-core: device close complete\n");
564 int is_daemon_in_service(void)
569 * What this function does is checks if client-core is alive
570 * based on the access count we maintain on the device.
572 mutex_lock(&devreq_mutex
);
573 in_service
= open_access_count
== 1 ? 0 : -EIO
;
574 mutex_unlock(&devreq_mutex
);
578 static inline long check_ioctl_command(unsigned int command
)
580 /* Check for valid ioctl codes */
581 if (_IOC_TYPE(command
) != ORANGEFS_DEV_MAGIC
) {
582 gossip_err("device ioctl magic numbers don't match! Did you rebuild pvfs2-client-core/libpvfs2? [cmd %x, magic %x != %x]\n",
588 /* and valid ioctl commands */
589 if (_IOC_NR(command
) >= ORANGEFS_DEV_MAXNR
|| _IOC_NR(command
) <= 0) {
590 gossip_err("Invalid ioctl command number [%d >= %d]\n",
591 _IOC_NR(command
), ORANGEFS_DEV_MAXNR
);
597 static long dispatch_ioctl_command(unsigned int command
, unsigned long arg
)
599 static __s32 magic
= ORANGEFS_DEVREQ_MAGIC
;
600 static __s32 max_up_size
= MAX_DEV_REQ_UPSIZE
;
601 static __s32 max_down_size
= MAX_DEV_REQ_DOWNSIZE
;
602 struct ORANGEFS_dev_map_desc user_desc
;
604 struct dev_mask_info_s mask_info
= { 0 };
605 struct dev_mask2_info_s mask2_info
= { 0, 0 };
606 int upstream_kmod
= 1;
607 struct list_head
*tmp
= NULL
;
608 struct orangefs_sb_info_s
*orangefs_sb
= NULL
;
610 /* mtmoore: add locking here */
613 case ORANGEFS_DEV_GET_MAGIC
:
614 return ((put_user(magic
, (__s32 __user
*) arg
) == -EFAULT
) ?
617 case ORANGEFS_DEV_GET_MAX_UPSIZE
:
618 return ((put_user(max_up_size
,
619 (__s32 __user
*) arg
) == -EFAULT
) ?
622 case ORANGEFS_DEV_GET_MAX_DOWNSIZE
:
623 return ((put_user(max_down_size
,
624 (__s32 __user
*) arg
) == -EFAULT
) ?
627 case ORANGEFS_DEV_MAP
:
628 ret
= copy_from_user(&user_desc
,
629 (struct ORANGEFS_dev_map_desc __user
*)
631 sizeof(struct ORANGEFS_dev_map_desc
));
632 if (orangefs_get_bufmap_init()) {
637 orangefs_bufmap_initialize(&user_desc
);
639 case ORANGEFS_DEV_REMOUNT_ALL
:
640 gossip_debug(GOSSIP_DEV_DEBUG
,
641 "%s: got ORANGEFS_DEV_REMOUNT_ALL\n",
645 * remount all mounted orangefs volumes to regain the lost
646 * dynamic mount tables (if any) -- NOTE: this is done
647 * without keeping the superblock list locked due to the
648 * upcall/downcall waiting. also, the request semaphore is
649 * used to ensure that no operations will be serviced until
650 * all of the remounts are serviced (to avoid ops between
653 ret
= mutex_lock_interruptible(&request_mutex
);
656 gossip_debug(GOSSIP_DEV_DEBUG
,
657 "%s: priority remount in progress\n",
659 list_for_each(tmp
, &orangefs_superblocks
) {
662 struct orangefs_sb_info_s
,
664 if (orangefs_sb
&& (orangefs_sb
->sb
)) {
665 gossip_debug(GOSSIP_DEV_DEBUG
,
666 "%s: Remounting SB %p\n",
670 ret
= orangefs_remount(orangefs_sb
->sb
);
672 gossip_debug(GOSSIP_DEV_DEBUG
,
673 "SB %p remount failed\n",
679 gossip_debug(GOSSIP_DEV_DEBUG
,
680 "%s: priority remount complete\n",
682 mutex_unlock(&request_mutex
);
685 case ORANGEFS_DEV_UPSTREAM
:
686 ret
= copy_to_user((void __user
*)arg
,
688 sizeof(upstream_kmod
));
695 case ORANGEFS_DEV_CLIENT_MASK
:
696 ret
= copy_from_user(&mask2_info
,
698 sizeof(struct dev_mask2_info_s
));
703 client_debug_mask
.mask1
= mask2_info
.mask1_value
;
704 client_debug_mask
.mask2
= mask2_info
.mask2_value
;
706 pr_info("%s: client debug mask has been been received "
709 (unsigned long long)client_debug_mask
.mask1
,
710 (unsigned long long)client_debug_mask
.mask2
);
714 case ORANGEFS_DEV_CLIENT_STRING
:
715 ret
= copy_from_user(&client_debug_array_string
,
717 ORANGEFS_MAX_DEBUG_STRING_LEN
);
719 pr_info("%s: CLIENT_STRING: copy_from_user failed\n",
724 pr_info("%s: client debug array string has been received.\n",
727 if (!help_string_initialized
) {
729 /* Free the "we don't know yet" default string... */
730 kfree(debug_help_string
);
732 /* build a proper debug help string */
733 if (orangefs_prepare_debugfs_help_string(0)) {
734 gossip_err("%s: no debug help string \n",
739 /* Replace the boilerplate boot-time debug-help file. */
740 debugfs_remove(help_file_dentry
);
744 ORANGEFS_KMOD_DEBUG_HELP_FILE
,
750 if (!help_file_dentry
) {
751 gossip_err("%s: debugfs_create_file failed for"
754 ORANGEFS_KMOD_DEBUG_HELP_FILE
);
759 debug_mask_to_string(&client_debug_mask
, 1);
761 debugfs_remove(client_debug_dentry
);
763 orangefs_client_debug_init();
765 help_string_initialized
++;
769 case ORANGEFS_DEV_DEBUG
:
770 ret
= copy_from_user(&mask_info
,
777 if (mask_info
.mask_type
== KERNEL_MASK
) {
778 if ((mask_info
.mask_value
== 0)
779 && (kernel_mask_set_mod_init
)) {
781 * the kernel debug mask was set when the
782 * kernel module was loaded; don't override
783 * it if the client-core was started without
784 * a value for ORANGEFS_KMODMASK.
788 debug_mask_to_string(&mask_info
.mask_value
,
789 mask_info
.mask_type
);
790 gossip_debug_mask
= mask_info
.mask_value
;
791 pr_info("%s: kernel debug mask has been modified to "
795 (unsigned long long)gossip_debug_mask
);
796 } else if (mask_info
.mask_type
== CLIENT_MASK
) {
797 debug_mask_to_string(&mask_info
.mask_value
,
798 mask_info
.mask_type
);
799 pr_info("%s: client debug mask has been modified to"
803 llu(mask_info
.mask_value
));
805 gossip_lerr("Invalid mask type....\n");
/*
 * unlocked_ioctl entry point: validate the command, then dispatch it.
 */
static long orangefs_devreq_ioctl(struct file *file,
				  unsigned int command, unsigned long arg)
{
	long ret;

	/* Check for properly constructed commands */
	ret = check_ioctl_command(command);
	if (ret < 0)
		return (int)ret;

	return (int)dispatch_ioctl_command(command, arg);
}
830 #ifdef CONFIG_COMPAT /* CONFIG_COMPAT is in .config */
832 /* Compat structure for the ORANGEFS_DEV_MAP ioctl */
833 struct ORANGEFS_dev_map_desc32
{
840 static unsigned long translate_dev_map26(unsigned long args
, long *error
)
842 struct ORANGEFS_dev_map_desc32 __user
*p32
= (void __user
*)args
;
844 * Depending on the architecture, allocate some space on the
845 * user-call-stack based on our expected layout.
847 struct ORANGEFS_dev_map_desc __user
*p
=
848 compat_alloc_user_space(sizeof(*p
));
852 /* get the ptr from the 32 bit user-space */
853 if (get_user(addr
, &p32
->ptr
))
855 /* try to put that into a 64-bit layout */
856 if (put_user(compat_ptr(addr
), &p
->ptr
))
858 /* copy the remaining fields */
859 if (copy_in_user(&p
->total_size
, &p32
->total_size
, sizeof(__s32
)))
861 if (copy_in_user(&p
->size
, &p32
->size
, sizeof(__s32
)))
863 if (copy_in_user(&p
->count
, &p32
->count
, sizeof(__s32
)))
865 return (unsigned long)p
;
872 * 32 bit user-space apps' ioctl handlers when kernel modules
873 * is compiled as a 64 bit one
875 static long orangefs_devreq_compat_ioctl(struct file
*filp
, unsigned int cmd
,
879 unsigned long arg
= args
;
881 /* Check for properly constructed commands */
882 ret
= check_ioctl_command(cmd
);
885 if (cmd
== ORANGEFS_DEV_MAP
) {
887 * convert the arguments to what we expect internally
890 arg
= translate_dev_map26(args
, &ret
);
892 gossip_err("Could not translate dev map\n");
896 /* no other ioctl requires translation */
897 return dispatch_ioctl_command(cmd
, arg
);
900 #endif /* CONFIG_COMPAT is in .config */
903 * The following two ioctl32 functions had been refactored into the above
904 * CONFIG_COMPAT ifdef, but that was an over simplification that was
905 * not noticed until we tried to compile on power pc...
907 #if (defined(CONFIG_COMPAT) && !defined(HAVE_REGISTER_IOCTL32_CONVERSION)) || !defined(CONFIG_COMPAT)
/* No-op ioctl32 registration stub for configurations without
 * register_ioctl32_conversion (or without CONFIG_COMPAT at all). */
static int orangefs_ioctl32_init(void)
{
	return 0;
}

/* Matching no-op ioctl32 teardown stub. */
static void orangefs_ioctl32_cleanup(void)
{
	return;
}
919 /* the assigned character device major number */
920 static int orangefs_dev_major
;
923 * Initialize orangefs device specific state:
924 * Must be called at module load time only
926 int orangefs_dev_init(void)
930 /* register the ioctl32 sub-system */
931 ret
= orangefs_ioctl32_init();
935 /* register orangefs-req device */
936 orangefs_dev_major
= register_chrdev(0,
937 ORANGEFS_REQDEVICE_NAME
,
938 &orangefs_devreq_file_operations
);
939 if (orangefs_dev_major
< 0) {
940 gossip_debug(GOSSIP_DEV_DEBUG
,
941 "Failed to register /dev/%s (error %d)\n",
942 ORANGEFS_REQDEVICE_NAME
, orangefs_dev_major
);
943 orangefs_ioctl32_cleanup();
944 return orangefs_dev_major
;
947 gossip_debug(GOSSIP_DEV_DEBUG
,
948 "*** /dev/%s character device registered ***\n",
949 ORANGEFS_REQDEVICE_NAME
);
950 gossip_debug(GOSSIP_DEV_DEBUG
, "'mknod /dev/%s c %d 0'.\n",
951 ORANGEFS_REQDEVICE_NAME
, orangefs_dev_major
);
955 void orangefs_dev_cleanup(void)
957 unregister_chrdev(orangefs_dev_major
, ORANGEFS_REQDEVICE_NAME
);
958 gossip_debug(GOSSIP_DEV_DEBUG
,
959 "*** /dev/%s character device unregistered ***\n",
960 ORANGEFS_REQDEVICE_NAME
);
961 /* unregister the ioctl32 sub-system */
962 orangefs_ioctl32_cleanup();
965 static unsigned int orangefs_devreq_poll(struct file
*file
,
966 struct poll_table_struct
*poll_table
)
968 int poll_revent_mask
= 0;
970 if (open_access_count
== 1) {
971 poll_wait(file
, &orangefs_request_list_waitq
, poll_table
);
973 spin_lock(&orangefs_request_list_lock
);
974 if (!list_empty(&orangefs_request_list
))
975 poll_revent_mask
|= POLL_IN
;
976 spin_unlock(&orangefs_request_list_lock
);
978 return poll_revent_mask
;
981 const struct file_operations orangefs_devreq_file_operations
= {
982 .owner
= THIS_MODULE
,
983 .read
= orangefs_devreq_read
,
984 .write_iter
= orangefs_devreq_write_iter
,
985 .open
= orangefs_devreq_open
,
986 .release
= orangefs_devreq_release
,
987 .unlocked_ioctl
= orangefs_devreq_ioctl
,
989 #ifdef CONFIG_COMPAT /* CONFIG_COMPAT is in .config */
990 .compat_ioctl
= orangefs_devreq_compat_ioctl
,
992 .poll
= orangefs_devreq_poll