orangefs: delay freeing slot until cancel completes
[deliverable/linux.git] / fs / orangefs / devorangefs-req.c
1 /*
2 * (C) 2001 Clemson University and The University of Chicago
3 *
4 * Changes by Acxiom Corporation to add protocol version to kernel
5 * communication, Copyright Acxiom Corporation, 2005.
6 *
7 * See COPYING in top-level directory.
8 */
9
10 #include "protocol.h"
11 #include "orangefs-kernel.h"
12 #include "orangefs-dev-proto.h"
13 #include "orangefs-bufmap.h"
14
15 #include <linux/debugfs.h>
16 #include <linux/slab.h>
17
18 /* this file implements the /dev/pvfs2-req device node */
19
/*
 * Count of processes that currently have /dev/pvfs2-req open.  The
 * client-core daemon is the only legitimate opener, so this is held to
 * at most 1 (enforced in orangefs_devreq_open).  Reads/writes are
 * serialized by devreq_mutex; -1 marks the window inside
 * orangefs_devreq_release while the device is being torn down.
 */
static int open_access_count;

/*
 * Loudly explain (via gossip_err) that /dev/pvfs2-req supports only a
 * single opener at a time, and suggest how to find the offender.
 * Classic do { } while (0) multi-statement macro.
 */
#define DUMP_DEVICE_ERROR()                                                \
do {                                                                       \
	gossip_err("*****************************************************\n");\
	gossip_err("ORANGEFS Device Error:  You cannot open the device file ");  \
	gossip_err("\n/dev/%s more than once.  Please make sure that\nthere " \
		   "are no ", ORANGEFS_REQDEVICE_NAME);                          \
	gossip_err("instances of a program using this device\ncurrently "  \
		   "running. (You must verify this!)\n");                       \
	gossip_err("For example, you can use the lsof program as follows:\n");\
	gossip_err("'lsof | grep %s' (run this as root)\n",                \
		   ORANGEFS_REQDEVICE_NAME);                                     \
	gossip_err("  open_access_count = %d\n", open_access_count);       \
	gossip_err("*****************************************************\n");\
} while (0)
36
/*
 * Map an operation tag to a bucket index in htable_ops_in_progress.
 * do_div() divides @tag in place (harmless -- it is a by-value copy)
 * and returns the remainder, which is the bucket index.
 */
static int hash_func(__u64 tag, int table_size)
{
	return do_div(tag, (unsigned int)table_size);
}
41
42 static void orangefs_devreq_add_op(struct orangefs_kernel_op_s *op)
43 {
44 int index = hash_func(op->tag, hash_table_size);
45
46 list_add_tail(&op->list, &htable_ops_in_progress[index]);
47 }
48
49 static struct orangefs_kernel_op_s *orangefs_devreq_remove_op(__u64 tag)
50 {
51 struct orangefs_kernel_op_s *op, *next;
52 int index;
53
54 index = hash_func(tag, hash_table_size);
55
56 spin_lock(&htable_ops_in_progress_lock);
57 list_for_each_entry_safe(op,
58 next,
59 &htable_ops_in_progress[index],
60 list) {
61 if (op->tag == tag && !op_state_purged(op)) {
62 list_del_init(&op->list);
63 get_op(op); /* increase ref count. */
64 spin_unlock(&htable_ops_in_progress_lock);
65 return op;
66 }
67 }
68
69 spin_unlock(&htable_ops_in_progress_lock);
70 return NULL;
71 }
72
73 static int orangefs_devreq_open(struct inode *inode, struct file *file)
74 {
75 int ret = -EINVAL;
76
77 if (!(file->f_flags & O_NONBLOCK)) {
78 gossip_err("%s: device cannot be opened in blocking mode\n",
79 __func__);
80 goto out;
81 }
82 ret = -EACCES;
83 gossip_debug(GOSSIP_DEV_DEBUG, "client-core: opening device\n");
84 mutex_lock(&devreq_mutex);
85
86 if (open_access_count == 0) {
87 open_access_count = 1;
88 ret = 0;
89 } else {
90 DUMP_DEVICE_ERROR();
91 }
92 mutex_unlock(&devreq_mutex);
93
94 out:
95
96 gossip_debug(GOSSIP_DEV_DEBUG,
97 "pvfs2-client-core: open device complete (ret = %d)\n",
98 ret);
99 return ret;
100 }
101
/*
 * Function for read() callers into the device: the client-core daemon
 * pulls the next pending upcall off orangefs_request_list, and this
 * routine serializes it into the daemon's buffer as:
 *	__s32 proto_ver | __s32 magic | __u64 tag | struct orangefs_upcall_s
 * On success the op is moved to the in-progress hash table keyed by
 * tag, where orangefs_devreq_write_iter() will find it when the
 * daemon writes the matching downcall.
 */
static ssize_t orangefs_devreq_read(struct file *file,
				    char __user *buf,
				    size_t count, loff_t *offset)
{
	struct orangefs_kernel_op_s *op, *temp;
	__s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION;
	static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
	struct orangefs_kernel_op_s *cur_op = NULL;
	unsigned long ret;

	/* We do not support blocking IO. */
	if (!(file->f_flags & O_NONBLOCK)) {
		gossip_err("%s: blocking read from client-core.\n",
			   __func__);
		return -EINVAL;
	}

	/*
	 * The client will do an ioctl to find MAX_DEV_REQ_UPSIZE, then
	 * always read with that size buffer.
	 */
	if (count != MAX_DEV_REQ_UPSIZE) {
		gossip_err("orangefs: client-core tried to read wrong size\n");
		return -EINVAL;
	}

	/* Jumped back to if the op we picked was given up mid-copy. */
restart:
	/* Get next op (if any) from top of list. */
	spin_lock(&orangefs_request_list_lock);
	list_for_each_entry_safe(op, temp, &orangefs_request_list, list) {
		__s32 fsid;
		/* This lock is held past the end of the loop when we break. */
		spin_lock(&op->lock);
		/* Skip ops purged by a dying client-core. */
		if (unlikely(op_state_purged(op))) {
			spin_unlock(&op->lock);
			continue;
		}

		fsid = fsid_of_op(op);
		if (fsid != ORANGEFS_FS_ID_NULL) {
			int ret;
			/* Skip ops whose filesystem needs to be mounted. */
			ret = fs_mount_pending(fsid);
			if (ret == 1) {
				gossip_debug(GOSSIP_DEV_DEBUG,
				    "%s: mount pending, skipping op tag "
				    "%llu %s\n",
				    __func__,
				    llu(op->tag),
				    get_opname_string(op));
				spin_unlock(&op->lock);
				continue;
			/*
			 * Skip ops whose filesystem we don't know about unless
			 * it is being mounted.
			 */
			/* XXX: is there a better way to detect this? */
			} else if (ret == -1 &&
				   !(op->upcall.type ==
					ORANGEFS_VFS_OP_FS_MOUNT ||
				     op->upcall.type ==
					ORANGEFS_VFS_OP_GETATTR)) {
				gossip_debug(GOSSIP_DEV_DEBUG,
				    "orangefs: skipping op tag %llu %s\n",
				    llu(op->tag), get_opname_string(op));
				gossip_err(
				    "orangefs: ERROR: fs_mount_pending %d\n",
				    fsid);
				spin_unlock(&op->lock);
				continue;
			}
		}
		/*
		 * Either this op does not pertain to a filesystem, is mounting
		 * a filesystem, or pertains to a mounted filesystem. Let it
		 * through.
		 */
		cur_op = op;
		break;
	}

	/*
	 * At this point we either have a valid op and can continue or have not
	 * found an op and must ask the client to try again later.
	 */
	if (!cur_op) {
		spin_unlock(&orangefs_request_list_lock);
		return -EAGAIN;
	}

	gossip_debug(GOSSIP_DEV_DEBUG, "orangefs: reading op tag %llu %s\n",
		     llu(cur_op->tag), get_opname_string(cur_op));

	/*
	 * Such an op should never be on the list in the first place. If so, we
	 * will abort.
	 */
	if (op_state_in_progress(cur_op) || op_state_serviced(cur_op)) {
		gossip_err("orangefs: ERROR: Current op already queued.\n");
		list_del(&cur_op->list);
		spin_unlock(&cur_op->lock);
		spin_unlock(&orangefs_request_list_lock);
		return -EAGAIN;
	}
	list_del_init(&cur_op->list);
	/* op == cur_op here (we broke out of the loop with it). */
	get_op(op);
	spin_unlock(&orangefs_request_list_lock);

	spin_unlock(&cur_op->lock);

	/* Push the upcall out. */
	ret = copy_to_user(buf, &proto_ver, sizeof(__s32));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf+sizeof(__s32), &magic, sizeof(__s32));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf+2 * sizeof(__s32), &cur_op->tag, sizeof(__u64));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf+2*sizeof(__s32)+sizeof(__u64), &cur_op->upcall,
			   sizeof(struct orangefs_upcall_s));
	if (ret != 0)
		goto error;

	/*
	 * Nested lock order: hash-table lock outside op->lock.  If the
	 * waiter gave up while we were copying, drop our reference and
	 * go look for another op instead of hashing a dead one.
	 */
	spin_lock(&htable_ops_in_progress_lock);
	spin_lock(&cur_op->lock);
	if (unlikely(op_state_given_up(cur_op))) {
		spin_unlock(&cur_op->lock);
		spin_unlock(&htable_ops_in_progress_lock);
		op_release(cur_op);
		goto restart;
	}

	/*
	 * Set the operation to be in progress and move it between lists since
	 * it has been sent to the client.
	 */
	set_op_state_inprogress(cur_op);
	orangefs_devreq_add_op(cur_op);
	spin_unlock(&cur_op->lock);
	spin_unlock(&htable_ops_in_progress_lock);
	/* Drop the reference taken with get_op() above. */
	op_release(cur_op);

	/* The client only asks to read one size buffer. */
	return MAX_DEV_REQ_UPSIZE;
error:
	/*
	 * We were unable to copy the op data to the client. Put the op back in
	 * list. If client has crashed, the op will be purged later when the
	 * device is released.
	 */
	gossip_err("orangefs: Failed to copy data to user space\n");
	spin_lock(&orangefs_request_list_lock);
	spin_lock(&cur_op->lock);
	/* Only requeue if the waiter has not abandoned the op. */
	if (likely(!op_state_given_up(cur_op))) {
		set_op_state_waiting(cur_op);
		list_add(&cur_op->list, &orangefs_request_list);
	}
	spin_unlock(&cur_op->lock);
	spin_unlock(&orangefs_request_list_lock);
	op_release(cur_op);
	return -EFAULT;
}
267
/*
 * Function for writev() callers into the device: the client-core
 * daemon delivers the downcall matching a previously read upcall.
 *
 * Userspace should have written:
 *  - __u32 version
 *  - __u32 magic
 *  - __u64 tag
 *  - struct orangefs_downcall_s
 *  - trailer buffer (in the case of READDIR operations)
 *
 * The tag is used to find the waiting op in the in-progress hash
 * table; on success the op is marked serviced, which wakes the VFS
 * thread sleeping on it.
 */
static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
				      struct iov_iter *iter)
{
	ssize_t ret;
	struct orangefs_kernel_op_s *op = NULL;
	struct {
		__u32 version;
		__u32 magic;
		__u64 tag;
	} head;
	int total = ret = iov_iter_count(iter);
	int n;
	int downcall_size = sizeof(struct orangefs_downcall_s);
	int head_size = sizeof(head);

	gossip_debug(GOSSIP_DEV_DEBUG, "%s: total:%d: ret:%zd:\n",
		     __func__,
		     total,
		     ret);

	/* Must at least contain head + downcall (trailer is optional). */
	if (total < MAX_DEV_REQ_DOWNSIZE) {
		gossip_err("%s: total:%d: must be at least:%u:\n",
			   __func__,
			   total,
			   (unsigned int) MAX_DEV_REQ_DOWNSIZE);
		return -EFAULT;
	}

	n = copy_from_iter(&head, head_size, iter);
	if (n < head_size) {
		gossip_err("%s: failed to copy head.\n", __func__);
		return -EFAULT;
	}

	/* Refuse downcalls from a client-core that is too old. */
	if (head.version < ORANGEFS_MINIMUM_USERSPACE_VERSION) {
		gossip_err("%s: userspace claims version"
			   "%d, minimum version required: %d.\n",
			   __func__,
			   head.version,
			   ORANGEFS_MINIMUM_USERSPACE_VERSION);
		return -EPROTO;
	}

	if (head.magic != ORANGEFS_DEVREQ_MAGIC) {
		gossip_err("Error: Device magic number does not match.\n");
		return -EPROTO;
	}

	/* Takes a reference on op that we drop at "out" below. */
	op = orangefs_devreq_remove_op(head.tag);
	if (!op) {
		/* No waiter: swallow the downcall and report success. */
		gossip_err("WARNING: No one's waiting for tag %llu\n",
			   llu(head.tag));
		return ret;
	}

	n = copy_from_iter(&op->downcall, downcall_size, iter);
	if (n != downcall_size) {
		gossip_err("%s: failed to copy downcall.\n", __func__);
		ret = -EFAULT;
		goto Broken;
	}

	/* A failed op carries no payload worth validating; just wake it. */
	if (op->downcall.status)
		goto wakeup;

	/*
	 * We've successfully peeled off the head and the downcall.
	 * Something has gone awry if total doesn't equal the
	 * sum of head_size, downcall_size and trailer_size.
	 */
	if ((head_size + downcall_size + op->downcall.trailer_size) != total) {
		gossip_err("%s: funky write, head_size:%d"
			   ": downcall_size:%d: trailer_size:%lld"
			   ": total size:%d:\n",
			   __func__,
			   head_size,
			   downcall_size,
			   op->downcall.trailer_size,
			   total);
		ret = -EFAULT;
		goto Broken;
	}

	/* Only READDIR operations should have trailers. */
	if ((op->downcall.type != ORANGEFS_VFS_OP_READDIR) &&
	    (op->downcall.trailer_size != 0)) {
		gossip_err("%s: %x operation with trailer.",
			   __func__,
			   op->downcall.type);
		ret = -EFAULT;
		goto Broken;
	}

	/* READDIR operations should always have trailers. */
	if ((op->downcall.type == ORANGEFS_VFS_OP_READDIR) &&
	    (op->downcall.trailer_size == 0)) {
		gossip_err("%s: %x operation with no trailer.",
			   __func__,
			   op->downcall.type);
		ret = -EFAULT;
		goto Broken;
	}

	if (op->downcall.type != ORANGEFS_VFS_OP_READDIR)
		goto wakeup;

	/* NOTE(review): vmalloc+memset could be vzalloc -- same effect. */
	op->downcall.trailer_buf =
		vmalloc(op->downcall.trailer_size);
	if (op->downcall.trailer_buf == NULL) {
		gossip_err("%s: failed trailer vmalloc.\n",
			   __func__);
		ret = -ENOMEM;
		goto Broken;
	}
	memset(op->downcall.trailer_buf, 0, op->downcall.trailer_size);
	n = copy_from_iter(op->downcall.trailer_buf,
			   op->downcall.trailer_size,
			   iter);
	if (n != op->downcall.trailer_size) {
		gossip_err("%s: failed to copy trailer.\n", __func__);
		vfree(op->downcall.trailer_buf);
		ret = -EFAULT;
		goto Broken;
	}

wakeup:
	/*
	 * tell the vfs op waiting on a waitqueue
	 * that this op is done
	 */
	spin_lock(&op->lock);
	/* Waiter gave up (signal/timeout); nobody to wake. */
	if (unlikely(op_state_given_up(op))) {
		spin_unlock(&op->lock);
		goto out;
	}
	set_op_state_serviced(op);
	spin_unlock(&op->lock);

	/*
	 * If this operation is an I/O operation we need to wait
	 * for all data to be copied before we can return to avoid
	 * buffer corruption and races that can pull the buffers
	 * out from under us.
	 *
	 * Essentially we're synchronizing with other parts of the
	 * vfs implicitly by not allowing the user space
	 * application reading/writing this device to return until
	 * the buffers are done being used.
	 */
	if (op->downcall.type == ORANGEFS_VFS_OP_FILE_IO) {
		long n = wait_for_completion_interruptible_timeout(&op->done,
							op_timeout_secs * HZ);
		if (unlikely(n < 0)) {
			gossip_debug(GOSSIP_DEV_DEBUG,
				"%s: signal on I/O wait, aborting\n",
				__func__);
		} else if (unlikely(n == 0)) {
			gossip_debug(GOSSIP_DEV_DEBUG,
				"%s: timed out.\n",
				__func__);
		}
	}
out:
	/* Cancel ops carry an extra reference; see put_cancel(). */
	if (unlikely(op_is_cancel(op)))
		put_cancel(op);
	op_release(op);
	return ret;

Broken:
	/*
	 * Malformed downcall: report the error through the op itself so
	 * the waiter does not hang, unless it has already given up.
	 */
	spin_lock(&op->lock);
	if (!op_state_given_up(op)) {
		op->downcall.status = ret;
		set_op_state_serviced(op);
	}
	spin_unlock(&op->lock);
	goto out;
}
455
456 /* Returns whether any FS are still pending remounted */
457 static int mark_all_pending_mounts(void)
458 {
459 int unmounted = 1;
460 struct orangefs_sb_info_s *orangefs_sb = NULL;
461
462 spin_lock(&orangefs_superblocks_lock);
463 list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
464 /* All of these file system require a remount */
465 orangefs_sb->mount_pending = 1;
466 unmounted = 0;
467 }
468 spin_unlock(&orangefs_superblocks_lock);
469 return unmounted;
470 }
471
472 /*
473 * Determine if a given file system needs to be remounted or not
474 * Returns -1 on error
475 * 0 if already mounted
476 * 1 if needs remount
477 */
478 int fs_mount_pending(__s32 fsid)
479 {
480 int mount_pending = -1;
481 struct orangefs_sb_info_s *orangefs_sb = NULL;
482
483 spin_lock(&orangefs_superblocks_lock);
484 list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
485 if (orangefs_sb->fs_id == fsid) {
486 mount_pending = orangefs_sb->mount_pending;
487 break;
488 }
489 }
490 spin_unlock(&orangefs_superblocks_lock);
491 return mount_pending;
492 }
493
/*
 * NOTE: gets called when the last reference to this device is dropped.
 * Using the open_access_count variable, we enforce a reference count
 * on this file so that it can be opened by only one process at a time.
 * the devreq_mutex is used to make sure all i/o has completed
 * before we call orangefs_bufmap_finalize, and similar such tricky
 * situations
 *
 * Ordering here matters: the bufmap is torn down and pending mounts
 * are marked BEFORE the waiting/in-progress ops are purged, so that
 * woken ops observe a consistent "client-core is gone" state.
 */
static int orangefs_devreq_release(struct inode *inode, struct file *file)
{
	int unmounted = 0;

	gossip_debug(GOSSIP_DEV_DEBUG,
		     "%s:pvfs2-client-core: exiting, closing device\n",
		     __func__);

	mutex_lock(&devreq_mutex);
	if (orangefs_get_bufmap_init())
		orangefs_bufmap_finalize();

	/* -1 blocks re-open until cleanup below completes. */
	open_access_count = -1;

	unmounted = mark_all_pending_mounts();
	gossip_debug(GOSSIP_DEV_DEBUG, "ORANGEFS Device Close: Filesystem(s) %s\n",
		     (unmounted ? "UNMOUNTED" : "MOUNTED"));

	/*
	 * Walk through the list of ops in the request list, mark them
	 * as purged and wake them up.
	 */
	purge_waiting_ops();
	/*
	 * Walk through the hash table of in progress operations; mark
	 * them as purged and wake them up
	 */
	purge_inprogress_ops();
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "pvfs2-client-core: device close complete\n");
	open_access_count = 0;
	mutex_unlock(&devreq_mutex);
	return 0;
}
536
537 int is_daemon_in_service(void)
538 {
539 int in_service;
540
541 /*
542 * What this function does is checks if client-core is alive
543 * based on the access count we maintain on the device.
544 */
545 mutex_lock(&devreq_mutex);
546 in_service = open_access_count == 1 ? 0 : -EIO;
547 mutex_unlock(&devreq_mutex);
548 return in_service;
549 }
550
551 bool __is_daemon_in_service(void)
552 {
553 return open_access_count == 1;
554 }
555
556 static inline long check_ioctl_command(unsigned int command)
557 {
558 /* Check for valid ioctl codes */
559 if (_IOC_TYPE(command) != ORANGEFS_DEV_MAGIC) {
560 gossip_err("device ioctl magic numbers don't match! Did you rebuild pvfs2-client-core/libpvfs2? [cmd %x, magic %x != %x]\n",
561 command,
562 _IOC_TYPE(command),
563 ORANGEFS_DEV_MAGIC);
564 return -EINVAL;
565 }
566 /* and valid ioctl commands */
567 if (_IOC_NR(command) >= ORANGEFS_DEV_MAXNR || _IOC_NR(command) <= 0) {
568 gossip_err("Invalid ioctl command number [%d >= %d]\n",
569 _IOC_NR(command), ORANGEFS_DEV_MAXNR);
570 return -ENOIOCTLCMD;
571 }
572 return 0;
573 }
574
/*
 * Handle one already-validated ioctl on /dev/pvfs2-req.  Most commands
 * are client-core housekeeping: buffer-map setup, remount-all after a
 * client-core restart, and debug-mask plumbing.
 */
static long dispatch_ioctl_command(unsigned int command, unsigned long arg)
{
	static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
	static __s32 max_up_size = MAX_DEV_REQ_UPSIZE;
	static __s32 max_down_size = MAX_DEV_REQ_DOWNSIZE;
	struct ORANGEFS_dev_map_desc user_desc;
	int ret = 0;
	struct dev_mask_info_s mask_info = { 0 };
	struct dev_mask2_info_s mask2_info = { 0, 0 };
	int upstream_kmod = 1;
	struct list_head *tmp = NULL;
	struct orangefs_sb_info_s *orangefs_sb = NULL;

	/* mtmoore: add locking here */

	switch (command) {
	/* Three simple "report a constant" queries. */
	case ORANGEFS_DEV_GET_MAGIC:
		return ((put_user(magic, (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	case ORANGEFS_DEV_GET_MAX_UPSIZE:
		return ((put_user(max_up_size,
				  (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	case ORANGEFS_DEV_GET_MAX_DOWNSIZE:
		return ((put_user(max_down_size,
				  (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	case ORANGEFS_DEV_MAP:
		/* Install the shared I/O buffer map (only once). */
		ret = copy_from_user(&user_desc,
				     (struct ORANGEFS_dev_map_desc __user *)
				     arg,
				     sizeof(struct ORANGEFS_dev_map_desc));
		if (orangefs_get_bufmap_init()) {
			return -EINVAL;
		} else {
			return ret ?
			       -EIO :
			       orangefs_bufmap_initialize(&user_desc);
		}
	case ORANGEFS_DEV_REMOUNT_ALL:
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: got ORANGEFS_DEV_REMOUNT_ALL\n",
			     __func__);

		/*
		 * remount all mounted orangefs volumes to regain the lost
		 * dynamic mount tables (if any) -- NOTE: this is done
		 * without keeping the superblock list locked due to the
		 * upcall/downcall waiting.  also, the request semaphore is
		 * used to ensure that no operations will be serviced until
		 * all of the remounts are serviced (to avoid ops between
		 * mounts to fail)
		 */
		ret = mutex_lock_interruptible(&request_mutex);
		if (ret < 0)
			return ret;
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: priority remount in progress\n",
			     __func__);
		list_for_each(tmp, &orangefs_superblocks) {
			orangefs_sb =
				list_entry(tmp,
					   struct orangefs_sb_info_s,
					   list);
			if (orangefs_sb && (orangefs_sb->sb)) {
				gossip_debug(GOSSIP_DEV_DEBUG,
					     "%s: Remounting SB %p\n",
					     __func__,
					     orangefs_sb);

				ret = orangefs_remount(orangefs_sb->sb);
				if (ret) {
					gossip_debug(GOSSIP_DEV_DEBUG,
						     "SB %p remount failed\n",
						     orangefs_sb);
					break;
				}
			}
		}
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: priority remount complete\n",
			     __func__);
		mutex_unlock(&request_mutex);
		return ret;

	case ORANGEFS_DEV_UPSTREAM:
		/* Tell client-core it is talking to the upstream kmod. */
		ret = copy_to_user((void __user *)arg,
				    &upstream_kmod,
				    sizeof(upstream_kmod));

		if (ret != 0)
			return -EIO;
		else
			return ret;

	case ORANGEFS_DEV_CLIENT_MASK:
		/* Client-core pushes its two-word debug mask down to us. */
		ret = copy_from_user(&mask2_info,
				     (void __user *)arg,
				     sizeof(struct dev_mask2_info_s));

		if (ret != 0)
			return -EIO;

		client_debug_mask.mask1 = mask2_info.mask1_value;
		client_debug_mask.mask2 = mask2_info.mask2_value;

		pr_info("%s: client debug mask has been been received "
			":%llx: :%llx:\n",
			__func__,
			(unsigned long long)client_debug_mask.mask1,
			(unsigned long long)client_debug_mask.mask2);

		return ret;

	case ORANGEFS_DEV_CLIENT_STRING:
		/* Client-core supplies its debug-keyword table. */
		ret = copy_from_user(&client_debug_array_string,
				     (void __user *)arg,
				     ORANGEFS_MAX_DEBUG_STRING_LEN);
		if (ret != 0) {
			pr_info("%s: CLIENT_STRING: copy_from_user failed\n",
				__func__);
			return -EIO;
		}

		pr_info("%s: client debug array string has been received.\n",
			__func__);

		if (!help_string_initialized) {

			/* Free the "we don't know yet" default string... */
			kfree(debug_help_string);

			/* build a proper debug help string */
			if (orangefs_prepare_debugfs_help_string(0)) {
				gossip_err("%s: no debug help string \n",
					   __func__);
				return -EIO;
			}

			/* Replace the boilerplate boot-time debug-help file. */
			debugfs_remove(help_file_dentry);

			help_file_dentry =
				debugfs_create_file(
					ORANGEFS_KMOD_DEBUG_HELP_FILE,
					0444,
					debug_dir,
					debug_help_string,
					&debug_help_fops);

			if (!help_file_dentry) {
				gossip_err("%s: debugfs_create_file failed for"
					   " :%s:!\n",
					   __func__,
					   ORANGEFS_KMOD_DEBUG_HELP_FILE);
				return -EIO;
			}
		}

		debug_mask_to_string(&client_debug_mask, 1);

		debugfs_remove(client_debug_dentry);

		orangefs_client_debug_init();

		help_string_initialized++;

		return ret;

	case ORANGEFS_DEV_DEBUG:
		/* Set either the kernel or the client debug mask. */
		ret = copy_from_user(&mask_info,
				     (void __user *)arg,
				     sizeof(mask_info));

		if (ret != 0)
			return -EIO;

		if (mask_info.mask_type == KERNEL_MASK) {
			if ((mask_info.mask_value == 0)
			    && (kernel_mask_set_mod_init)) {
				/*
				 * the kernel debug mask was set when the
				 * kernel module was loaded; don't override
				 * it if the client-core was started without
				 * a value for ORANGEFS_KMODMASK.
				 */
				return 0;
			}
			debug_mask_to_string(&mask_info.mask_value,
					     mask_info.mask_type);
			gossip_debug_mask = mask_info.mask_value;
			pr_info("%s: kernel debug mask has been modified to "
				":%s: :%llx:\n",
				__func__,
				kernel_debug_string,
				(unsigned long long)gossip_debug_mask);
		} else if (mask_info.mask_type == CLIENT_MASK) {
			debug_mask_to_string(&mask_info.mask_value,
					     mask_info.mask_type);
			pr_info("%s: client debug mask has been modified to"
				":%s: :%llx:\n",
				__func__,
				client_debug_string,
				llu(mask_info.mask_value));
		} else {
			gossip_lerr("Invalid mask type....\n");
			return -EINVAL;
		}

		return ret;

	default:
		return -ENOIOCTLCMD;
	}
	return -ENOIOCTLCMD;
}
794
/*
 * unlocked_ioctl handler for /dev/pvfs2-req: validate the command
 * word, then dispatch it.
 *
 * Fix: the function returns long; the previous narrowing (int) casts
 * on both return paths served no purpose and would silently truncate
 * a long value on 64-bit targets.
 */
static long orangefs_devreq_ioctl(struct file *file,
				  unsigned int command, unsigned long arg)
{
	long ret;

	/* Check for properly constructed commands */
	ret = check_ioctl_command(command);
	if (ret < 0)
		return ret;

	return dispatch_ioctl_command(command, arg);
}
807
808 #ifdef CONFIG_COMPAT /* CONFIG_COMPAT is in .config */
809
/*
 * Compat structure for the ORANGEFS_DEV_MAP ioctl: the 32-bit
 * user-space layout of struct ORANGEFS_dev_map_desc, with the buffer
 * pointer shrunk to a compat_uptr_t.  Widened back to the native
 * layout by translate_dev_map26().
 */
struct ORANGEFS_dev_map_desc32 {
	compat_uptr_t ptr;	/* 32-bit user pointer to the shared buffer */
	__s32 total_size;
	__s32 size;
	__s32 count;
};
817
/*
 * Widen a 32-bit ORANGEFS_dev_map_desc32 (at user address @args) into
 * a native struct ORANGEFS_dev_map_desc placed on the compat user
 * stack, so dispatch_ioctl_command() can treat it uniformly.  Returns
 * the user address of the widened struct; on any fault *error is set
 * to -EFAULT and 0 is returned.
 */
static unsigned long translate_dev_map26(unsigned long args, long *error)
{
	struct ORANGEFS_dev_map_desc32 __user *p32 = (void __user *)args;
	/*
	 * Depending on the architecture, allocate some space on the
	 * user-call-stack based on our expected layout.
	 */
	struct ORANGEFS_dev_map_desc __user *p =
	    compat_alloc_user_space(sizeof(*p));
	compat_uptr_t addr;

	*error = 0;
	/* get the ptr from the 32 bit user-space */
	if (get_user(addr, &p32->ptr))
		goto err;
	/* try to put that into a 64-bit layout */
	if (put_user(compat_ptr(addr), &p->ptr))
		goto err;
	/* copy the remaining fields */
	if (copy_in_user(&p->total_size, &p32->total_size, sizeof(__s32)))
		goto err;
	if (copy_in_user(&p->size, &p32->size, sizeof(__s32)))
		goto err;
	if (copy_in_user(&p->count, &p32->count, sizeof(__s32)))
		goto err;
	return (unsigned long)p;
err:
	*error = -EFAULT;
	return 0;
}
848
849 /*
850 * 32 bit user-space apps' ioctl handlers when kernel modules
851 * is compiled as a 64 bit one
852 */
853 static long orangefs_devreq_compat_ioctl(struct file *filp, unsigned int cmd,
854 unsigned long args)
855 {
856 long ret;
857 unsigned long arg = args;
858
859 /* Check for properly constructed commands */
860 ret = check_ioctl_command(cmd);
861 if (ret < 0)
862 return ret;
863 if (cmd == ORANGEFS_DEV_MAP) {
864 /*
865 * convert the arguments to what we expect internally
866 * in kernel space
867 */
868 arg = translate_dev_map26(args, &ret);
869 if (ret < 0) {
870 gossip_err("Could not translate dev map\n");
871 return ret;
872 }
873 }
874 /* no other ioctl requires translation */
875 return dispatch_ioctl_command(cmd, arg);
876 }
877
878 #endif /* CONFIG_COMPAT is in .config */
879
/* the assigned character device major number */
static int orangefs_dev_major;	/* filled in by register_chrdev() in orangefs_dev_init() */
882
883 /*
884 * Initialize orangefs device specific state:
885 * Must be called at module load time only
886 */
887 int orangefs_dev_init(void)
888 {
889 /* register orangefs-req device */
890 orangefs_dev_major = register_chrdev(0,
891 ORANGEFS_REQDEVICE_NAME,
892 &orangefs_devreq_file_operations);
893 if (orangefs_dev_major < 0) {
894 gossip_debug(GOSSIP_DEV_DEBUG,
895 "Failed to register /dev/%s (error %d)\n",
896 ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
897 return orangefs_dev_major;
898 }
899
900 gossip_debug(GOSSIP_DEV_DEBUG,
901 "*** /dev/%s character device registered ***\n",
902 ORANGEFS_REQDEVICE_NAME);
903 gossip_debug(GOSSIP_DEV_DEBUG, "'mknod /dev/%s c %d 0'.\n",
904 ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
905 return 0;
906 }
907
908 void orangefs_dev_cleanup(void)
909 {
910 unregister_chrdev(orangefs_dev_major, ORANGEFS_REQDEVICE_NAME);
911 gossip_debug(GOSSIP_DEV_DEBUG,
912 "*** /dev/%s character device unregistered ***\n",
913 ORANGEFS_REQDEVICE_NAME);
914 }
915
916 static unsigned int orangefs_devreq_poll(struct file *file,
917 struct poll_table_struct *poll_table)
918 {
919 int poll_revent_mask = 0;
920
921 poll_wait(file, &orangefs_request_list_waitq, poll_table);
922
923 if (!list_empty(&orangefs_request_list))
924 poll_revent_mask |= POLL_IN;
925 return poll_revent_mask;
926 }
927
/*
 * File operations for /dev/pvfs2-req; registered by
 * orangefs_dev_init().  read pulls upcalls out to client-core,
 * write_iter pushes downcalls back in, poll signals pending upcalls.
 */
const struct file_operations orangefs_devreq_file_operations = {
	.owner = THIS_MODULE,
	.read = orangefs_devreq_read,
	.write_iter = orangefs_devreq_write_iter,
	.open = orangefs_devreq_open,
	.release = orangefs_devreq_release,
	.unlocked_ioctl = orangefs_devreq_ioctl,

#ifdef CONFIG_COMPAT		/* CONFIG_COMPAT is in .config */
	.compat_ioctl = orangefs_devreq_compat_ioctl,
#endif
	.poll = orangefs_devreq_poll
};
This page took 0.053181 seconds and 5 git commands to generate.