/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * Changes by Acxiom Corporation to add protocol version to kernel
 * communication, Copyright Acxiom Corporation, 2005.
 *
 * See COPYING in top-level directory.
 */

#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-dev-proto.h"
#include "orangefs-bufmap.h"

#include <linux/debugfs.h>
#include <linux/slab.h>

/* this file implements the /dev/pvfs2-req device node */
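/*
 * The userspace client-core daemon opens this node, read()s upcalls that
 * the kernel has queued on orangefs_request_list, services them, and then
 * writes the matching downcalls back through write_iter().
 */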

static int open_access_count;

#define DUMP_DEVICE_ERROR()                                                   \
do {                                                                          \
        gossip_err("*****************************************************\n");\
        gossip_err("ORANGEFS Device Error: You cannot open the device file ");\
        gossip_err("\n/dev/%s more than once. Please make sure that\nthere "  \
                   "are no ", ORANGEFS_REQDEVICE_NAME);                       \
        gossip_err("instances of a program using this device\ncurrently "     \
                   "running. (You must verify this!)\n");                     \
        gossip_err("For example, you can use the lsof program as follows:\n");\
        gossip_err("'lsof | grep %s' (run this as root)\n",                   \
                   ORANGEFS_REQDEVICE_NAME);                                  \
        gossip_err(" open_access_count = %d\n", open_access_count);           \
        gossip_err("*****************************************************\n");\
} while (0)

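/*
 * Map an op tag to a bucket in htable_ops_in_progress.  do_div() divides
 * the tag in place and returns the remainder, so this is simply
 * tag % table_size.
 */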
static int hash_func(__u64 tag, int table_size)
{
        return do_div(tag, (unsigned int)table_size);
}

static void orangefs_devreq_add_op(struct orangefs_kernel_op_s *op)
{
        int index = hash_func(op->tag, hash_table_size);

        list_add_tail(&op->list, &htable_ops_in_progress[index]);
}

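/*
 * Look up the in-progress op whose tag matches the one the client-core
 * wrote back, remove it from its hash bucket and return it with an extra
 * reference held.  Returns NULL if no live (non-purged) op has that tag.
 */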
static struct orangefs_kernel_op_s *orangefs_devreq_remove_op(__u64 tag)
{
        struct orangefs_kernel_op_s *op, *next;
        int index;

        index = hash_func(tag, hash_table_size);

        spin_lock(&htable_ops_in_progress_lock);
        list_for_each_entry_safe(op,
                                 next,
                                 &htable_ops_in_progress[index],
                                 list) {
                if (op->tag == tag && !op_state_purged(op)) {
                        list_del_init(&op->list);
                        get_op(op); /* increase ref count. */
                        spin_unlock(&htable_ops_in_progress_lock);
                        return op;
                }
        }

        spin_unlock(&htable_ops_in_progress_lock);
        return NULL;
}

static int orangefs_devreq_open(struct inode *inode, struct file *file)
{
        int ret = -EINVAL;

        if (!(file->f_flags & O_NONBLOCK)) {
                gossip_err("%s: device cannot be opened in blocking mode\n",
                           __func__);
                goto out;
        }
        ret = -EACCES;
        gossip_debug(GOSSIP_DEV_DEBUG, "client-core: opening device\n");
        mutex_lock(&devreq_mutex);

        if (open_access_count == 0) {
                open_access_count = 1;
                ret = 0;
        } else {
                DUMP_DEVICE_ERROR();
        }
        mutex_unlock(&devreq_mutex);

out:

        gossip_debug(GOSSIP_DEV_DEBUG,
                     "pvfs2-client-core: open device complete (ret = %d)\n",
                     ret);
        return ret;
}

/* Function for read() callers into the device */
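/*
 * Each successful read() hands the client-core a single upcall, packed as
 * __s32 proto_ver, __s32 magic, __u64 tag, then struct orangefs_upcall_s,
 * inside the fixed MAX_DEV_REQ_UPSIZE buffer the client always reads with.
 */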
static ssize_t orangefs_devreq_read(struct file *file,
                                    char __user *buf,
                                    size_t count, loff_t *offset)
{
        struct orangefs_kernel_op_s *op, *temp;
        __s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION;
        static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
        struct orangefs_kernel_op_s *cur_op = NULL;
        unsigned long ret;

        /* We do not support blocking IO. */
        if (!(file->f_flags & O_NONBLOCK)) {
                gossip_err("%s: blocking read from client-core.\n",
                           __func__);
                return -EINVAL;
        }

        /*
         * The client will do an ioctl to find MAX_DEV_REQ_UPSIZE, then
         * always read with that size buffer.
         */
        if (count != MAX_DEV_REQ_UPSIZE) {
                gossip_err("orangefs: client-core tried to read wrong size\n");
                return -EINVAL;
        }

restart:
        /* Get next op (if any) from top of list. */
        spin_lock(&orangefs_request_list_lock);
        list_for_each_entry_safe(op, temp, &orangefs_request_list, list) {
                __s32 fsid;
                /* This lock is held past the end of the loop when we break. */
                spin_lock(&op->lock);
                if (unlikely(op_state_purged(op))) {
                        spin_unlock(&op->lock);
                        continue;
                }

                fsid = fsid_of_op(op);
                if (fsid != ORANGEFS_FS_ID_NULL) {
                        int ret;
                        /* Skip ops whose filesystem needs to be mounted. */
                        ret = fs_mount_pending(fsid);
                        if (ret == 1) {
                                gossip_debug(GOSSIP_DEV_DEBUG,
                                             "%s: mount pending, skipping op tag "
                                             "%llu %s\n",
                                             __func__,
                                             llu(op->tag),
                                             get_opname_string(op));
                                spin_unlock(&op->lock);
                                continue;
                        /*
                         * Skip ops whose filesystem we don't know about unless
                         * it is being mounted.
                         */
                        /* XXX: is there a better way to detect this? */
                        } else if (ret == -1 &&
                                   !(op->upcall.type ==
                                        ORANGEFS_VFS_OP_FS_MOUNT ||
                                     op->upcall.type ==
                                        ORANGEFS_VFS_OP_GETATTR)) {
                                gossip_debug(GOSSIP_DEV_DEBUG,
                                             "orangefs: skipping op tag %llu %s\n",
                                             llu(op->tag), get_opname_string(op));
                                gossip_err(
                                        "orangefs: ERROR: fs_mount_pending %d\n",
                                        fsid);
                                spin_unlock(&op->lock);
                                continue;
                        }
                }
                /*
                 * Either this op does not pertain to a filesystem, is mounting
                 * a filesystem, or pertains to a mounted filesystem. Let it
                 * through.
                 */
                cur_op = op;
                break;
        }

        /*
         * At this point we either have a valid op and can continue or have not
         * found an op and must ask the client to try again later.
         */
        if (!cur_op) {
                spin_unlock(&orangefs_request_list_lock);
                return -EAGAIN;
        }

        gossip_debug(GOSSIP_DEV_DEBUG, "orangefs: reading op tag %llu %s\n",
                     llu(cur_op->tag), get_opname_string(cur_op));

        /*
         * Such an op should never be on the list in the first place. If so, we
         * will abort.
         */
        if (op_state_in_progress(cur_op) || op_state_serviced(cur_op)) {
                gossip_err("orangefs: ERROR: Current op already queued.\n");
                list_del(&cur_op->list);
                spin_unlock(&cur_op->lock);
                spin_unlock(&orangefs_request_list_lock);
                return -EAGAIN;
        }
        list_del_init(&cur_op->list);
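        /*
         * op and cur_op refer to the same request here; take a reference
         * before the request list lock and the op lock are dropped below.
         */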
        get_op(op);
        spin_unlock(&orangefs_request_list_lock);

        spin_unlock(&cur_op->lock);

        /* Push the upcall out. */
        ret = copy_to_user(buf, &proto_ver, sizeof(__s32));
        if (ret != 0)
                goto error;
        ret = copy_to_user(buf + sizeof(__s32), &magic, sizeof(__s32));
        if (ret != 0)
                goto error;
        ret = copy_to_user(buf + 2 * sizeof(__s32), &cur_op->tag, sizeof(__u64));
        if (ret != 0)
                goto error;
        ret = copy_to_user(buf + 2 * sizeof(__s32) + sizeof(__u64),
                           &cur_op->upcall,
                           sizeof(struct orangefs_upcall_s));
        if (ret != 0)
                goto error;

        spin_lock(&htable_ops_in_progress_lock);
        spin_lock(&cur_op->lock);
        if (unlikely(op_state_given_up(cur_op))) {
                spin_unlock(&cur_op->lock);
                spin_unlock(&htable_ops_in_progress_lock);
                op_release(cur_op);
                goto restart;
        }

        /*
         * Set the operation to be in progress and move it between lists since
         * it has been sent to the client.
         */
        set_op_state_inprogress(cur_op);
        orangefs_devreq_add_op(cur_op);
        spin_unlock(&cur_op->lock);
        spin_unlock(&htable_ops_in_progress_lock);
        op_release(cur_op);

        /* The client only asks to read one size buffer. */
        return MAX_DEV_REQ_UPSIZE;
error:
        /*
         * We were unable to copy the op data to the client. Put the op back
         * on the list. If the client has crashed, the op will be purged later
         * when the device is released.
         */
        gossip_err("orangefs: Failed to copy data to user space\n");
        spin_lock(&orangefs_request_list_lock);
        spin_lock(&cur_op->lock);
        if (likely(!op_state_given_up(cur_op))) {
                set_op_state_waiting(cur_op);
                list_add(&cur_op->list, &orangefs_request_list);
        }
        spin_unlock(&cur_op->lock);
        spin_unlock(&orangefs_request_list_lock);
        op_release(cur_op);
        return -EFAULT;
}

/*
 * Function for writev() callers into the device.
 *
 * Userspace should have written:
 *  - __u32 version
 *  - __u32 magic
 *  - __u64 tag
 *  - struct orangefs_downcall_s
 *  - trailer buffer (in the case of READDIR operations)
 */
static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
                                          struct iov_iter *iter)
{
        ssize_t ret;
        struct orangefs_kernel_op_s *op = NULL;
        struct {
                __u32 version;
                __u32 magic;
                __u64 tag;
        } head;
        int total = ret = iov_iter_count(iter);
        int n;
        int downcall_size = sizeof(struct orangefs_downcall_s);
        int head_size = sizeof(head);

        gossip_debug(GOSSIP_DEV_DEBUG, "%s: total:%d: ret:%zd:\n",
                     __func__,
                     total,
                     ret);

        if (total < MAX_DEV_REQ_DOWNSIZE) {
                gossip_err("%s: total:%d: must be at least:%u:\n",
                           __func__,
                           total,
                           (unsigned int) MAX_DEV_REQ_DOWNSIZE);
                return -EFAULT;
        }

        n = copy_from_iter(&head, head_size, iter);
        if (n < head_size) {
                gossip_err("%s: failed to copy head.\n", __func__);
                return -EFAULT;
        }

        if (head.version < ORANGEFS_MINIMUM_USERSPACE_VERSION) {
                gossip_err("%s: userspace claims version "
                           "%d, minimum version required: %d.\n",
                           __func__,
                           head.version,
                           ORANGEFS_MINIMUM_USERSPACE_VERSION);
                return -EPROTO;
        }

        if (head.magic != ORANGEFS_DEVREQ_MAGIC) {
                gossip_err("Error: Device magic number does not match.\n");
                return -EPROTO;
        }

        op = orangefs_devreq_remove_op(head.tag);
        if (!op) {
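                /*
                 * No op is waiting on this tag (e.g. it was purged or given
                 * up on); consume the write and drop the downcall.
                 */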
                gossip_err("WARNING: No one's waiting for tag %llu\n",
                           llu(head.tag));
                return ret;
        }

        n = copy_from_iter(&op->downcall, downcall_size, iter);
        if (n != downcall_size) {
                gossip_err("%s: failed to copy downcall.\n", __func__);
                ret = -EFAULT;
                goto Broken;
        }

        if (op->downcall.status)
                goto wakeup;

        /*
         * We've successfully peeled off the head and the downcall.
         * Something has gone awry if total doesn't equal the
         * sum of head_size, downcall_size and trailer_size.
         */
        if ((head_size + downcall_size + op->downcall.trailer_size) != total) {
                gossip_err("%s: funky write, head_size:%d"
                           ": downcall_size:%d: trailer_size:%lld"
                           ": total size:%d:\n",
                           __func__,
                           head_size,
                           downcall_size,
                           op->downcall.trailer_size,
                           total);
                ret = -EFAULT;
                goto Broken;
        }

        /* Only READDIR operations should have trailers. */
        if ((op->downcall.type != ORANGEFS_VFS_OP_READDIR) &&
            (op->downcall.trailer_size != 0)) {
                gossip_err("%s: %x operation with trailer.",
                           __func__,
                           op->downcall.type);
                ret = -EFAULT;
                goto Broken;
        }

        /* READDIR operations should always have trailers. */
        if ((op->downcall.type == ORANGEFS_VFS_OP_READDIR) &&
            (op->downcall.trailer_size == 0)) {
                gossip_err("%s: %x operation with no trailer.",
                           __func__,
                           op->downcall.type);
                ret = -EFAULT;
                goto Broken;
        }

        if (op->downcall.type != ORANGEFS_VFS_OP_READDIR)
                goto wakeup;

        op->downcall.trailer_buf =
                vmalloc(op->downcall.trailer_size);
        if (op->downcall.trailer_buf == NULL) {
                gossip_err("%s: failed trailer vmalloc.\n",
                           __func__);
                ret = -ENOMEM;
                goto Broken;
        }
        memset(op->downcall.trailer_buf, 0, op->downcall.trailer_size);
        n = copy_from_iter(op->downcall.trailer_buf,
                           op->downcall.trailer_size,
                           iter);
        if (n != op->downcall.trailer_size) {
                gossip_err("%s: failed to copy trailer.\n", __func__);
                vfree(op->downcall.trailer_buf);
                ret = -EFAULT;
                goto Broken;
        }

wakeup:
        /*
         * tell the vfs op waiting on a waitqueue
         * that this op is done
         */
        spin_lock(&op->lock);
        if (unlikely(op_state_given_up(op))) {
                spin_unlock(&op->lock);
                goto out;
        }
        set_op_state_serviced(op);
        spin_unlock(&op->lock);

        /*
         * If this operation is an I/O operation we need to wait
         * for all data to be copied before we can return to avoid
         * buffer corruption and races that can pull the buffers
         * out from under us.
         *
         * Essentially we're synchronizing with other parts of the
         * vfs implicitly by not allowing the user space
         * application reading/writing this device to return until
         * the buffers are done being used.
         */
        if (op->downcall.type == ORANGEFS_VFS_OP_FILE_IO) {
                long n = wait_for_completion_interruptible_timeout(&op->done,
                                                        op_timeout_secs * HZ);
                if (unlikely(n < 0)) {
                        gossip_debug(GOSSIP_DEV_DEBUG,
                                     "%s: signal on I/O wait, aborting\n",
                                     __func__);
                } else if (unlikely(n == 0)) {
                        gossip_debug(GOSSIP_DEV_DEBUG,
                                     "%s: timed out.\n",
                                     __func__);
                }
        }
out:
        if (unlikely(op_is_cancel(op)))
                put_cancel(op);
        op_release(op);
        return ret;

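        /*
         * Copying or allocating the downcall (or its trailer) failed after
         * the op was already pulled off the in-progress hash table.  Hand
         * the error back as the downcall status and mark the op serviced so
         * its waiter can see the failure, unless it has already given up.
         */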
Broken:
        spin_lock(&op->lock);
        if (!op_state_given_up(op)) {
                op->downcall.status = ret;
                set_op_state_serviced(op);
        }
        spin_unlock(&op->lock);
        goto out;
}

/*
 * Mark every mounted orangefs file system as needing a remount.
 * Returns 1 if no file systems were mounted, 0 if at least one is
 * now pending a remount.
 */
static int mark_all_pending_mounts(void)
{
        int unmounted = 1;
        struct orangefs_sb_info_s *orangefs_sb = NULL;

        spin_lock(&orangefs_superblocks_lock);
        list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
                /* All of these file systems require a remount. */
                orangefs_sb->mount_pending = 1;
                unmounted = 0;
        }
        spin_unlock(&orangefs_superblocks_lock);
        return unmounted;
}

/*
 * Determine whether a given file system needs to be remounted:
 * returns -1 if no mounted file system matches fsid (error),
 *          0 if it is already mounted,
 *          1 if it needs a remount.
 */
int fs_mount_pending(__s32 fsid)
{
        int mount_pending = -1;
        struct orangefs_sb_info_s *orangefs_sb = NULL;

        spin_lock(&orangefs_superblocks_lock);
        list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
                if (orangefs_sb->fs_id == fsid) {
                        mount_pending = orangefs_sb->mount_pending;
                        break;
                }
        }
        spin_unlock(&orangefs_superblocks_lock);
        return mount_pending;
}

/*
 * NOTE: gets called when the last reference to this device is dropped.
 * Using the open_access_count variable, we enforce a reference count
 * on this file so that it can be opened by only one process at a time.
 * The devreq_mutex is used to make sure all i/o has completed
 * before we call orangefs_bufmap_finalize, and similar such tricky
 * situations.
 */
static int orangefs_devreq_release(struct inode *inode, struct file *file)
{
        int unmounted = 0;

        gossip_debug(GOSSIP_DEV_DEBUG,
                     "%s:pvfs2-client-core: exiting, closing device\n",
                     __func__);

        mutex_lock(&devreq_mutex);
        orangefs_bufmap_finalize();

        open_access_count = -1;

        unmounted = mark_all_pending_mounts();
        gossip_debug(GOSSIP_DEV_DEBUG, "ORANGEFS Device Close: Filesystem(s) %s\n",
                     (unmounted ? "UNMOUNTED" : "MOUNTED"));

        /*
         * Walk through the list of ops in the request list, mark them
         * as purged and wake them up.
         */
        purge_waiting_ops();
        /*
         * Walk through the hash table of in progress operations; mark
         * them as purged and wake them up.
         */
        purge_inprogress_ops();

        orangefs_bufmap_run_down();

        gossip_debug(GOSSIP_DEV_DEBUG,
                     "pvfs2-client-core: device close complete\n");
        open_access_count = 0;
        mutex_unlock(&devreq_mutex);
        return 0;
}

int is_daemon_in_service(void)
{
        int in_service;

        /*
         * This function checks whether the client-core is alive based on
         * the access count we maintain on the device.
         */
        mutex_lock(&devreq_mutex);
        in_service = open_access_count == 1 ? 0 : -EIO;
        mutex_unlock(&devreq_mutex);
        return in_service;
}

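/*
 * Like is_daemon_in_service(), but without taking devreq_mutex; simply
 * reports whether the client-core currently has the device open.
 */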
bool __is_daemon_in_service(void)
{
        return open_access_count == 1;
}

static inline long check_ioctl_command(unsigned int command)
{
        /* Check for valid ioctl codes */
        if (_IOC_TYPE(command) != ORANGEFS_DEV_MAGIC) {
                gossip_err("device ioctl magic numbers don't match! Did you rebuild pvfs2-client-core/libpvfs2? [cmd %x, magic %x != %x]\n",
                           command,
                           _IOC_TYPE(command),
                           ORANGEFS_DEV_MAGIC);
                return -EINVAL;
        }
        /* and valid ioctl commands */
        if (_IOC_NR(command) >= ORANGEFS_DEV_MAXNR || _IOC_NR(command) <= 0) {
                gossip_err("Invalid ioctl command number [%d >= %d]\n",
                           _IOC_NR(command), ORANGEFS_DEV_MAXNR);
                return -ENOIOCTLCMD;
        }
        return 0;
}

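/*
 * Handle the ORANGEFS_DEV_* ioctls the client-core issues: querying the
 * magic number and maximum up/down sizes, mapping its shared buffer,
 * requesting a remount of all volumes, and pushing debug masks and the
 * debug keyword string into the kernel module.
 */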
static long dispatch_ioctl_command(unsigned int command, unsigned long arg)
{
        static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
        static __s32 max_up_size = MAX_DEV_REQ_UPSIZE;
        static __s32 max_down_size = MAX_DEV_REQ_DOWNSIZE;
        struct ORANGEFS_dev_map_desc user_desc;
        int ret = 0;
        struct dev_mask_info_s mask_info = { 0 };
        struct dev_mask2_info_s mask2_info = { 0, 0 };
        int upstream_kmod = 1;
        struct list_head *tmp = NULL;
        struct orangefs_sb_info_s *orangefs_sb = NULL;

        /* mtmoore: add locking here */

        switch (command) {
        case ORANGEFS_DEV_GET_MAGIC:
                return ((put_user(magic, (__s32 __user *) arg) == -EFAULT) ?
                        -EIO :
                        0);
        case ORANGEFS_DEV_GET_MAX_UPSIZE:
                return ((put_user(max_up_size,
                                  (__s32 __user *) arg) == -EFAULT) ?
                        -EIO :
                        0);
        case ORANGEFS_DEV_GET_MAX_DOWNSIZE:
                return ((put_user(max_down_size,
                                  (__s32 __user *) arg) == -EFAULT) ?
                        -EIO :
                        0);
        case ORANGEFS_DEV_MAP:
                ret = copy_from_user(&user_desc,
                                     (struct ORANGEFS_dev_map_desc __user *)
                                     arg,
                                     sizeof(struct ORANGEFS_dev_map_desc));
                /* WTF -EIO and not -EFAULT? */
                return ret ? -EIO : orangefs_bufmap_initialize(&user_desc);
        case ORANGEFS_DEV_REMOUNT_ALL:
                gossip_debug(GOSSIP_DEV_DEBUG,
                             "%s: got ORANGEFS_DEV_REMOUNT_ALL\n",
                             __func__);

                /*
                 * Remount all mounted orangefs volumes to regain the lost
                 * dynamic mount tables (if any) -- NOTE: this is done
                 * without keeping the superblock list locked due to the
                 * upcall/downcall waiting.  Also, the request semaphore is
                 * used to ensure that no operations will be serviced until
                 * all of the remounts are serviced (so that ops issued in
                 * between remounts do not fail).
                 */
                ret = mutex_lock_interruptible(&request_mutex);
                if (ret < 0)
                        return ret;
                gossip_debug(GOSSIP_DEV_DEBUG,
                             "%s: priority remount in progress\n",
                             __func__);
                list_for_each(tmp, &orangefs_superblocks) {
                        orangefs_sb =
                                list_entry(tmp,
                                           struct orangefs_sb_info_s,
                                           list);
                        if (orangefs_sb && (orangefs_sb->sb)) {
                                gossip_debug(GOSSIP_DEV_DEBUG,
                                             "%s: Remounting SB %p\n",
                                             __func__,
                                             orangefs_sb);

                                ret = orangefs_remount(orangefs_sb->sb);
                                if (ret) {
                                        gossip_debug(GOSSIP_DEV_DEBUG,
                                                     "SB %p remount failed\n",
                                                     orangefs_sb);
                                        break;
                                }
                        }
                }
                gossip_debug(GOSSIP_DEV_DEBUG,
                             "%s: priority remount complete\n",
                             __func__);
                mutex_unlock(&request_mutex);
                return ret;

        case ORANGEFS_DEV_UPSTREAM:
                ret = copy_to_user((void __user *)arg,
                                   &upstream_kmod,
                                   sizeof(upstream_kmod));

                if (ret != 0)
                        return -EIO;
                else
                        return ret;

        case ORANGEFS_DEV_CLIENT_MASK:
                ret = copy_from_user(&mask2_info,
                                     (void __user *)arg,
                                     sizeof(struct dev_mask2_info_s));

                if (ret != 0)
                        return -EIO;

                client_debug_mask.mask1 = mask2_info.mask1_value;
                client_debug_mask.mask2 = mask2_info.mask2_value;

                pr_info("%s: client debug mask has been received "
                        ":%llx: :%llx:\n",
                        __func__,
                        (unsigned long long)client_debug_mask.mask1,
                        (unsigned long long)client_debug_mask.mask2);

                return ret;

        case ORANGEFS_DEV_CLIENT_STRING:
                ret = copy_from_user(&client_debug_array_string,
                                     (void __user *)arg,
                                     ORANGEFS_MAX_DEBUG_STRING_LEN);
                if (ret != 0) {
                        pr_info("%s: CLIENT_STRING: copy_from_user failed\n",
                                __func__);
                        return -EIO;
                }

                pr_info("%s: client debug array string has been received.\n",
                        __func__);

                if (!help_string_initialized) {

                        /* Free the "we don't know yet" default string... */
                        kfree(debug_help_string);

                        /* build a proper debug help string */
                        if (orangefs_prepare_debugfs_help_string(0)) {
                                gossip_err("%s: no debug help string\n",
                                           __func__);
                                return -EIO;
                        }

                        /* Replace the boilerplate boot-time debug-help file. */
                        debugfs_remove(help_file_dentry);

                        help_file_dentry =
                                debugfs_create_file(
                                        ORANGEFS_KMOD_DEBUG_HELP_FILE,
                                        0444,
                                        debug_dir,
                                        debug_help_string,
                                        &debug_help_fops);

                        if (!help_file_dentry) {
                                gossip_err("%s: debugfs_create_file failed for"
                                           " :%s:!\n",
                                           __func__,
                                           ORANGEFS_KMOD_DEBUG_HELP_FILE);
                                return -EIO;
                        }
                }

                debug_mask_to_string(&client_debug_mask, 1);

                debugfs_remove(client_debug_dentry);

                orangefs_client_debug_init();

                help_string_initialized++;

                return ret;

        case ORANGEFS_DEV_DEBUG:
                ret = copy_from_user(&mask_info,
                                     (void __user *)arg,
                                     sizeof(mask_info));

                if (ret != 0)
                        return -EIO;

                if (mask_info.mask_type == KERNEL_MASK) {
                        if ((mask_info.mask_value == 0)
                            && (kernel_mask_set_mod_init)) {
                                /*
                                 * the kernel debug mask was set when the
                                 * kernel module was loaded; don't override
                                 * it if the client-core was started without
                                 * a value for ORANGEFS_KMODMASK.
                                 */
                                return 0;
                        }
                        debug_mask_to_string(&mask_info.mask_value,
                                             mask_info.mask_type);
                        gossip_debug_mask = mask_info.mask_value;
                        pr_info("%s: kernel debug mask has been modified to "
                                ":%s: :%llx:\n",
                                __func__,
                                kernel_debug_string,
                                (unsigned long long)gossip_debug_mask);
                } else if (mask_info.mask_type == CLIENT_MASK) {
                        debug_mask_to_string(&mask_info.mask_value,
                                             mask_info.mask_type);
                        pr_info("%s: client debug mask has been modified to "
                                ":%s: :%llx:\n",
                                __func__,
                                client_debug_string,
                                llu(mask_info.mask_value));
                } else {
                        gossip_lerr("Invalid mask type....\n");
                        return -EINVAL;
                }

                return ret;

        default:
                return -ENOIOCTLCMD;
        }
        return -ENOIOCTLCMD;
}

static long orangefs_devreq_ioctl(struct file *file,
                                  unsigned int command, unsigned long arg)
{
        long ret;

        /* Check for properly constructed commands */
        ret = check_ioctl_command(command);
        if (ret < 0)
                return (int)ret;

        return (int)dispatch_ioctl_command(command, arg);
}

#ifdef CONFIG_COMPAT            /* CONFIG_COMPAT is in .config */

/* Compat structure for the ORANGEFS_DEV_MAP ioctl */
struct ORANGEFS_dev_map_desc32 {
        compat_uptr_t ptr;
        __s32 total_size;
        __s32 size;
        __s32 count;
};

static unsigned long translate_dev_map26(unsigned long args, long *error)
{
        struct ORANGEFS_dev_map_desc32 __user *p32 = (void __user *)args;
        /*
         * Depending on the architecture, allocate some space on the
         * user-call-stack based on our expected layout.
         */
        struct ORANGEFS_dev_map_desc __user *p =
                compat_alloc_user_space(sizeof(*p));
        compat_uptr_t addr;

        *error = 0;
        /* get the ptr from the 32 bit user-space */
        if (get_user(addr, &p32->ptr))
                goto err;
        /* try to put that into a 64-bit layout */
        if (put_user(compat_ptr(addr), &p->ptr))
                goto err;
        /* copy the remaining fields */
        if (copy_in_user(&p->total_size, &p32->total_size, sizeof(__s32)))
                goto err;
        if (copy_in_user(&p->size, &p32->size, sizeof(__s32)))
                goto err;
        if (copy_in_user(&p->count, &p32->count, sizeof(__s32)))
                goto err;
        return (unsigned long)p;
err:
        *error = -EFAULT;
        return 0;
}

/*
 * ioctl handler for 32-bit user-space apps when the kernel module
 * is compiled as a 64-bit one.
 */
static long orangefs_devreq_compat_ioctl(struct file *filp, unsigned int cmd,
                                         unsigned long args)
{
        long ret;
        unsigned long arg = args;

        /* Check for properly constructed commands */
        ret = check_ioctl_command(cmd);
        if (ret < 0)
                return ret;
        if (cmd == ORANGEFS_DEV_MAP) {
                /*
                 * convert the arguments to what we expect internally
                 * in kernel space
                 */
                arg = translate_dev_map26(args, &ret);
                if (ret < 0) {
                        gossip_err("Could not translate dev map\n");
                        return ret;
                }
        }
        /* no other ioctl requires translation */
        return dispatch_ioctl_command(cmd, arg);
}

#endif /* CONFIG_COMPAT is in .config */

/* the assigned character device major number */
static int orangefs_dev_major;

/*
 * Initialize orangefs device specific state:
 * Must be called at module load time only
 */
int orangefs_dev_init(void)
{
        /* register orangefs-req device */
        orangefs_dev_major = register_chrdev(0,
                                             ORANGEFS_REQDEVICE_NAME,
                                             &orangefs_devreq_file_operations);
        if (orangefs_dev_major < 0) {
                gossip_debug(GOSSIP_DEV_DEBUG,
                             "Failed to register /dev/%s (error %d)\n",
                             ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
                return orangefs_dev_major;
        }

        gossip_debug(GOSSIP_DEV_DEBUG,
                     "*** /dev/%s character device registered ***\n",
                     ORANGEFS_REQDEVICE_NAME);
        gossip_debug(GOSSIP_DEV_DEBUG, "'mknod /dev/%s c %d 0'.\n",
                     ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
        return 0;
}

void orangefs_dev_cleanup(void)
{
        unregister_chrdev(orangefs_dev_major, ORANGEFS_REQDEVICE_NAME);
        gossip_debug(GOSSIP_DEV_DEBUG,
                     "*** /dev/%s character device unregistered ***\n",
                     ORANGEFS_REQDEVICE_NAME);
}

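/*
 * The client-core polls this device to learn when upcalls are waiting;
 * report readability whenever orangefs_request_list is non-empty.
 */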
static unsigned int orangefs_devreq_poll(struct file *file,
                                         struct poll_table_struct *poll_table)
{
        int poll_revent_mask = 0;

        poll_wait(file, &orangefs_request_list_waitq, poll_table);

        if (!list_empty(&orangefs_request_list))
                poll_revent_mask |= POLLIN;
        return poll_revent_mask;
}

const struct file_operations orangefs_devreq_file_operations = {
        .owner = THIS_MODULE,
        .read = orangefs_devreq_read,
        .write_iter = orangefs_devreq_write_iter,
        .open = orangefs_devreq_open,
        .release = orangefs_devreq_release,
        .unlocked_ioctl = orangefs_devreq_ioctl,

#ifdef CONFIG_COMPAT            /* CONFIG_COMPAT is in .config */
        .compat_ioctl = orangefs_devreq_compat_ioctl,
#endif
        .poll = orangefs_devreq_poll
};