dadeb381f9fc38285307ac95f559855054899511
[deliverable/linux.git] / fs / orangefs / devorangefs-req.c
1 /*
2 * (C) 2001 Clemson University and The University of Chicago
3 *
4 * Changes by Acxiom Corporation to add protocol version to kernel
5 * communication, Copyright Acxiom Corporation, 2005.
6 *
7 * See COPYING in top-level directory.
8 */
9
10 #include "protocol.h"
11 #include "orangefs-kernel.h"
12 #include "orangefs-dev-proto.h"
13 #include "orangefs-bufmap.h"
14
15 #include <linux/debugfs.h>
16 #include <linux/slab.h>
17
18 /* this file implements the /dev/pvfs2-req device node */
19
/*
 * Number of current opens of the request device; used to enforce that
 * only one process (the client-core) holds it open at a time.
 * Read and written under devreq_mutex.
 */
static int open_access_count;
21
/*
 * Complain loudly (via gossip_err) when a second process tries to open
 * /dev/pvfs2-req, including a hint to locate the existing holder with
 * lsof.  Reads open_access_count, so callers hold devreq_mutex.
 */
#define DUMP_DEVICE_ERROR()                                                   \
do {                                                                          \
	gossip_err("*****************************************************\n");\
	gossip_err("ORANGEFS Device Error: You cannot open the device file ");  \
	gossip_err("\n/dev/%s more than once. Please make sure that\nthere " \
		   "are no ", ORANGEFS_REQDEVICE_NAME);                       \
	gossip_err("instances of a program using this device\ncurrently "     \
		   "running. (You must verify this!)\n");                     \
	gossip_err("For example, you can use the lsof program as follows:\n");\
	gossip_err("'lsof | grep %s' (run this as root)\n",                   \
		   ORANGEFS_REQDEVICE_NAME);                                  \
	gossip_err(" open_access_count = %d\n", open_access_count);           \
	gossip_err("*****************************************************\n");\
} while (0)
36
/*
 * Map an op tag to a bucket index in htable_ops_in_progress.
 *
 * do_div() divides its first argument in place (here only the local
 * copy of tag) and returns the remainder, i.e. tag % table_size.
 */
static int hash_func(__u64 tag, int table_size)
{
	return do_div(tag, (unsigned int)table_size);
}
41
42 static void orangefs_devreq_add_op(struct orangefs_kernel_op_s *op)
43 {
44 int index = hash_func(op->tag, hash_table_size);
45
46 spin_lock(&htable_ops_in_progress_lock);
47 list_add_tail(&op->list, &htable_ops_in_progress[index]);
48 spin_unlock(&htable_ops_in_progress_lock);
49 }
50
51 static struct orangefs_kernel_op_s *orangefs_devreq_remove_op(__u64 tag)
52 {
53 struct orangefs_kernel_op_s *op, *next;
54 int index;
55
56 index = hash_func(tag, hash_table_size);
57
58 spin_lock(&htable_ops_in_progress_lock);
59 list_for_each_entry_safe(op,
60 next,
61 &htable_ops_in_progress[index],
62 list) {
63 if (op->tag == tag) {
64 list_del(&op->list);
65 spin_unlock(&htable_ops_in_progress_lock);
66 return op;
67 }
68 }
69
70 spin_unlock(&htable_ops_in_progress_lock);
71 return NULL;
72 }
73
74 static int orangefs_devreq_open(struct inode *inode, struct file *file)
75 {
76 int ret = -EINVAL;
77
78 if (!(file->f_flags & O_NONBLOCK)) {
79 gossip_err("%s: device cannot be opened in blocking mode\n",
80 __func__);
81 goto out;
82 }
83 ret = -EACCES;
84 gossip_debug(GOSSIP_DEV_DEBUG, "client-core: opening device\n");
85 mutex_lock(&devreq_mutex);
86
87 if (open_access_count == 0) {
88 open_access_count++;
89 ret = 0;
90 } else {
91 DUMP_DEVICE_ERROR();
92 }
93 mutex_unlock(&devreq_mutex);
94
95 out:
96
97 gossip_debug(GOSSIP_DEV_DEBUG,
98 "pvfs2-client-core: open device complete (ret = %d)\n",
99 ret);
100 return ret;
101 }
102
/*
 * Function for read() callers into the device.
 *
 * Hands the client-core the next serviceable upcall: copies out the
 * protocol version, the device magic, the op's tag and the upcall
 * struct, marks the op in-progress and moves it from the request list
 * into the in-progress hash table.
 *
 * Returns MAX_DEV_REQ_UPSIZE on success, -EINVAL for a blocking open
 * or a wrong-sized read, -EAGAIN when no op is ready (or a bogus op is
 * found), -EFAULT when the copy to user space fails.
 *
 * Lock ordering throughout: orangefs_request_list_lock, then op->lock.
 */
static ssize_t orangefs_devreq_read(struct file *file,
				    char __user *buf,
				    size_t count, loff_t *offset)
{
	struct orangefs_kernel_op_s *op, *temp;
	__s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION;
	static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
	struct orangefs_kernel_op_s *cur_op = NULL;
	unsigned long ret;

	/* We do not support blocking IO. */
	if (!(file->f_flags & O_NONBLOCK)) {
		gossip_err("%s: blocking read from client-core.\n",
			   __func__);
		return -EINVAL;
	}

	/*
	 * The client will do an ioctl to find MAX_DEV_REQ_UPSIZE, then
	 * always read with that size buffer.
	 */
	if (count != MAX_DEV_REQ_UPSIZE) {
		gossip_err("orangefs: client-core tried to read wrong size\n");
		return -EINVAL;
	}

	/* Get next op (if any) from top of list. */
	spin_lock(&orangefs_request_list_lock);
	list_for_each_entry_safe(op, temp, &orangefs_request_list, list) {
		__s32 fsid;
		/* This lock is held past the end of the loop when we break. */
		spin_lock(&op->lock);

		fsid = fsid_of_op(op);
		if (fsid != ORANGEFS_FS_ID_NULL) {
			/* shadows the outer ret on purpose: holds the
			 * fs_mount_pending() tri-state, not a copy count */
			int ret;
			/* Skip ops whose filesystem needs to be mounted. */
			ret = fs_mount_pending(fsid);
			if (ret == 1) {
				gossip_debug(GOSSIP_DEV_DEBUG,
				    "orangefs: skipping op tag %llu %s\n",
				    llu(op->tag), get_opname_string(op));
				spin_unlock(&op->lock);
				continue;
			/*
			 * Skip ops whose filesystem we don't know about unless
			 * it is being mounted.
			 */
			/* XXX: is there a better way to detect this? */
			} else if (ret == -1 &&
				   !(op->upcall.type ==
					ORANGEFS_VFS_OP_FS_MOUNT ||
				     op->upcall.type ==
					ORANGEFS_VFS_OP_GETATTR)) {
				gossip_debug(GOSSIP_DEV_DEBUG,
				    "orangefs: skipping op tag %llu %s\n",
				    llu(op->tag), get_opname_string(op));
				gossip_err(
				    "orangefs: ERROR: fs_mount_pending %d\n",
				    fsid);
				spin_unlock(&op->lock);
				continue;
			}
		}
		/*
		 * Either this op does not pertain to a filesystem, is mounting
		 * a filesystem, or pertains to a mounted filesystem. Let it
		 * through.
		 */
		cur_op = op;
		break;
	}

	/*
	 * At this point we either have a valid op and can continue or have not
	 * found an op and must ask the client to try again later.
	 */
	if (!cur_op) {
		spin_unlock(&orangefs_request_list_lock);
		return -EAGAIN;
	}

	gossip_debug(GOSSIP_DEV_DEBUG, "orangefs: reading op tag %llu %s\n",
		     llu(cur_op->tag), get_opname_string(cur_op));

	/*
	 * Such an op should never be on the list in the first place. If so, we
	 * will abort.
	 */
	if (op_state_in_progress(cur_op) || op_state_serviced(cur_op)) {
		gossip_err("orangefs: ERROR: Current op already queued.\n");
		list_del(&cur_op->list);
		spin_unlock(&cur_op->lock);
		spin_unlock(&orangefs_request_list_lock);
		return -EAGAIN;
	}

	/*
	 * Set the operation to be in progress and move it between lists since
	 * it has been sent to the client.
	 */
	set_op_state_inprogress(cur_op);

	list_del(&cur_op->list);
	spin_unlock(&orangefs_request_list_lock);
	orangefs_devreq_add_op(cur_op);
	spin_unlock(&cur_op->lock);

	/*
	 * Push the upcall out.  Layout expected by the client:
	 * proto_ver | magic | tag | upcall struct.
	 */
	ret = copy_to_user(buf, &proto_ver, sizeof(__s32));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf+sizeof(__s32), &magic, sizeof(__s32));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf+2 * sizeof(__s32), &cur_op->tag, sizeof(__u64));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf+2*sizeof(__s32)+sizeof(__u64), &cur_op->upcall,
			   sizeof(struct orangefs_upcall_s));
	if (ret != 0)
		goto error;

	/* The client only asks to read one size buffer. */
	return MAX_DEV_REQ_UPSIZE;
error:
	/*
	 * We were unable to copy the op data to the client. Put the op back in
	 * list. If client has crashed, the op will be purged later when the
	 * device is released.
	 */
	gossip_err("orangefs: Failed to copy data to user space\n");
	spin_lock(&orangefs_request_list_lock);
	spin_lock(&cur_op->lock);
	set_op_state_waiting(cur_op);
	orangefs_devreq_remove_op(cur_op->tag);
	list_add(&cur_op->list, &orangefs_request_list);
	spin_unlock(&cur_op->lock);
	spin_unlock(&orangefs_request_list_lock);
	return -EFAULT;
}
245
/*
 * Function for writev() callers into the device.
 *
 * Userspace should have written:
 *  - __u32 version
 *  - __u32 magic
 *  - __u64 tag
 *  - struct orangefs_downcall_s
 *  - trailer buffer (in the case of READDIR operations)
 *
 * The matching in-progress op is found by tag, its downcall is filled
 * in from the iov and it is marked serviced so waiters wake up.  For
 * READDIR a trailer buffer is vmalloc'd and copied in.  For FILE_IO
 * this call additionally blocks until the vfs side sets io_completed
 * (or a signal/timeout occurs), to keep the shared buffers alive while
 * they are in use.
 */
static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
				      struct iov_iter *iter)
{
	ssize_t ret;
	struct orangefs_kernel_op_s *op = NULL;
	struct {
		__u32 version;
		__u32 magic;
		__u64 tag;
	} head;
	/* ret doubles as "bytes consumed" on the success paths */
	int total = ret = iov_iter_count(iter);
	int n;
	int downcall_size = sizeof(struct orangefs_downcall_s);
	int head_size = sizeof(head);

	gossip_debug(GOSSIP_DEV_DEBUG, "%s: total:%d: ret:%zd:\n",
		     __func__,
		     total,
		     ret);

	if (total < MAX_DEV_REQ_DOWNSIZE) {
		gossip_err("%s: total:%d: must be at least:%u:\n",
			   __func__,
			   total,
			   (unsigned int) MAX_DEV_REQ_DOWNSIZE);
		ret = -EFAULT;
		goto out;
	}

	n = copy_from_iter(&head, head_size, iter);
	if (n < head_size) {
		gossip_err("%s: failed to copy head.\n", __func__);
		ret = -EFAULT;
		goto out;
	}

	if (head.version < ORANGEFS_MINIMUM_USERSPACE_VERSION) {
		gossip_err("%s: userspace claims version"
			   "%d, minimum version required: %d.\n",
			   __func__,
			   head.version,
			   ORANGEFS_MINIMUM_USERSPACE_VERSION);
		ret = -EPROTO;
		goto out;
	}

	if (head.magic != ORANGEFS_DEVREQ_MAGIC) {
		gossip_err("Error: Device magic number does not match.\n");
		ret = -EPROTO;
		goto out;
	}

	op = orangefs_devreq_remove_op(head.tag);
	if (!op) {
		gossip_err("WARNING: No one's waiting for tag %llu\n",
			   llu(head.tag));
		/*
		 * NOTE(review): ret still holds the iov count here, so
		 * an unmatched tag is reported to userspace as a fully
		 * consumed write -- confirm stale downcalls are meant
		 * to be silently dropped.
		 */
		goto out;
	}

	get_op(op); /* increase ref count. */

	n = copy_from_iter(&op->downcall, downcall_size, iter);
	if (n != downcall_size) {
		gossip_err("%s: failed to copy downcall.\n", __func__);
		put_op(op);
		ret = -EFAULT;
		goto out;
	}

	/* a failed op carries no trailer; wake the waiter directly */
	if (op->downcall.status)
		goto wakeup;

	/*
	 * We've successfully peeled off the head and the downcall.
	 * Something has gone awry if total doesn't equal the
	 * sum of head_size, downcall_size and trailer_size.
	 */
	if ((head_size + downcall_size + op->downcall.trailer_size) != total) {
		gossip_err("%s: funky write, head_size:%d"
			   ": downcall_size:%d: trailer_size:%lld"
			   ": total size:%d:\n",
			   __func__,
			   head_size,
			   downcall_size,
			   op->downcall.trailer_size,
			   total);
		put_op(op);
		ret = -EFAULT;
		goto out;
	}

	/* Only READDIR operations should have trailers. */
	if ((op->downcall.type != ORANGEFS_VFS_OP_READDIR) &&
	    (op->downcall.trailer_size != 0)) {
		gossip_err("%s: %x operation with trailer.",
			   __func__,
			   op->downcall.type);
		put_op(op);
		ret = -EFAULT;
		goto out;
	}

	/* READDIR operations should always have trailers. */
	if ((op->downcall.type == ORANGEFS_VFS_OP_READDIR) &&
	    (op->downcall.trailer_size == 0)) {
		gossip_err("%s: %x operation with no trailer.",
			   __func__,
			   op->downcall.type);
		put_op(op);
		ret = -EFAULT;
		goto out;
	}

	if (op->downcall.type != ORANGEFS_VFS_OP_READDIR)
		goto wakeup;

	/* READDIR: pull the variable-length trailer into kernel memory */
	op->downcall.trailer_buf =
		vmalloc(op->downcall.trailer_size);
	if (op->downcall.trailer_buf == NULL) {
		gossip_err("%s: failed trailer vmalloc.\n",
			   __func__);
		put_op(op);
		ret = -ENOMEM;
		goto out;
	}
	memset(op->downcall.trailer_buf, 0, op->downcall.trailer_size);
	n = copy_from_iter(op->downcall.trailer_buf,
			   op->downcall.trailer_size,
			   iter);
	if (n != op->downcall.trailer_size) {
		gossip_err("%s: failed to copy trailer.\n", __func__);
		vfree(op->downcall.trailer_buf);
		put_op(op);
		ret = -EFAULT;
		goto out;
	}

wakeup:

	/*
	 * If this operation is an I/O operation we need to wait
	 * for all data to be copied before we can return to avoid
	 * buffer corruption and races that can pull the buffers
	 * out from under us.
	 *
	 * Essentially we're synchronizing with other parts of the
	 * vfs implicitly by not allowing the user space
	 * application reading/writing this device to return until
	 * the buffers are done being used.
	 */
	if (op->downcall.type == ORANGEFS_VFS_OP_FILE_IO) {
		int timed_out = 0;
		DEFINE_WAIT(wait_entry);

		/*
		 * tell the vfs op waiting on a waitqueue
		 * that this op is done
		 */
		spin_lock(&op->lock);
		set_op_state_serviced(op);
		spin_unlock(&op->lock);

		/* block here until the vfs side flags io_completed */
		while (1) {
			spin_lock(&op->lock);
			prepare_to_wait_exclusive(
				&op->io_completion_waitq,
				&wait_entry,
				TASK_INTERRUPTIBLE);
			if (op->io_completed) {
				spin_unlock(&op->lock);
				break;
			}
			spin_unlock(&op->lock);

			if (!signal_pending(current)) {
				int timeout =
				    MSECS_TO_JIFFIES(1000 *
						     op_timeout_secs);
				if (!schedule_timeout(timeout)) {
					gossip_debug(GOSSIP_DEV_DEBUG,
						     "%s: timed out.\n",
						     __func__);
					timed_out = 1;
					break;
				}
				continue;
			}

			gossip_debug(GOSSIP_DEV_DEBUG,
				     "%s: signal on I/O wait, aborting\n",
				     __func__);
			break;
		}

		spin_lock(&op->lock);
		finish_wait(&op->io_completion_waitq, &wait_entry);
		spin_unlock(&op->lock);

		/* NOTE: for I/O operations we handle releasing the op
		 * object except in the case of timeout. the reason we
		 * can't free the op in timeout cases is that the op
		 * service logic in the vfs retries operations using
		 * the same op ptr, thus it can't be freed.
		 */
		if (!timed_out)
			op_release(op);
	} else {
		/*
		 * tell the vfs op waiting on a waitqueue that
		 * this op is done -
		 * for every other operation (i.e. non-I/O), we need to
		 * wake up the callers for downcall completion
		 * notification
		 */
		spin_lock(&op->lock);
		set_op_state_serviced(op);
		spin_unlock(&op->lock);
	}
out:
	return ret;
}
477
478 /* Returns whether any FS are still pending remounted */
479 static int mark_all_pending_mounts(void)
480 {
481 int unmounted = 1;
482 struct orangefs_sb_info_s *orangefs_sb = NULL;
483
484 spin_lock(&orangefs_superblocks_lock);
485 list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
486 /* All of these file system require a remount */
487 orangefs_sb->mount_pending = 1;
488 unmounted = 0;
489 }
490 spin_unlock(&orangefs_superblocks_lock);
491 return unmounted;
492 }
493
494 /*
495 * Determine if a given file system needs to be remounted or not
496 * Returns -1 on error
497 * 0 if already mounted
498 * 1 if needs remount
499 */
500 int fs_mount_pending(__s32 fsid)
501 {
502 int mount_pending = -1;
503 struct orangefs_sb_info_s *orangefs_sb = NULL;
504
505 spin_lock(&orangefs_superblocks_lock);
506 list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
507 if (orangefs_sb->fs_id == fsid) {
508 mount_pending = orangefs_sb->mount_pending;
509 break;
510 }
511 }
512 spin_unlock(&orangefs_superblocks_lock);
513 return mount_pending;
514 }
515
/*
 * NOTE: gets called when the last reference to this device is dropped.
 * Using the open_access_count variable, we enforce a reference count
 * on this file so that it can be opened by only one process at a time.
 * the devreq_mutex is used to make sure all i/o has completed
 * before we call orangefs_bufmap_finalize, and similar such tricky
 * situations
 */
static int orangefs_devreq_release(struct inode *inode, struct file *file)
{
	int unmounted = 0;

	gossip_debug(GOSSIP_DEV_DEBUG,
		     "%s:pvfs2-client-core: exiting, closing device\n",
		     __func__);

	/*
	 * Under devreq_mutex: tear down the shared buffer mapping (if
	 * one was set up), release our open slot and flag every mounted
	 * fs as needing a remount before new ops can be serviced.
	 */
	mutex_lock(&devreq_mutex);
	if (orangefs_get_bufmap_init())
		orangefs_bufmap_finalize();

	open_access_count--;

	unmounted = mark_all_pending_mounts();
	gossip_debug(GOSSIP_DEV_DEBUG, "ORANGEFS Device Close: Filesystem(s) %s\n",
		     (unmounted ? "UNMOUNTED" : "MOUNTED"));
	mutex_unlock(&devreq_mutex);

	/*
	 * Walk through the list of ops in the request list, mark them
	 * as purged and wake them up.
	 */
	purge_waiting_ops();
	/*
	 * Walk through the hash table of in progress operations; mark
	 * them as purged and wake them up
	 */
	purge_inprogress_ops();
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "pvfs2-client-core: device close complete\n");
	return 0;
}
557
558 int is_daemon_in_service(void)
559 {
560 int in_service;
561
562 /*
563 * What this function does is checks if client-core is alive
564 * based on the access count we maintain on the device.
565 */
566 mutex_lock(&devreq_mutex);
567 in_service = open_access_count == 1 ? 0 : -EIO;
568 mutex_unlock(&devreq_mutex);
569 return in_service;
570 }
571
572 static inline long check_ioctl_command(unsigned int command)
573 {
574 /* Check for valid ioctl codes */
575 if (_IOC_TYPE(command) != ORANGEFS_DEV_MAGIC) {
576 gossip_err("device ioctl magic numbers don't match! Did you rebuild pvfs2-client-core/libpvfs2? [cmd %x, magic %x != %x]\n",
577 command,
578 _IOC_TYPE(command),
579 ORANGEFS_DEV_MAGIC);
580 return -EINVAL;
581 }
582 /* and valid ioctl commands */
583 if (_IOC_NR(command) >= ORANGEFS_DEV_MAXNR || _IOC_NR(command) <= 0) {
584 gossip_err("Invalid ioctl command number [%d >= %d]\n",
585 _IOC_NR(command), ORANGEFS_DEV_MAXNR);
586 return -ENOIOCTLCMD;
587 }
588 return 0;
589 }
590
/*
 * Handle a pre-validated ioctl on the request device.
 *
 * Commands cover protocol discovery (magic, max up/down sizes), shared
 * buffer mapping, forced remount of all volumes, and kernel/client
 * debug-mask management.  Returns 0 on success, a negative errno on
 * failure, -ENOIOCTLCMD for unrecognized commands.
 */
static long dispatch_ioctl_command(unsigned int command, unsigned long arg)
{
	static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
	static __s32 max_up_size = MAX_DEV_REQ_UPSIZE;
	static __s32 max_down_size = MAX_DEV_REQ_DOWNSIZE;
	struct ORANGEFS_dev_map_desc user_desc;
	int ret = 0;
	struct dev_mask_info_s mask_info = { 0 };
	struct dev_mask2_info_s mask2_info = { 0, 0 };
	int upstream_kmod = 1;
	struct list_head *tmp = NULL;
	struct orangefs_sb_info_s *orangefs_sb = NULL;

	/* mtmoore: add locking here */

	switch (command) {
	case ORANGEFS_DEV_GET_MAGIC:
		/* hand the device magic to the client for sanity checks */
		return ((put_user(magic, (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	case ORANGEFS_DEV_GET_MAX_UPSIZE:
		return ((put_user(max_up_size,
				  (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	case ORANGEFS_DEV_GET_MAX_DOWNSIZE:
		return ((put_user(max_down_size,
				  (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	case ORANGEFS_DEV_MAP:
		/* set up the shared I/O buffer mapping; only once */
		ret = copy_from_user(&user_desc,
				     (struct ORANGEFS_dev_map_desc __user *)
				     arg,
				     sizeof(struct ORANGEFS_dev_map_desc));
		if (orangefs_get_bufmap_init()) {
			return -EINVAL;
		} else {
			return ret ?
			       -EIO :
			       orangefs_bufmap_initialize(&user_desc);
		}
	case ORANGEFS_DEV_REMOUNT_ALL:
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: got ORANGEFS_DEV_REMOUNT_ALL\n",
			     __func__);

		/*
		 * remount all mounted orangefs volumes to regain the lost
		 * dynamic mount tables (if any) -- NOTE: this is done
		 * without keeping the superblock list locked due to the
		 * upcall/downcall waiting. also, the request semaphore is
		 * used to ensure that no operations will be serviced until
		 * all of the remounts are serviced (to avoid ops between
		 * mounts to fail)
		 */
		ret = mutex_lock_interruptible(&request_mutex);
		if (ret < 0)
			return ret;
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: priority remount in progress\n",
			     __func__);
		list_for_each(tmp, &orangefs_superblocks) {
			orangefs_sb =
				list_entry(tmp,
					   struct orangefs_sb_info_s,
					   list);
			if (orangefs_sb && (orangefs_sb->sb)) {
				gossip_debug(GOSSIP_DEV_DEBUG,
					     "%s: Remounting SB %p\n",
					     __func__,
					     orangefs_sb);

				ret = orangefs_remount(orangefs_sb->sb);
				if (ret) {
					gossip_debug(GOSSIP_DEV_DEBUG,
						     "SB %p remount failed\n",
						     orangefs_sb);
					break;
				}
			}
		}
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: priority remount complete\n",
			     __func__);
		mutex_unlock(&request_mutex);
		return ret;

	case ORANGEFS_DEV_UPSTREAM:
		/* tell the client it is talking to the upstream kmod */
		ret = copy_to_user((void __user *)arg,
				    &upstream_kmod,
				    sizeof(upstream_kmod));

		if (ret != 0)
			return -EIO;
		else
			return ret;

	case ORANGEFS_DEV_CLIENT_MASK:
		/* client pushes its two-part debug mask down to us */
		ret = copy_from_user(&mask2_info,
				     (void __user *)arg,
				     sizeof(struct dev_mask2_info_s));

		if (ret != 0)
			return -EIO;

		client_debug_mask.mask1 = mask2_info.mask1_value;
		client_debug_mask.mask2 = mask2_info.mask2_value;

		pr_info("%s: client debug mask has been been received "
			":%llx: :%llx:\n",
			__func__,
			(unsigned long long)client_debug_mask.mask1,
			(unsigned long long)client_debug_mask.mask2);

		return ret;

	case ORANGEFS_DEV_CLIENT_STRING:
		/* client sends its keyword list for the debugfs help file */
		ret = copy_from_user(&client_debug_array_string,
				     (void __user *)arg,
				     ORANGEFS_MAX_DEBUG_STRING_LEN);
		if (ret != 0) {
			pr_info("%s: CLIENT_STRING: copy_from_user failed\n",
				__func__);
			return -EIO;
		}

		pr_info("%s: client debug array string has been received.\n",
			__func__);

		if (!help_string_initialized) {

			/* Free the "we don't know yet" default string... */
			kfree(debug_help_string);

			/* build a proper debug help string */
			if (orangefs_prepare_debugfs_help_string(0)) {
				gossip_err("%s: no debug help string \n",
					   __func__);
				return -EIO;
			}

			/* Replace the boilerplate boot-time debug-help file. */
			debugfs_remove(help_file_dentry);

			help_file_dentry =
				debugfs_create_file(
					ORANGEFS_KMOD_DEBUG_HELP_FILE,
					0444,
					debug_dir,
					debug_help_string,
					&debug_help_fops);

			if (!help_file_dentry) {
				gossip_err("%s: debugfs_create_file failed for"
					   " :%s:!\n",
					   __func__,
					   ORANGEFS_KMOD_DEBUG_HELP_FILE);
				return -EIO;
			}
		}

		debug_mask_to_string(&client_debug_mask, 1);

		debugfs_remove(client_debug_dentry);

		orangefs_client_debug_init();

		help_string_initialized++;

		return ret;

	case ORANGEFS_DEV_DEBUG:
		/* set the kernel or client debug mask from user space */
		ret = copy_from_user(&mask_info,
				     (void __user *)arg,
				     sizeof(mask_info));

		if (ret != 0)
			return -EIO;

		if (mask_info.mask_type == KERNEL_MASK) {
			if ((mask_info.mask_value == 0)
			    && (kernel_mask_set_mod_init)) {
				/*
				 * the kernel debug mask was set when the
				 * kernel module was loaded; don't override
				 * it if the client-core was started without
				 * a value for ORANGEFS_KMODMASK.
				 */
				return 0;
			}
			debug_mask_to_string(&mask_info.mask_value,
					     mask_info.mask_type);
			gossip_debug_mask = mask_info.mask_value;
			pr_info("%s: kernel debug mask has been modified to "
				":%s: :%llx:\n",
				__func__,
				kernel_debug_string,
				(unsigned long long)gossip_debug_mask);
		} else if (mask_info.mask_type == CLIENT_MASK) {
			debug_mask_to_string(&mask_info.mask_value,
					     mask_info.mask_type);
			pr_info("%s: client debug mask has been modified to"
				":%s: :%llx:\n",
				__func__,
				client_debug_string,
				llu(mask_info.mask_value));
		} else {
			gossip_lerr("Invalid mask type....\n");
			return -EINVAL;
		}

		return ret;

	default:
		return -ENOIOCTLCMD;
	}
	return -ENOIOCTLCMD;
}
810
/*
 * Main ioctl entry point for the request device: validate the command
 * number, then dispatch it.  Returns 0 on success or a negative errno.
 *
 * The previous (int) casts on the return values needlessly narrowed a
 * long before returning it as a long; dropped.
 */
static long orangefs_devreq_ioctl(struct file *file,
			   unsigned int command, unsigned long arg)
{
	long ret;

	/* Check for properly constructed commands */
	ret = check_ioctl_command(command);
	if (ret < 0)
		return ret;

	return dispatch_ioctl_command(command, arg);
}
823
824 #ifdef CONFIG_COMPAT /* CONFIG_COMPAT is in .config */
825
/*
 * Compat structure for the ORANGEFS_DEV_MAP ioctl: the 32-bit
 * user-space layout of struct ORANGEFS_dev_map_desc, with the buffer
 * pointer carried as a compat_uptr_t.
 */
struct ORANGEFS_dev_map_desc32 {
	compat_uptr_t ptr;	/* 32-bit user address of the shared buffer */
	__s32 total_size;
	__s32 size;
	__s32 count;
};
833
/*
 * Convert a 32-bit struct ORANGEFS_dev_map_desc32 at user address
 * 'args' into a native struct ORANGEFS_dev_map_desc staged on the
 * compat user stack.  Returns the user address of the converted
 * struct; on any copy failure returns 0 and sets *error to -EFAULT.
 * (The "26" suffix presumably dates from the 2.6 kernel series --
 * historical naming only.)
 */
static unsigned long translate_dev_map26(unsigned long args, long *error)
{
	struct ORANGEFS_dev_map_desc32 __user *p32 = (void __user *)args;
	/*
	 * Depending on the architecture, allocate some space on the
	 * user-call-stack based on our expected layout.
	 */
	struct ORANGEFS_dev_map_desc __user *p =
	    compat_alloc_user_space(sizeof(*p));
	compat_uptr_t addr;

	*error = 0;
	/* get the ptr from the 32 bit user-space */
	if (get_user(addr, &p32->ptr))
		goto err;
	/* try to put that into a 64-bit layout */
	if (put_user(compat_ptr(addr), &p->ptr))
		goto err;
	/* copy the remaining fields */
	if (copy_in_user(&p->total_size, &p32->total_size, sizeof(__s32)))
		goto err;
	if (copy_in_user(&p->size, &p32->size, sizeof(__s32)))
		goto err;
	if (copy_in_user(&p->count, &p32->count, sizeof(__s32)))
		goto err;
	return (unsigned long)p;
err:
	*error = -EFAULT;
	return 0;
}
864
865 /*
866 * 32 bit user-space apps' ioctl handlers when kernel modules
867 * is compiled as a 64 bit one
868 */
869 static long orangefs_devreq_compat_ioctl(struct file *filp, unsigned int cmd,
870 unsigned long args)
871 {
872 long ret;
873 unsigned long arg = args;
874
875 /* Check for properly constructed commands */
876 ret = check_ioctl_command(cmd);
877 if (ret < 0)
878 return ret;
879 if (cmd == ORANGEFS_DEV_MAP) {
880 /*
881 * convert the arguments to what we expect internally
882 * in kernel space
883 */
884 arg = translate_dev_map26(args, &ret);
885 if (ret < 0) {
886 gossip_err("Could not translate dev map\n");
887 return ret;
888 }
889 }
890 /* no other ioctl requires translation */
891 return dispatch_ioctl_command(cmd, arg);
892 }
893
894 #endif /* CONFIG_COMPAT is in .config */
895
896 /* the assigned character device major number */
897 static int orangefs_dev_major;
898
899 /*
900 * Initialize orangefs device specific state:
901 * Must be called at module load time only
902 */
/*
 * Register the orangefs-req character device with a dynamically
 * assigned major number.  Called once at module load; returns 0 on
 * success or the negative errno from register_chrdev().
 */
int orangefs_dev_init(void)
{
	/* register orangefs-req device */
	orangefs_dev_major = register_chrdev(0,
					     ORANGEFS_REQDEVICE_NAME,
					     &orangefs_devreq_file_operations);
	if (orangefs_dev_major < 0) {
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "Failed to register /dev/%s (error %d)\n",
			     ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
		return orangefs_dev_major;
	}

	gossip_debug(GOSSIP_DEV_DEBUG,
		     "*** /dev/%s character device registered ***\n",
		     ORANGEFS_REQDEVICE_NAME);
	gossip_debug(GOSSIP_DEV_DEBUG, "'mknod /dev/%s c %d 0'.\n",
		     ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
	return 0;
}
923
/* Unregister the orangefs-req character device at module unload. */
void orangefs_dev_cleanup(void)
{
	unregister_chrdev(orangefs_dev_major, ORANGEFS_REQDEVICE_NAME);
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "*** /dev/%s character device unregistered ***\n",
		     ORANGEFS_REQDEVICE_NAME);
}
931
932 static unsigned int orangefs_devreq_poll(struct file *file,
933 struct poll_table_struct *poll_table)
934 {
935 int poll_revent_mask = 0;
936
937 poll_wait(file, &orangefs_request_list_waitq, poll_table);
938
939 if (!list_empty(&orangefs_request_list))
940 poll_revent_mask |= POLL_IN;
941 return poll_revent_mask;
942 }
943
/*
 * File operations for /dev/pvfs2-req: the user-space client-core reads
 * upcalls with read(), delivers downcalls with writev(), polls for
 * pending requests, and uses ioctls for setup (buffer mapping, debug
 * masks, remount-all).
 */
const struct file_operations orangefs_devreq_file_operations = {
	.owner = THIS_MODULE,
	.read = orangefs_devreq_read,
	.write_iter = orangefs_devreq_write_iter,
	.open = orangefs_devreq_open,
	.release = orangefs_devreq_release,
	.unlocked_ioctl = orangefs_devreq_ioctl,

#ifdef CONFIG_COMPAT		/* CONFIG_COMPAT is in .config */
	.compat_ioctl = orangefs_devreq_compat_ioctl,
#endif
	.poll = orangefs_devreq_poll
};
This page took 0.06646 seconds and 4 git commands to generate.