Orangefs: Remove upcall trailers which are not used.
fs/orangefs/devpvfs2-req.c
/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * Changes by Acxiom Corporation to add protocol version to kernel
 * communication, Copyright Acxiom Corporation, 2005.
 *
 * See COPYING in top-level directory.
 */

#include "protocol.h"
#include "pvfs2-kernel.h"
#include "pvfs2-dev-proto.h"
#include "pvfs2-bufmap.h"

#include <linux/debugfs.h>
#include <linux/slab.h>

/* this file implements the /dev/pvfs2-req device node */

static int open_access_count;

#define DUMP_DEVICE_ERROR() \
do { \
	gossip_err("*****************************************************\n");\
	gossip_err("PVFS2 Device Error: You cannot open the device file "); \
	gossip_err("\n/dev/%s more than once. Please make sure that\nthere " \
		   "are no ", PVFS2_REQDEVICE_NAME); \
	gossip_err("instances of a program using this device\ncurrently " \
		   "running. (You must verify this!)\n"); \
	gossip_err("For example, you can use the lsof program as follows:\n");\
	gossip_err("'lsof | grep %s' (run this as root)\n", \
		   PVFS2_REQDEVICE_NAME); \
	gossip_err(" open_access_count = %d\n", open_access_count); \
	gossip_err("*****************************************************\n");\
} while (0)

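/*
 * Map an op tag to a bucket in the ops-in-progress hash table.  Note
 * that do_div() divides "tag" in place and returns the remainder, which
 * is what we use as the bucket index.
 */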
static int hash_func(__u64 tag, int table_size)
{
	return do_div(tag, (unsigned int)table_size);
}

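/*
 * Ops that have been handed off to the client-core wait for their
 * matching downcall in htable_ops_in_progress, hashed by tag and
 * protected by htable_ops_in_progress_lock.
 */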
static void pvfs2_devreq_add_op(struct pvfs2_kernel_op_s *op)
{
	int index = hash_func(op->tag, hash_table_size);

	spin_lock(&htable_ops_in_progress_lock);
	list_add_tail(&op->list, &htable_ops_in_progress[index]);
	spin_unlock(&htable_ops_in_progress_lock);
}

static struct pvfs2_kernel_op_s *pvfs2_devreq_remove_op(__u64 tag)
{
	struct pvfs2_kernel_op_s *op, *next;
	int index;

	index = hash_func(tag, hash_table_size);

	spin_lock(&htable_ops_in_progress_lock);
	list_for_each_entry_safe(op,
				 next,
				 &htable_ops_in_progress[index],
				 list) {
		if (op->tag == tag) {
			list_del(&op->list);
			spin_unlock(&htable_ops_in_progress_lock);
			return op;
		}
	}

	spin_unlock(&htable_ops_in_progress_lock);
	return NULL;
}

static int pvfs2_devreq_open(struct inode *inode, struct file *file)
{
	int ret = -EINVAL;

	if (!(file->f_flags & O_NONBLOCK)) {
		gossip_err("pvfs2: device cannot be opened in blocking mode\n");
		goto out;
	}
	ret = -EACCES;
	gossip_debug(GOSSIP_DEV_DEBUG, "pvfs2-client-core: opening device\n");
	mutex_lock(&devreq_mutex);

	if (open_access_count == 0) {
		ret = generic_file_open(inode, file);
		if (ret == 0)
			open_access_count++;
	} else {
		DUMP_DEVICE_ERROR();
	}
	mutex_unlock(&devreq_mutex);

out:

	gossip_debug(GOSSIP_DEV_DEBUG,
		     "pvfs2-client-core: open device complete (ret = %d)\n",
		     ret);
	return ret;
}

static ssize_t pvfs2_devreq_read(struct file *file,
				 char __user *buf,
				 size_t count, loff_t *offset)
{
	int ret = 0;
	ssize_t len = 0;
	struct pvfs2_kernel_op_s *cur_op = NULL;
	static __s32 magic = PVFS2_DEVREQ_MAGIC;
	__s32 proto_ver = PVFS_KERNEL_PROTO_VERSION;

	if (!(file->f_flags & O_NONBLOCK)) {
		/* We do not support blocking reads/opens any more */
		gossip_err("pvfs2: blocking reads are not supported! (pvfs2-client-core bug)\n");
		return -EINVAL;
	} else {
		struct pvfs2_kernel_op_s *op = NULL, *temp = NULL;
		/* get next op (if any) from top of list */
		spin_lock(&pvfs2_request_list_lock);
		list_for_each_entry_safe(op, temp, &pvfs2_request_list, list) {
			__s32 fsid = fsid_of_op(op);
			/*
			 * Check if this op's fsid is known and needs
			 * remounting
			 */
			if (fsid != PVFS_FS_ID_NULL &&
			    fs_mount_pending(fsid) == 1) {
				gossip_debug(GOSSIP_DEV_DEBUG,
					     "Skipping op tag %llu %s\n",
					     llu(op->tag),
					     get_opname_string(op));
				continue;
			} else {
				/*
				 * The op does not belong to any particular
				 * fsid, or its fsid is already mounted, so
				 * let it through.
				 */
				cur_op = op;
				spin_lock(&cur_op->lock);
				list_del(&cur_op->list);
				spin_unlock(&cur_op->lock);
				break;
			}
		}
		spin_unlock(&pvfs2_request_list_lock);
	}

	if (cur_op) {
		spin_lock(&cur_op->lock);

		gossip_debug(GOSSIP_DEV_DEBUG,
			     "client-core: reading op tag %llu %s\n",
			     llu(cur_op->tag), get_opname_string(cur_op));
		if (op_state_in_progress(cur_op) || op_state_serviced(cur_op)) {
			gossip_err("WARNING: Current op already queued...skipping\n");
		} else {
			/*
			 * atomically move the operation to the
			 * htable_ops_in_progress
			 */
			set_op_state_inprogress(cur_op);
			pvfs2_devreq_add_op(cur_op);
		}

		spin_unlock(&cur_op->lock);

		/* Push the upcall out */
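		/*
		 * The buffer handed to the client-core is laid out as:
		 * protocol version (__s32), device magic (__s32), op tag
		 * (__u64), then the upcall structure itself.
		 */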
		len = MAX_ALIGNED_DEV_REQ_UPSIZE;
		if ((size_t) len <= count) {
			ret = copy_to_user(buf,
					   &proto_ver,
					   sizeof(__s32));
			if (ret == 0) {
				ret = copy_to_user(buf + sizeof(__s32),
						   &magic,
						   sizeof(__s32));
				if (ret == 0) {
					ret = copy_to_user(buf + 2 * sizeof(__s32),
							   &cur_op->tag,
							   sizeof(__u64));
					if (ret == 0) {
						ret = copy_to_user(
							buf +
							2 * sizeof(__s32) +
							sizeof(__u64),
							&cur_op->upcall,
							sizeof(struct pvfs2_upcall_s));
					}
				}
			}

			if (ret) {
				gossip_err("Failed to copy data to user space\n");
				len = -EFAULT;
			}
		} else {
			gossip_err("Failed to copy data to user space\n");
			len = -EIO;
		}
	} else if (file->f_flags & O_NONBLOCK) {
		/*
		 * if in non-blocking mode, return EAGAIN since no requests are
		 * ready yet
		 */
		len = -EAGAIN;
	}
	return len;
}

/* Function for writev() callers into the device */
static ssize_t pvfs2_devreq_writev(struct file *file,
				   const struct iovec *iov,
				   size_t count,
				   loff_t *offset)
{
	struct pvfs2_kernel_op_s *op = NULL;
	void *buffer = NULL;
	void *ptr = NULL;
	unsigned long i = 0;
	static int max_downsize = MAX_ALIGNED_DEV_REQ_DOWNSIZE;
	int ret = 0, num_remaining = max_downsize;
	int notrailer_count = 4; /* num elements in iovec without trailer */
	int payload_size = 0;
	__s32 magic = 0;
	__s32 proto_ver = 0;
	__u64 tag = 0;
	ssize_t total_returned_size = 0;

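	/*
	 * The client-core writes the downcall back as an iovec: the first
	 * notrailer_count (4) elements collectively carry the protocol
	 * version, magic, tag and downcall structure; an optional fifth
	 * element carries trailer data sized by downcall.trailer_size.
	 */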
	/* Either there is a trailer or there isn't */
	if (count != notrailer_count && count != (notrailer_count + 1)) {
		gossip_err("Error: Number of iov vectors is (%zu) and notrailer count is %d\n",
			   count,
			   notrailer_count);
		return -EPROTO;
	}
	buffer = dev_req_alloc();
	if (!buffer)
		return -ENOMEM;
	ptr = buffer;

	for (i = 0; i < notrailer_count; i++) {
		if (iov[i].iov_len > num_remaining) {
			gossip_err("writev error: Freeing buffer and returning\n");
			dev_req_release(buffer);
			return -EMSGSIZE;
		}
		ret = copy_from_user(ptr, iov[i].iov_base, iov[i].iov_len);
		if (ret) {
			gossip_err("Failed to copy data from user space\n");
			dev_req_release(buffer);
			return -EIO;
		}
		num_remaining -= iov[i].iov_len;
		ptr += iov[i].iov_len;
		payload_size += iov[i].iov_len;
	}
	total_returned_size = payload_size;

	/*
	 * These elements are currently 8 byte aligned (8 bytes for
	 * (version + magic), 8 bytes for tag).  If you add another element,
	 * either make it 8 bytes big or use get_unaligned when assigning.
	 */
	ptr = buffer;
	proto_ver = *((__s32 *) ptr);
	ptr += sizeof(__s32);

	magic = *((__s32 *) ptr);
	ptr += sizeof(__s32);

	tag = *((__u64 *) ptr);
	ptr += sizeof(__u64);

	if (magic != PVFS2_DEVREQ_MAGIC) {
		gossip_err("Error: Device magic number does not match.\n");
		dev_req_release(buffer);
		return -EPROTO;
	}

	/*
	 * proto_ver = 20902 for 2.9.2
	 */

	op = pvfs2_devreq_remove_op(tag);
	if (op) {
		/* Increase ref count! */
		get_op(op);
		/* cut off magic and tag from payload size */
		payload_size -= (2 * sizeof(__s32) + sizeof(__u64));
		if (payload_size <= sizeof(struct pvfs2_downcall_s))
			/* copy the passed in downcall into the op */
			memcpy(&op->downcall,
			       ptr,
			       sizeof(struct pvfs2_downcall_s));
		else
			gossip_debug(GOSSIP_DEV_DEBUG,
				     "writev: Ignoring %d bytes\n",
				     payload_size);

		/* Do not allocate needlessly if client-core forgets
		 * to reset trailer size on op errors.
		 */
		if (op->downcall.status == 0 && op->downcall.trailer_size > 0) {
			__u64 trailer_size = op->downcall.trailer_size;
			size_t size;
			gossip_debug(GOSSIP_DEV_DEBUG,
				     "writev: trailer size %ld\n",
				     (unsigned long)trailer_size);
			if (count != (notrailer_count + 1)) {
				gossip_err("Error: trailer size (%ld) is non-zero, no trailer elements though? (%zu)\n", (unsigned long)trailer_size, count);
				dev_req_release(buffer);
				put_op(op);
				return -EPROTO;
			}
			size = iov[notrailer_count].iov_len;
			if (size > trailer_size) {
				gossip_err("writev error: iov_len (%zd) exceeds trailer size (%ld)\n", size, (unsigned long)trailer_size);
				dev_req_release(buffer);
				put_op(op);
				return -EMSGSIZE;
			}
			/* Allocate a buffer large enough to hold the
			 * trailer bytes.
			 */
			op->downcall.trailer_buf = vmalloc(trailer_size);
			if (op->downcall.trailer_buf != NULL) {
				gossip_debug(GOSSIP_DEV_DEBUG, "vmalloc: %p\n",
					     op->downcall.trailer_buf);
				ret = copy_from_user(op->downcall.trailer_buf,
						     iov[notrailer_count].iov_base,
						     size);
				if (ret) {
					gossip_err("Failed to copy trailer data from user space\n");
					dev_req_release(buffer);
					gossip_debug(GOSSIP_DEV_DEBUG,
						     "vfree: %p\n",
						     op->downcall.trailer_buf);
					vfree(op->downcall.trailer_buf);
					op->downcall.trailer_buf = NULL;
					put_op(op);
					return -EIO;
				}
				memset(op->downcall.trailer_buf + size, 0,
				       trailer_size - size);
			} else {
				/* Change downcall status */
				op->downcall.status = -ENOMEM;
				gossip_err("writev: could not vmalloc for trailer!\n");
			}
		}

		/* If this operation is an I/O operation and it was
		 * initiated on behalf of a *synchronous* VFS I/O operation,
		 * only then do we need to wait for all data to be copied
		 * before we can return, to avoid buffer corruption and
		 * races that can pull the buffers out from under us.
		 *
		 * Essentially we're synchronizing with other parts of the
		 * vfs implicitly by not allowing the user space
		 * application reading/writing this device to return until
		 * the buffers are done being used.
		 */
		if (op->upcall.type == PVFS2_VFS_OP_FILE_IO &&
		    op->upcall.req.io.async_vfs_io == PVFS_VFS_SYNC_IO) {
			int timed_out = 0;
			DECLARE_WAITQUEUE(wait_entry, current);

			/* tell the vfs op waiting on a waitqueue
			 * that this op is done
			 */
			spin_lock(&op->lock);
			set_op_state_serviced(op);
			spin_unlock(&op->lock);

			add_wait_queue_exclusive(&op->io_completion_waitq,
						 &wait_entry);
			wake_up_interruptible(&op->waitq);

			while (1) {
				set_current_state(TASK_INTERRUPTIBLE);

				spin_lock(&op->lock);
				if (op->io_completed) {
					spin_unlock(&op->lock);
					break;
				}
				spin_unlock(&op->lock);

				if (!signal_pending(current)) {
					int timeout =
					    MSECS_TO_JIFFIES(1000 *
							     op_timeout_secs);
					if (!schedule_timeout(timeout)) {
						gossip_debug(GOSSIP_DEV_DEBUG, "*** I/O wait time is up\n");
						timed_out = 1;
						break;
					}
					continue;
				}

				gossip_debug(GOSSIP_DEV_DEBUG, "*** signal on I/O wait -- aborting\n");
				break;
			}

			set_current_state(TASK_RUNNING);
			remove_wait_queue(&op->io_completion_waitq,
					  &wait_entry);

			/* NOTE: for I/O operations we handle releasing the op
			 * object except in the case of timeout.  The reason we
			 * can't free the op in timeout cases is that the op
			 * service logic in the vfs retries operations using
			 * the same op ptr, thus it can't be freed.
			 */
			if (!timed_out)
				op_release(op);
		} else {

			/*
			 * tell the vfs op waiting on a waitqueue that
			 * this op is done
			 */
			spin_lock(&op->lock);
			set_op_state_serviced(op);
			spin_unlock(&op->lock);
			/*
			 * for every other operation (i.e. non-I/O), we need to
			 * wake up the callers for downcall completion
			 * notification
			 */
			wake_up_interruptible(&op->waitq);
		}
	} else {
		/* ignore downcalls that we're not interested in */
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "WARNING: No one's waiting for tag %llu\n",
			     llu(tag));
	}
	dev_req_release(buffer);

	return total_returned_size;
}

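/*
 * write_iter() entry point: unpack the iovec array carried by the
 * iov_iter and hand it to the writev helper above.
 */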
static ssize_t pvfs2_devreq_write_iter(struct kiocb *iocb,
				       struct iov_iter *iter)
{
	return pvfs2_devreq_writev(iocb->ki_filp,
				   iter->iov,
				   iter->nr_segs,
				   &iocb->ki_pos);
}

/*
 * Mark all mounted file systems as needing a remount; returns 1 if no
 * file systems are currently mounted.
 */
static int mark_all_pending_mounts(void)
{
	int unmounted = 1;
	struct pvfs2_sb_info_s *pvfs2_sb = NULL;

	spin_lock(&pvfs2_superblocks_lock);
	list_for_each_entry(pvfs2_sb, &pvfs2_superblocks, list) {
		/* All of these file systems require a remount */
		pvfs2_sb->mount_pending = 1;
		unmounted = 0;
	}
	spin_unlock(&pvfs2_superblocks_lock);
	return unmounted;
}

/*
 * Determine if a given file system needs to be remounted or not.
 *  Returns -1 on error
 *           0 if already mounted
 *           1 if needs remount
 */
int fs_mount_pending(__s32 fsid)
{
	int mount_pending = -1;
	struct pvfs2_sb_info_s *pvfs2_sb = NULL;

	spin_lock(&pvfs2_superblocks_lock);
	list_for_each_entry(pvfs2_sb, &pvfs2_superblocks, list) {
		if (pvfs2_sb->fs_id == fsid) {
			mount_pending = pvfs2_sb->mount_pending;
			break;
		}
	}
	spin_unlock(&pvfs2_superblocks_lock);
	return mount_pending;
}

/*
 * NOTE: gets called when the last reference to this device is dropped.
 * Using the open_access_count variable, we enforce a reference count
 * on this file so that it can be opened by only one process at a time.
 * The devreq_mutex is used to make sure all i/o has completed
 * before we call pvfs_bufmap_finalize, and similar such tricky
 * situations.
 */
static int pvfs2_devreq_release(struct inode *inode, struct file *file)
{
	int unmounted = 0;

	gossip_debug(GOSSIP_DEV_DEBUG,
		     "%s:pvfs2-client-core: exiting, closing device\n",
		     __func__);

	mutex_lock(&devreq_mutex);
	pvfs_bufmap_finalize();

	open_access_count--;

	unmounted = mark_all_pending_mounts();
	gossip_debug(GOSSIP_DEV_DEBUG, "PVFS2 Device Close: Filesystem(s) %s\n",
		     (unmounted ? "UNMOUNTED" : "MOUNTED"));
	mutex_unlock(&devreq_mutex);

	/*
	 * Walk through the list of ops in the request list, mark them
	 * as purged and wake them up.
	 */
	purge_waiting_ops();
	/*
	 * Walk through the hash table of in progress operations; mark
	 * them as purged and wake them up
	 */
	purge_inprogress_ops();
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "pvfs2-client-core: device close complete\n");
	return 0;
}

int is_daemon_in_service(void)
{
	int in_service;

	/*
	 * This checks whether the client-core is alive based on the
	 * access count we maintain on the device.
	 */
	mutex_lock(&devreq_mutex);
	in_service = open_access_count == 1 ? 0 : -EIO;
	mutex_unlock(&devreq_mutex);
	return in_service;
}

static inline long check_ioctl_command(unsigned int command)
{
	/* Check for valid ioctl codes */
	if (_IOC_TYPE(command) != PVFS_DEV_MAGIC) {
		gossip_err("device ioctl magic numbers don't match! Did you rebuild pvfs2-client-core/libpvfs2? [cmd %x, magic %x != %x]\n",
			   command,
			   _IOC_TYPE(command),
			   PVFS_DEV_MAGIC);
		return -EINVAL;
	}
	/* and valid ioctl commands */
	if (_IOC_NR(command) >= PVFS_DEV_MAXNR || _IOC_NR(command) <= 0) {
		gossip_err("Invalid ioctl command number [%d >= %d]\n",
			   _IOC_NR(command), PVFS_DEV_MAXNR);
		return -ENOIOCTLCMD;
	}
	return 0;
}

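/*
 * Handle the PVFS_DEV_* ioctls issued by the client-core.  Both callers
 * run the command through check_ioctl_command() before dispatching here.
 */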
static long dispatch_ioctl_command(unsigned int command, unsigned long arg)
{
	static __s32 magic = PVFS2_DEVREQ_MAGIC;
	static __s32 max_up_size = MAX_ALIGNED_DEV_REQ_UPSIZE;
	static __s32 max_down_size = MAX_ALIGNED_DEV_REQ_DOWNSIZE;
	struct PVFS_dev_map_desc user_desc;
	int ret = 0;
	struct dev_mask_info_s mask_info = { 0 };
	struct dev_mask2_info_s mask2_info = { 0, 0 };
	int upstream_kmod = 1;
	struct list_head *tmp = NULL;
	struct pvfs2_sb_info_s *pvfs2_sb = NULL;

	/* mtmoore: add locking here */

	switch (command) {
	case PVFS_DEV_GET_MAGIC:
		return ((put_user(magic, (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	case PVFS_DEV_GET_MAX_UPSIZE:
		return ((put_user(max_up_size,
				  (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	case PVFS_DEV_GET_MAX_DOWNSIZE:
		return ((put_user(max_down_size,
				  (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	case PVFS_DEV_MAP:
		ret = copy_from_user(&user_desc,
				     (struct PVFS_dev_map_desc __user *)
				     arg,
				     sizeof(struct PVFS_dev_map_desc));
		return ret ? -EIO : pvfs_bufmap_initialize(&user_desc);
	case PVFS_DEV_REMOUNT_ALL:
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "pvfs2_devreq_ioctl: got PVFS_DEV_REMOUNT_ALL\n");

		/*
		 * remount all mounted pvfs2 volumes to regain the lost
		 * dynamic mount tables (if any) -- NOTE: this is done
		 * without keeping the superblock list locked due to the
		 * upcall/downcall waiting. also, the request semaphore is
		 * used to ensure that no operations will be serviced until
		 * all of the remounts are serviced (to avoid ops issued
		 * between remounts failing)
		 */
		ret = mutex_lock_interruptible(&request_mutex);
		if (ret < 0)
			return ret;
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "pvfs2_devreq_ioctl: priority remount in progress\n");
		list_for_each(tmp, &pvfs2_superblocks) {
			pvfs2_sb =
				list_entry(tmp, struct pvfs2_sb_info_s, list);
			if (pvfs2_sb && (pvfs2_sb->sb)) {
				gossip_debug(GOSSIP_DEV_DEBUG,
					     "Remounting SB %p\n",
					     pvfs2_sb);

				ret = pvfs2_remount(pvfs2_sb->sb);
				if (ret) {
					gossip_debug(GOSSIP_DEV_DEBUG,
						     "SB %p remount failed\n",
						     pvfs2_sb);
					break;
				}
			}
		}
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "pvfs2_devreq_ioctl: priority remount complete\n");
		mutex_unlock(&request_mutex);
		return ret;

	case PVFS_DEV_UPSTREAM:
		ret = copy_to_user((void __user *)arg,
				   &upstream_kmod,
				   sizeof(upstream_kmod));

		if (ret != 0)
			return -EIO;
		else
			return ret;

	case PVFS_DEV_CLIENT_MASK:
		ret = copy_from_user(&mask2_info,
				     (void __user *)arg,
				     sizeof(struct dev_mask2_info_s));

		if (ret != 0)
			return -EIO;

		client_debug_mask.mask1 = mask2_info.mask1_value;
		client_debug_mask.mask2 = mask2_info.mask2_value;

		pr_info("%s: client debug mask has been received "
			":%llx: :%llx:\n",
			__func__,
			(unsigned long long)client_debug_mask.mask1,
			(unsigned long long)client_debug_mask.mask2);

		return ret;

	case PVFS_DEV_CLIENT_STRING:
		ret = copy_from_user(&client_debug_array_string,
				     (void __user *)arg,
				     PVFS2_MAX_DEBUG_STRING_LEN);
		if (ret != 0) {
			pr_info("%s: PVFS_DEV_CLIENT_STRING: copy_from_user failed\n",
				__func__);
			return -EIO;
		}

		pr_info("%s: client debug array string has been received.\n",
			__func__);

		if (!help_string_initialized) {

			/* Free the "we don't know yet" default string... */
			kfree(debug_help_string);

			/* build a proper debug help string */
			if (orangefs_prepare_debugfs_help_string(0)) {
				gossip_err("%s: prepare_debugfs_help_string failed\n",
					   __func__);
				return -EIO;
			}

			/* Replace the boilerplate boot-time debug-help file. */
			debugfs_remove(help_file_dentry);

			help_file_dentry =
				debugfs_create_file(
					ORANGEFS_KMOD_DEBUG_HELP_FILE,
					0444,
					debug_dir,
					debug_help_string,
					&debug_help_fops);

			if (!help_file_dentry) {
				gossip_err("%s: debugfs_create_file failed for"
					   " :%s:!\n",
					   __func__,
					   ORANGEFS_KMOD_DEBUG_HELP_FILE);
				return -EIO;
			}
		}

		debug_mask_to_string(&client_debug_mask, 1);

		debugfs_remove(client_debug_dentry);

		pvfs2_client_debug_init();

		help_string_initialized++;

		return ret;

	case PVFS_DEV_DEBUG:
		ret = copy_from_user(&mask_info,
				     (void __user *)arg,
				     sizeof(mask_info));

		if (ret != 0)
			return -EIO;

		if (mask_info.mask_type == KERNEL_MASK) {
			if ((mask_info.mask_value == 0)
			    && (kernel_mask_set_mod_init)) {
				/*
				 * the kernel debug mask was set when the
				 * kernel module was loaded; don't override
				 * it if the client-core was started without
				 * a value for PVFS2_KMODMASK.
				 */
				return 0;
			}
			debug_mask_to_string(&mask_info.mask_value,
					     mask_info.mask_type);
			gossip_debug_mask = mask_info.mask_value;
			pr_info("PVFS: kernel debug mask has been modified to "
				":%s: :%llx:\n",
				kernel_debug_string,
				(unsigned long long)gossip_debug_mask);
		} else if (mask_info.mask_type == CLIENT_MASK) {
			debug_mask_to_string(&mask_info.mask_value,
					     mask_info.mask_type);
			pr_info("PVFS: client debug mask has been modified to "
				":%s: :%llx:\n",
				client_debug_string,
				llu(mask_info.mask_value));
		} else {
			gossip_lerr("Invalid mask type....\n");
			return -EINVAL;
		}

		return ret;

	default:
		return -ENOIOCTLCMD;
	}
	return -ENOIOCTLCMD;
}

static long pvfs2_devreq_ioctl(struct file *file,
			       unsigned int command, unsigned long arg)
{
	long ret;

	/* Check for properly constructed commands */
	ret = check_ioctl_command(command);
	if (ret < 0)
		return (int)ret;

	return (int)dispatch_ioctl_command(command, arg);
}

#ifdef CONFIG_COMPAT /* CONFIG_COMPAT is in .config */

/* Compat structure for the PVFS_DEV_MAP ioctl */
struct PVFS_dev_map_desc32 {
	compat_uptr_t ptr;
	__s32 total_size;
	__s32 size;
	__s32 count;
};

static unsigned long translate_dev_map26(unsigned long args, long *error)
{
	struct PVFS_dev_map_desc32 __user *p32 = (void __user *)args;
	/*
	 * Depending on the architecture, allocate some space on the
	 * user-call-stack based on our expected layout.
	 */
	struct PVFS_dev_map_desc __user *p =
		compat_alloc_user_space(sizeof(*p));
	compat_uptr_t addr;

	*error = 0;
	/* get the ptr from the 32 bit user-space */
	if (get_user(addr, &p32->ptr))
		goto err;
	/* try to put that into a 64-bit layout */
	if (put_user(compat_ptr(addr), &p->ptr))
		goto err;
	/* copy the remaining fields */
	if (copy_in_user(&p->total_size, &p32->total_size, sizeof(__s32)))
		goto err;
	if (copy_in_user(&p->size, &p32->size, sizeof(__s32)))
		goto err;
	if (copy_in_user(&p->count, &p32->count, sizeof(__s32)))
		goto err;
	return (unsigned long)p;
err:
	*error = -EFAULT;
	return 0;
}

/*
 * 32 bit user-space apps' ioctl handlers when the kernel module
 * is compiled as a 64 bit one
 */
static long pvfs2_devreq_compat_ioctl(struct file *filp, unsigned int cmd,
				      unsigned long args)
{
	long ret;
	unsigned long arg = args;

	/* Check for properly constructed commands */
	ret = check_ioctl_command(cmd);
	if (ret < 0)
		return ret;
	if (cmd == PVFS_DEV_MAP) {
		/*
		 * convert the arguments to what we expect internally
		 * in kernel space
		 */
		arg = translate_dev_map26(args, &ret);
		if (ret < 0) {
			gossip_err("Could not translate dev map\n");
			return ret;
		}
	}
	/* no other ioctl requires translation */
	return dispatch_ioctl_command(cmd, arg);
}

#endif /* CONFIG_COMPAT is in .config */

/*
 * The following two ioctl32 functions had been refactored into the above
 * CONFIG_COMPAT ifdef, but that was an oversimplification that was
 * not noticed until we tried to compile on PowerPC...
 */
#if (defined(CONFIG_COMPAT) && !defined(HAVE_REGISTER_IOCTL32_CONVERSION)) || !defined(CONFIG_COMPAT)
static int pvfs2_ioctl32_init(void)
{
	return 0;
}

static void pvfs2_ioctl32_cleanup(void)
{
	return;
}
#endif

/* the assigned character device major number */
static int pvfs2_dev_major;

/*
 * Initialize pvfs2 device specific state:
 * Must be called at module load time only
 */
int pvfs2_dev_init(void)
{
	int ret;

	/* register the ioctl32 sub-system */
	ret = pvfs2_ioctl32_init();
	if (ret < 0)
		return ret;

	/* register pvfs2-req device */
	pvfs2_dev_major = register_chrdev(0,
					  PVFS2_REQDEVICE_NAME,
					  &pvfs2_devreq_file_operations);
	if (pvfs2_dev_major < 0) {
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "Failed to register /dev/%s (error %d)\n",
			     PVFS2_REQDEVICE_NAME, pvfs2_dev_major);
		pvfs2_ioctl32_cleanup();
		return pvfs2_dev_major;
	}

	gossip_debug(GOSSIP_DEV_DEBUG,
		     "*** /dev/%s character device registered ***\n",
		     PVFS2_REQDEVICE_NAME);
	gossip_debug(GOSSIP_DEV_DEBUG, "'mknod /dev/%s c %d 0'.\n",
		     PVFS2_REQDEVICE_NAME, pvfs2_dev_major);
	return 0;
}

void pvfs2_dev_cleanup(void)
{
	unregister_chrdev(pvfs2_dev_major, PVFS2_REQDEVICE_NAME);
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "*** /dev/%s character device unregistered ***\n",
		     PVFS2_REQDEVICE_NAME);
	/* unregister the ioctl32 sub-system */
	pvfs2_ioctl32_cleanup();
}

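/*
 * Poll support for the client-core: report the device readable as soon
 * as an op is waiting on pvfs2_request_list, but only while the device
 * is actually held open by the client-core.
 */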
static unsigned int pvfs2_devreq_poll(struct file *file,
				      struct poll_table_struct *poll_table)
{
	int poll_revent_mask = 0;

	if (open_access_count == 1) {
		poll_wait(file, &pvfs2_request_list_waitq, poll_table);

		spin_lock(&pvfs2_request_list_lock);
		if (!list_empty(&pvfs2_request_list))
			poll_revent_mask |= POLL_IN;
		spin_unlock(&pvfs2_request_list_lock);
	}
	return poll_revent_mask;
}

const struct file_operations pvfs2_devreq_file_operations = {
	.owner = THIS_MODULE,
	.read = pvfs2_devreq_read,
	.write_iter = pvfs2_devreq_write_iter,
	.open = pvfs2_devreq_open,
	.release = pvfs2_devreq_release,
	.unlocked_ioctl = pvfs2_devreq_ioctl,

#ifdef CONFIG_COMPAT /* CONFIG_COMPAT is in .config */
	.compat_ioctl = pvfs2_devreq_compat_ioctl,
#endif
	.poll = pvfs2_devreq_poll
};