/*
 * Public API and common code for kernel->userspace relay file support.
 *
 * Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
 * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com)
 * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * Moved to kernel/relay.c by Paul Mundt, 2006.
 * November 2006 - CPU hotplug support by Mathieu Desnoyers
 *	(mathieu.desnoyers@polymtl.ca)
 *
 * This file is released under the GPL.
 */
14 //ust// #include <linux/errno.h>
15 //ust// #include <linux/stddef.h>
16 //ust// #include <linux/slab.h>
17 //ust// #include <linux/module.h>
18 //ust// #include <linux/string.h>
19 //ust// #include <linux/ltt-relay.h>
20 //ust// #include <linux/vmalloc.h>
21 //ust// #include <linux/mm.h>
22 //ust// #include <linux/cpu.h>
23 //ust// #include <linux/splice.h>
24 //ust// #include <linux/bitops.h>
26 #include "kernelcompat.h"
32 /* list of open channels, for cpu hotplug */
33 static DEFINE_MUTEX(relay_channels_mutex
);
34 static LIST_HEAD(relay_channels
);
/**
 *	relay_alloc_buf - allocate a channel buffer
 *	@buf: the buffer struct
 *	@size: total size of the buffer
 */
41 //ust// static int relay_alloc_buf(struct rchan_buf *buf, size_t *size)
43 //ust// unsigned int i, n_pages;
44 //ust// struct buf_page *buf_page, *n;
46 //ust// *size = PAGE_ALIGN(*size);
47 //ust// n_pages = *size >> PAGE_SHIFT;
49 //ust// INIT_LIST_HEAD(&buf->pages);
51 //ust// for (i = 0; i < n_pages; i++) {
52 //ust// buf_page = kmalloc_node(sizeof(*buf_page), GFP_KERNEL,
53 //ust// cpu_to_node(buf->cpu));
54 //ust// if (unlikely(!buf_page))
55 //ust// goto depopulate;
56 //ust// buf_page->page = alloc_pages_node(cpu_to_node(buf->cpu),
57 //ust// GFP_KERNEL | __GFP_ZERO, 0);
58 //ust// if (unlikely(!buf_page->page)) {
59 //ust// kfree(buf_page);
60 //ust// goto depopulate;
62 //ust// list_add_tail(&buf_page->list, &buf->pages);
63 //ust// buf_page->offset = (size_t)i << PAGE_SHIFT;
64 //ust// buf_page->buf = buf;
65 //ust// set_page_private(buf_page->page, (unsigned long)buf_page);
67 //ust// buf->wpage = buf_page;
68 //ust// buf->hpage[0] = buf_page;
69 //ust// buf->hpage[1] = buf_page;
70 //ust// buf->rpage = buf_page;
73 //ust// buf->page_count = n_pages;
77 //ust// list_for_each_entry_safe(buf_page, n, &buf->pages, list) {
78 //ust// list_del_init(&buf_page->list);
79 //ust// __free_page(buf_page->page);
80 //ust// kfree(buf_page);
82 //ust// return -ENOMEM;
85 static int relay_alloc_buf(struct rchan_buf
*buf
, size_t *size
)
88 struct buf_page
*buf_page
, *n
;
92 *size
= PAGE_ALIGN(*size
);
94 /* Maybe do read-ahead */
95 result
= mmap(NULL
, *size
, PROT_READ
| PROT_WRITE
, MAP_ANONYMOUS
, -1, 0);
96 if(result
== MAP_FAILED
) {
101 buf
->buf_data
= result
;
102 buf
->buf_size
= *size
;
108 * relay_create_buf - allocate and initialize a channel buffer
109 * @chan: the relay channel
110 * @cpu: cpu the buffer belongs to
112 * Returns channel buffer if successful, %NULL otherwise.
114 static struct rchan_buf
*relay_create_buf(struct rchan
*chan
, int cpu
)
117 struct rchan_buf
*buf
= kzalloc(sizeof(struct rchan_buf
), GFP_KERNEL
);
122 ret
= relay_alloc_buf(buf
, &chan
->alloc_size
);
127 kref_get(&buf
->chan
->kref
);
136 * relay_destroy_channel - free the channel struct
137 * @kref: target kernel reference that contains the relay channel
139 * Should only be called from kref_put().
141 static void relay_destroy_channel(struct kref
*kref
)
143 struct rchan
*chan
= container_of(kref
, struct rchan
, kref
);
148 * relay_destroy_buf - destroy an rchan_buf struct and associated buffer
149 * @buf: the buffer struct
151 static void relay_destroy_buf(struct rchan_buf
*buf
)
153 struct rchan
*chan
= buf
->chan
;
154 struct buf_page
*buf_page
, *n
;
157 result
= munmap(buf
->buf_data
, buf
->buf_size
);
161 //ust// chan->buf[buf->cpu] = NULL;
163 kref_put(&chan
->kref
, relay_destroy_channel
);
167 * relay_remove_buf - remove a channel buffer
168 * @kref: target kernel reference that contains the relay buffer
170 * Removes the file from the fileystem, which also frees the
171 * rchan_buf_struct and the channel buffer. Should only be called from
174 static void relay_remove_buf(struct kref
*kref
)
176 struct rchan_buf
*buf
= container_of(kref
, struct rchan_buf
, kref
);
177 buf
->chan
->cb
->remove_buf_file(buf
->dentry
);
178 relay_destroy_buf(buf
);
/*
 * High-level relay kernel API and associated functions.
 */

/*
 * rchan_callback implementations defining default channel behavior.  Used
 * in place of corresponding NULL values in client callback struct.
 */

/*
 * create_buf_file_create() default callback.  Does nothing.
 */
193 static struct dentry
*create_buf_file_default_callback(const char *filename
,
194 struct dentry
*parent
,
196 struct rchan_buf
*buf
)
/*
 * remove_buf_file() default callback.  Does nothing.
 */
204 static int remove_buf_file_default_callback(struct dentry
*dentry
)
209 /* relay channel default callbacks */
210 static struct rchan_callbacks default_channel_callbacks
= {
211 .create_buf_file
= create_buf_file_default_callback
,
212 .remove_buf_file
= remove_buf_file_default_callback
,
216 * wakeup_readers - wake up readers waiting on a channel
217 * @data: contains the channel buffer
219 * This is the timer function used to defer reader waking.
221 static void wakeup_readers(unsigned long data
)
223 struct rchan_buf
*buf
= (struct rchan_buf
*)data
;
224 wake_up_interruptible(&buf
->read_wait
);
228 * __relay_reset - reset a channel buffer
229 * @buf: the channel buffer
230 * @init: 1 if this is a first-time initialization
232 * See relay_reset() for description of effect.
234 static void __relay_reset(struct rchan_buf
*buf
, unsigned int init
)
237 init_waitqueue_head(&buf
->read_wait
);
238 kref_init(&buf
->kref
);
239 setup_timer(&buf
->timer
, wakeup_readers
, (unsigned long)buf
);
241 del_timer_sync(&buf
->timer
);
247 * relay_open_buf - create a new relay channel buffer
249 * used by relay_open() and CPU hotplug.
251 static struct rchan_buf
*relay_open_buf(struct rchan
*chan
, unsigned int cpu
)
253 struct rchan_buf
*buf
= NULL
;
254 struct dentry
*dentry
;
257 tmpname
= kzalloc(NAME_MAX
+ 1, GFP_KERNEL
);
260 snprintf(tmpname
, NAME_MAX
, "%s%d", chan
->base_filename
, cpu
);
262 buf
= relay_create_buf(chan
, cpu
);
266 __relay_reset(buf
, 1);
268 /* Create file in fs */
269 //ust// dentry = chan->cb->create_buf_file(tmpname, chan->parent, S_IRUSR,
272 //ust// goto free_buf;
274 //ust// buf->dentry = dentry;
279 relay_destroy_buf(buf
);
288 * relay_close_buf - close a channel buffer
289 * @buf: channel buffer
291 * Marks the buffer finalized and restores the default callbacks.
292 * The channel buffer and channel buffer data structure are then freed
293 * automatically when the last reference is given up.
295 static void relay_close_buf(struct rchan_buf
*buf
)
297 del_timer_sync(&buf
->timer
);
298 kref_put(&buf
->kref
, relay_remove_buf
);
301 static void setup_callbacks(struct rchan
*chan
,
302 struct rchan_callbacks
*cb
)
305 chan
->cb
= &default_channel_callbacks
;
309 if (!cb
->create_buf_file
)
310 cb
->create_buf_file
= create_buf_file_default_callback
;
311 if (!cb
->remove_buf_file
)
312 cb
->remove_buf_file
= remove_buf_file_default_callback
;
/**
 *	relay_hotcpu_callback - CPU hotplug callback
 *	@nb: notifier block
 *	@action: hotplug action to take
 *
 *	Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
324 //ust// static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
325 //ust// unsigned long action,
328 //ust// unsigned int hotcpu = (unsigned long)hcpu;
329 //ust// struct rchan *chan;
331 //ust// switch (action) {
332 //ust// case CPU_UP_PREPARE:
333 //ust// case CPU_UP_PREPARE_FROZEN:
334 //ust// mutex_lock(&relay_channels_mutex);
335 //ust// list_for_each_entry(chan, &relay_channels, list) {
336 //ust// if (chan->buf[hotcpu])
338 //ust// chan->buf[hotcpu] = relay_open_buf(chan, hotcpu);
339 //ust// if (!chan->buf[hotcpu]) {
340 //ust// printk(KERN_ERR
341 //ust// "relay_hotcpu_callback: cpu %d buffer "
342 //ust// "creation failed\n", hotcpu);
343 //ust// mutex_unlock(&relay_channels_mutex);
344 //ust// return NOTIFY_BAD;
347 //ust// mutex_unlock(&relay_channels_mutex);
349 //ust// case CPU_DEAD:
350 //ust// case CPU_DEAD_FROZEN:
351 //ust// /* No need to flush the cpu : will be flushed upon
352 //ust// * final relay_flush() call. */
355 //ust// return NOTIFY_OK;
359 * ltt_relay_open - create a new relay channel
360 * @base_filename: base name of files to create
361 * @parent: dentry of parent directory, %NULL for root directory
362 * @subbuf_size: size of sub-buffers
363 * @n_subbufs: number of sub-buffers
364 * @cb: client callback functions
365 * @private_data: user-defined data
367 * Returns channel pointer if successful, %NULL otherwise.
369 * Creates a channel buffer for each cpu using the sizes and
370 * attributes specified. The created channel buffer files
371 * will be named base_filename0...base_filenameN-1. File
372 * permissions will be %S_IRUSR.
374 struct rchan
*ltt_relay_open(const char *base_filename
,
375 struct dentry
*parent
,
378 struct rchan_callbacks
*cb
,
386 if (!(subbuf_size
&& n_subbufs
))
389 chan
= kzalloc(sizeof(struct rchan
), GFP_KERNEL
);
393 chan
->version
= LTT_RELAY_CHANNEL_VERSION
;
394 chan
->n_subbufs
= n_subbufs
;
395 chan
->subbuf_size
= subbuf_size
;
396 chan
->subbuf_size_order
= get_count_order(subbuf_size
);
397 chan
->alloc_size
= FIX_SIZE(subbuf_size
* n_subbufs
);
398 chan
->parent
= parent
;
399 chan
->private_data
= private_data
;
400 strlcpy(chan
->base_filename
, base_filename
, NAME_MAX
);
401 setup_callbacks(chan
, cb
);
402 kref_init(&chan
->kref
);
404 mutex_lock(&relay_channels_mutex
);
405 for_each_online_cpu(i
) {
406 chan
->buf
[i
] = relay_open_buf(chan
, i
);
410 list_add(&chan
->list
, &relay_channels
);
411 mutex_unlock(&relay_channels_mutex
);
416 for_each_possible_cpu(i
) {
419 relay_close_buf(chan
->buf
[i
]);
422 kref_put(&chan
->kref
, relay_destroy_channel
);
423 mutex_unlock(&relay_channels_mutex
);
426 EXPORT_SYMBOL_GPL(ltt_relay_open
);
429 * ltt_relay_close - close the channel
432 * Closes all channel buffers and frees the channel.
434 void ltt_relay_close(struct rchan
*chan
)
441 mutex_lock(&relay_channels_mutex
);
442 for_each_possible_cpu(i
)
444 relay_close_buf(chan
->buf
[i
]);
446 list_del(&chan
->list
);
447 kref_put(&chan
->kref
, relay_destroy_channel
);
448 mutex_unlock(&relay_channels_mutex
);
450 EXPORT_SYMBOL_GPL(ltt_relay_close
);
453 * Start iteration at the previous element. Skip the real list head.
455 struct buf_page
*ltt_relay_find_prev_page(struct rchan_buf
*buf
,
456 struct buf_page
*page
, size_t offset
, ssize_t diff_offset
)
458 struct buf_page
*iter
;
459 size_t orig_iter_off
;
462 orig_iter_off
= page
->offset
;
463 list_for_each_entry_reverse(iter
, &page
->list
, list
) {
465 * Skip the real list head.
467 if (&iter
->list
== &buf
->pages
)
470 if (offset
>= iter
->offset
471 && offset
< iter
->offset
+ PAGE_SIZE
) {
472 #ifdef CONFIG_LTT_RELAY_CHECK_RANDOM_ACCESS
475 "Backward random access detected in "
476 "ltt_relay. Iterations %u, "
477 "offset %zu, orig iter->off %zu, "
478 "iter->off %zu diff_offset %zd.\n", i
,
479 offset
, orig_iter_off
, iter
->offset
,
490 EXPORT_SYMBOL_GPL(ltt_relay_find_prev_page
);
493 * Start iteration at the next element. Skip the real list head.
495 struct buf_page
*ltt_relay_find_next_page(struct rchan_buf
*buf
,
496 struct buf_page
*page
, size_t offset
, ssize_t diff_offset
)
498 struct buf_page
*iter
;
500 size_t orig_iter_off
;
502 orig_iter_off
= page
->offset
;
503 list_for_each_entry(iter
, &page
->list
, list
) {
505 * Skip the real list head.
507 if (&iter
->list
== &buf
->pages
)
510 if (offset
>= iter
->offset
511 && offset
< iter
->offset
+ PAGE_SIZE
) {
512 #ifdef CONFIG_LTT_RELAY_CHECK_RANDOM_ACCESS
515 "Forward random access detected in "
516 "ltt_relay. Iterations %u, "
517 "offset %zu, orig iter->off %zu, "
518 "iter->off %zu diff_offset %zd.\n", i
,
519 offset
, orig_iter_off
, iter
->offset
,
530 EXPORT_SYMBOL_GPL(ltt_relay_find_next_page
);
533 * ltt_relay_write - write data to a ltt_relay buffer.
535 * @offset : offset within the buffer
536 * @src : source address
537 * @len : length to write
538 * @page : cached buffer page
539 * @pagecpy : page size copied so far
541 void _ltt_relay_write(struct rchan_buf
*buf
, size_t offset
,
542 const void *src
, size_t len
, struct buf_page
*page
, ssize_t pagecpy
)
549 * Underlying layer should never ask for writes across
552 WARN_ON(offset
>= buf
->chan
->alloc_size
);
554 page
= ltt_relay_cache_page(buf
, &buf
->wpage
, page
, offset
);
555 pagecpy
= min_t(size_t, len
, PAGE_SIZE
- (offset
& ~PAGE_MASK
));
556 ltt_relay_do_copy(page_address(page
->page
)
557 + (offset
& ~PAGE_MASK
), src
, pagecpy
);
558 } while (unlikely(len
!= pagecpy
));
560 EXPORT_SYMBOL_GPL(_ltt_relay_write
);
563 * ltt_relay_read - read data from ltt_relay_buffer.
565 * @offset : offset within the buffer
566 * @dest : destination address
567 * @len : length to write
569 int ltt_relay_read(struct rchan_buf
*buf
, size_t offset
,
570 void *dest
, size_t len
)
572 struct buf_page
*page
;
573 ssize_t pagecpy
, orig_len
;
576 offset
&= buf
->chan
->alloc_size
- 1;
581 page
= ltt_relay_cache_page(buf
, &buf
->rpage
, page
, offset
);
582 pagecpy
= min_t(size_t, len
, PAGE_SIZE
- (offset
& ~PAGE_MASK
));
583 memcpy(dest
, page_address(page
->page
) + (offset
& ~PAGE_MASK
),
591 * Underlying layer should never ask for reads across
594 WARN_ON(offset
>= buf
->chan
->alloc_size
);
598 EXPORT_SYMBOL_GPL(ltt_relay_read
);
/**
 *	ltt_relay_read_get_page - Get a whole page to read from
 *	@buf : buffer
 *	@offset : offset within the buffer
 */
605 //ust// struct buf_page *ltt_relay_read_get_page(struct rchan_buf *buf, size_t offset)
607 //ust// struct buf_page *page;
609 //ust// offset &= buf->chan->alloc_size - 1;
610 //ust// page = buf->rpage;
611 //ust// page = ltt_relay_cache_page(buf, &buf->rpage, page, offset);
614 //ust// EXPORT_SYMBOL_GPL(ltt_relay_read_get_page);
617 * ltt_relay_offset_address - get address of a location within the buffer
619 * @offset : offset within the buffer.
621 * Return the address where a given offset is located.
622 * Should be used to get the current subbuffer header pointer. Given we know
623 * it's never on a page boundary, it's safe to write directly to this address,
624 * as long as the write is never bigger than a page size.
626 void *ltt_relay_offset_address(struct rchan_buf
*buf
, size_t offset
)
628 struct buf_page
*page
;
631 offset
&= buf
->chan
->alloc_size
- 1;
632 odd
= !!(offset
& buf
->chan
->subbuf_size
);
633 page
= buf
->hpage
[odd
];
634 if (offset
< page
->offset
|| offset
>= page
->offset
+ PAGE_SIZE
)
635 buf
->hpage
[odd
] = page
= buf
->wpage
;
636 page
= ltt_relay_cache_page(buf
, &buf
->hpage
[odd
], page
, offset
);
637 return page_address(page
->page
) + (offset
& ~PAGE_MASK
);
639 //ust// EXPORT_SYMBOL_GPL(ltt_relay_offset_address);
/**
 *	relay_file_open - open file op for relay files
 *
 *	Increments the channel buffer refcount.
 */
648 //ust// static int relay_file_open(struct inode *inode, struct file *filp)
650 //ust// struct rchan_buf *buf = inode->i_private;
651 //ust// kref_get(&buf->kref);
652 //ust// filp->private_data = buf;
654 //ust// return nonseekable_open(inode, filp);
658 * relay_file_release - release file op for relay files
662 * Decrements the channel refcount, as the filesystem is
663 * no longer using it.
665 static int relay_file_release(struct inode
*inode
, struct file
*filp
)
667 struct rchan_buf
*buf
= filp
->private_data
;
668 kref_put(&buf
->kref
, relay_remove_buf
);
673 //ust// const struct file_operations ltt_relay_file_operations = {
674 //ust// .open = relay_file_open,
675 //ust// .release = relay_file_release,
677 //ust// EXPORT_SYMBOL_GPL(ltt_relay_file_operations);
679 //ust// static __init int relay_init(void)
681 //ust// hotcpu_notifier(relay_hotcpu_callback, 5);
685 //ust// module_init(relay_init);