/*
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/mm.h>	/* alloc_pages_node(), page_address() */

#include "../../wrapper/vmalloc.h"	/* for wrapper_vmalloc_sync_all() */
#include "../../wrapper/ringbuffer/config.h"
#include "../../wrapper/ringbuffer/backend.h"
#include "../../wrapper/ringbuffer/frontend.h"

/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
				     struct lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	void **virt;
	unsigned long i;

	num_pages = size >> PAGE_SHIFT;
	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf;	/* Add pages for reader */
		num_subbuf_alloc++;
	}

	pages = kmalloc_node(ALIGN(sizeof(*pages) * num_pages,
				   1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!pages))
		goto pages_error;

	virt = kmalloc_node(ALIGN(sizeof(*virt) * num_pages,
				  1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!virt))
		goto virt_error;

	bufb->array = kmalloc_node(ALIGN(sizeof(*bufb->array)
					 * num_subbuf_alloc,
				  1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->array))
		goto array_error;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
					    GFP_KERNEL | __GFP_ZERO, 0);
		if (unlikely(!pages[i]))
			goto depopulate;
		virt[i] = page_address(pages[i]);
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] =
			kzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_pages) +
				sizeof(struct lib_ring_buffer_backend_page)
				* num_pages_per_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
		if (!bufb->array[i])
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = kzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_subbuffer)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = virt[page_idx];
			bufb->array[i]->p[j].page = pages[page_idx];
			page_idx++;
		}
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	/*
	 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
	 * will not fault.
	 */
	wrapper_vmalloc_sync_all();
	kfree(virt);
	kfree(pages);
	return 0;

free_array:
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		kfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	kfree(bufb->array);
array_error:
	kfree(virt);
virt_error:
	kfree(pages);
pages_error:
	return -ENOMEM;
}

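/*
 * Sizing sketch (illustrative numbers, not taken from any caller): with
 * PAGE_SIZE = 4 kB, size = 64 kB and num_subbuf = 4, the math above gives
 * num_pages = 64 kB >> PAGE_SHIFT = 16 and num_pages_per_subbuf =
 * 16 >> get_count_order(4) = 4. In overwrite mode, extra_reader_sb adds one
 * sub-buffer, so num_subbuf_alloc becomes 5 and num_pages becomes 20.
 */
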
int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	bufb->chan = container_of(chanb, struct channel, backend);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb);
}

void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long i, j, num_subbuf_alloc;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	kfree(bufb->buf_wsb);
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < bufb->num_pages_per_subbuf; j++)
			__free_page(bufb->array[i]->p[j].page);
		kfree(bufb->array[i]);
	}
	kfree(bufb->array);
	bufb->allocated = 0;
}

void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &bufb->array[i]->records_commit, 0);
		v_set(config, &bufb->array[i]->records_unread, 0);
		bufb->array[i]->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}

/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	const struct lib_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}

#ifdef CONFIG_HOTPLUG_CPU
/**
 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
static
int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
				    unsigned long action,
				    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct channel_backend *chanb = container_of(nb, struct channel_backend,
						     cpu_hp_notifier);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		buf = per_cpu_ptr(chanb->buf, cpu);
		ret = lib_ring_buffer_create(buf, chanb, cpu);
		if (ret) {
			printk(KERN_ERR
			  "ring_buffer_cpu_hp_callback: cpu %d "
			  "buffer creation failed\n", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/*
		 * No need to do a buffer switch here, because it will happen
		 * when tracing is stopped, or will be done by switch timer
		 * CPU DEAD callback.
		 */
		break;
	}
	return NOTIFY_OK;
}
#endif

/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @parent: dentry of parent directory, %NULL for root directory
 * @subbuf_size: size of sub-buffers (> PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 on success, a negative error value otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * name_0...name_N-1. File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;

	if (!name)
		return -EPERM;

	/* Check that the subbuffer size is larger than a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2 and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
			(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strlcpy(chanb->name, name, NAME_MAX);
	memcpy(&chanb->config, config, sizeof(chanb->config));

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
			return -ENOMEM;
	}

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		/* Allocating the buffer per-cpu structures */
		chanb->buf = alloc_percpu(struct lib_ring_buffer);
		if (!chanb->buf)
			goto free_cpumask;

		/*
		 * In case of non-hotplug cpu, if the ring-buffer is allocated
		 * in early initcall, it will not be notified of secondary cpus.
		 * In that off case, we need to allocate for all possible cpus.
		 */
#ifdef CONFIG_HOTPLUG_CPU
		/*
		 * buf->backend.allocated test takes care of concurrent CPU
		 * hotplug.
		 * Priority higher than frontend, so we create the ring buffer
		 * before we start the timer.
		 */
		chanb->cpu_hp_notifier.notifier_call =
				lib_ring_buffer_cpu_hp_callback;
		chanb->cpu_hp_notifier.priority = 5;
		register_hotcpu_notifier(&chanb->cpu_hp_notifier);

		get_online_cpus();
		for_each_online_cpu(i) {
			ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
						     chanb, i);
			if (ret)
				goto free_bufs;	/* cpu hotplug locked */
		}
		put_online_cpus();
#else
		for_each_possible_cpu(i) {
			ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
						     chanb, i);
			if (ret)
				goto free_bufs;	/* cpu hotplug locked */
		}
#endif
	} else {
		chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
		if (!chanb->buf)
			goto free_cpumask;
		ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
#ifdef CONFIG_HOTPLUG_CPU
		put_online_cpus();
#endif
		free_percpu(chanb->buf);
	} else
		kfree(chanb->buf);
free_cpumask:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		free_cpumask_var(chanb->cpumask);
	return -ENOMEM;
}

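/*
 * Usage sketch (illustrative only; "client_config", "chan" and "priv" are
 * hypothetical client-side names): a client would typically set up a backend
 * with power-of-2 sizes respecting the constraints documented above, e.g.
 * eight 16 kB sub-buffers:
 *
 *	ret = channel_backend_init(&chan->backend, "my_channel",
 *				   &client_config, priv, 16384, 8);
 *	if (ret)
 *		return ret;	(-EPERM, -EINVAL or -ENOMEM)
 */
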
/**
 * channel_backend_unregister_notifiers - unregister notifiers
 * @chanb: the channel backend
 *
 * Holds CPU hotplug.
 */
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
}

/**
 * channel_backend_free - destroy the channel
 * @chanb: the channel backend
 *
 * Destroys all channel buffers and frees the channel.
 */
void channel_backend_free(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned int i;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_cpumask_var(chanb->cpumask);
		free_percpu(chanb->buf);
	} else {
		struct lib_ring_buffer *buf = chanb->buf;

		CHAN_WARN_ON(chanb, !buf->backend.allocated);
		lib_ring_buffer_free(buf);
		kfree(buf);
	}
}

/**
 * lib_ring_buffer_write - write data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
			    const void *src, size_t len, ssize_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_copy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);

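/*
 * Offset decomposition sketch (illustrative numbers): with 16 kB sub-buffers
 * (subbuf_size_order = 14) and 4 kB pages, offset 0x5234 decomposes as
 * sbidx = 0x5234 >> 14 = 1, index = (0x5234 & 0x3fff) >> PAGE_SHIFT = 1,
 * and byte 0x5234 & ~PAGE_MASK = 0x234 within that page. The memset, strcpy
 * and user-copy writers below use the same decomposition.
 */
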
/**
 * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @c : the byte to write
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
			     size_t offset,
			     int c, size_t len, ssize_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_memset(rpages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);

/**
 * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 * @pad : character to use for padding
 */
void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
			     size_t offset, const char *src, size_t len,
			     size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	CHAN_WARN_ON(chanb, !len);
	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
				  '\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);

/**
 * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 *
 * This function deals with userspace pointers, it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
					      size_t offset,
					      const void __user *src, size_t len,
					      ssize_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int ret;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
							+ (offset & ~PAGE_MASK),
							src, pagecpy);
		if (ret > 0) {
			offset += (pagecpy - ret);
			len -= (pagecpy - ret);
			_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
			break; /* stop copy */
		}
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);

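/*
 * Caller-side sketch (illustrative, assuming the access_ok() signature of
 * this file's era): the tracer probe is expected to validate the user
 * pointer before entering atomic context, along the lines of:
 *
 *	if (!access_ok(VERIFY_READ, user_src, len))
 *		return -EFAULT;
 *	_lib_ring_buffer_copy_from_user_inatomic(bufb, offset, user_src,
 *						 len, pagecpy);
 */
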
/**
 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 * @pad : character to use for padding
 *
 * This function deals with userspace pointers, it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
		size_t offset, const char __user *src, size_t len,
		size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
				  '\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);

/**
 * lib_ring_buffer_read - read data from a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy, orig_len;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
		       pagecpy);
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return orig_len;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read);

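/*
 * Reader-side sketch (illustrative; "buf" and "read_offset" are hypothetical
 * placeholders): between get_subbuf() and put_subbuf(), a consumer can pull
 * bytes out of the held sub-buffer:
 *
 *	char rec[64];
 *	size_t copied;
 *
 *	copied = lib_ring_buffer_read(&buf->backend, read_offset,
 *				      rec, sizeof(rec));
 */
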
/**
 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination userspace address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * access_ok() must have been performed on dest addresses prior to calling
 * this function.
 * Returns -EFAULT on error, 0 if ok.
 */
int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
				   size_t offset, void __user *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		if (__copy_to_user(dest,
				   rpages->p[index].virt + (offset & ~PAGE_MASK),
				   pagecpy))
			return -EFAULT;
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);

/**
 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Returns the string's length, or -EINVAL on error.
 * Should be protected by get_subbuf/put_subbuf.
 * Destination length should be at least 1 to hold '\0'.
 */
int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy, pagelen, strpagelen, orig_offset;
	char *str;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	orig_offset = offset;
	if (unlikely(!len))
		return -EINVAL;
	for (;;) {
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
		pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
		strpagelen = strnlen(str, pagelen);
		if (len) {
			pagecpy = min_t(size_t, len, strpagelen);
			if (dest) {
				memcpy(dest, str, pagecpy);
				dest += pagecpy;
			}
			len -= pagecpy;
		}
		offset += strpagelen;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		if (strpagelen < pagelen)
			break;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	if (dest && len)
		((char *)dest)[0] = 0;
	return offset - orig_offset;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);

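/*
 * Usage sketch (illustrative; "buf" and "field_offset" are hypothetical):
 * copying a NULL-terminated event field out of a held sub-buffer into a
 * fixed-size scratch area:
 *
 *	char name[32];
 *	int slen;
 *
 *	slen = lib_ring_buffer_read_cstr(&buf->backend, field_offset,
 *					 name, sizeof(name));
 *	if (slen < 0)
 *		return slen;
 */
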
/**
 * lib_ring_buffer_read_get_page - Get a whole page to read from
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @virt : pointer to page address (output)
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the pointer to the page struct pointer.
 */
struct page **lib_ring_buffer_read_get_page(struct lib_ring_buffer_backend *bufb,
					    size_t offset, void ***virt)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	*virt = &rpages->p[index].virt;
	return &rpages->p[index].page;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_page);

/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to read/write directly
 * from/to this address, as long as the read/write is never bigger than a
 * page size.
 */
void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
					  size_t offset)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);

/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
				     size_t offset)
{
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);