struct switch_offsets {
	unsigned long begin, end, old;
	size_t pre_header_padding, size;
-	unsigned int switch_new_start:1, switch_old_start:1, switch_old_end:1;
+	unsigned int switch_new_start:1, switch_new_end:1, switch_old_start:1,
+		     switch_old_end:1;
};
#ifdef CONFIG_NO_HZ
					     config->cb.subbuffer_header_size());
}
+/*
+ * lib_ring_buffer_switch_new_end: finish switching the current subbuffer
+ *
+ * The only remaining threads could be the ones with pending commits. They will
+ * have to perform the delivery themselves.
+ */
+static
+void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
+				    struct channel *chan,
+				    struct switch_offsets *offsets,
+				    u64 tsc)
+{
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
+	unsigned long endidx = subbuf_index(offsets->end - 1, chan);
+	unsigned long commit_count, padding_size, data_size;
+
+	data_size = subbuf_offset(offsets->end - 1, chan) + 1;
+	padding_size = chan->backend.subbuf_size - data_size;
+	subbuffer_set_data_size(config, &buf->backend, endidx, data_size);
+
+	/*
+	 * Order all writes to the buffer before the commit count update that
+	 * will determine that the subbuffer is full.
+	 */
+	if (config->ipi == RING_BUFFER_IPI_BARRIER) {
+		/*
+		 * Must write slot data before incrementing commit count. This
+		 * compiler barrier is upgraded into a smp_mb() by the IPI sent
+		 * by get_subbuf().
+		 */
+		barrier();
+	} else
+		smp_wmb();
+	v_add(config, padding_size, &buf->commit_hot[endidx].cc);
+	commit_count = v_read(config, &buf->commit_hot[endidx].cc);
+	lib_ring_buffer_check_deliver(config, buf, chan, offsets->end - 1,
+				      commit_count, endidx);
+	lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
+					     offsets->end, commit_count,
+					     padding_size);
+}
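/*
 * Standalone sketch, not part of the patch: a userspace analogue of the
 * ordering requirement in the function above, using C11 atomics and pthreads
 * instead of the kernel's barrier()/smp_wmb() and the IPI upgrade performed
 * by get_subbuf().  The names slot, commit_count and SLOT_SIZE are made up;
 * the point is only that the payload stores must become visible before the
 * commit-count store that marks the slot (subbuffer) as full.
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

#define SLOT_SIZE 64

static char slot[SLOT_SIZE];
static atomic_ulong commit_count;

static void *writer(void *arg)
{
	(void)arg;
	/* Fill the slot: the "writes to the buffer". */
	for (int i = 0; i < SLOT_SIZE; i++)
		slot[i] = (char)i;
	/*
	 * Release store: every slot write above is ordered before the
	 * commit-count update, like smp_wmb() before v_add() in the patch.
	 */
	atomic_store_explicit(&commit_count, SLOT_SIZE, memory_order_release);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	/* Acquire load: pairs with the writer's release store. */
	while (atomic_load_explicit(&commit_count, memory_order_acquire)
	       < SLOT_SIZE)
		sched_yield();
	/* The slot contents are guaranteed to be visible here. */
	printf("slot[%d] = %d\n", SLOT_SIZE - 1, slot[SLOT_SIZE - 1]);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}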
+
/*
 * Returns :
 * 0 if ok
	offsets->begin = v_read(config, &buf->offset);
	offsets->old = offsets->begin;
	offsets->switch_new_start = 0;
+	offsets->switch_new_end = 0;
	offsets->switch_old_end = 0;
	offsets->pre_header_padding = 0;
		 */
	}
	offsets->end = offsets->begin + offsets->size;
+
+	if (unlikely(subbuf_offset(offsets->end, chan) == 0)) {
+		/*
+		 * The offset_end will fall at the very beginning of the next
+		 * subbuffer.
+		 */
+		offsets->switch_new_end = 1;	/* For offsets->begin */
+	}
	return 0;
}
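/*
 * Standalone sketch, not part of the patch: models the boundary check above
 * and the size computation done by lib_ring_buffer_switch_new_end().  It
 * assumes a power-of-two subbuffer size, that subbuf_offset() masks the
 * offset with (subbuf_size - 1), and that subbuf_index() is essentially
 * (offset / subbuf_size) modulo the number of subbuffers; the sizes and
 * offsets below are made-up example values.
 */
#include <assert.h>
#include <stdio.h>

#define SUBBUF_SIZE	4096UL	/* stand-in for chan->backend.subbuf_size */
#define NUM_SUBBUF	4UL	/* stand-in for chan->backend.num_subbuf */

static unsigned long subbuf_offset(unsigned long offset)
{
	return offset & (SUBBUF_SIZE - 1);
}

static unsigned long subbuf_index(unsigned long offset)
{
	return (offset / SUBBUF_SIZE) & (NUM_SUBBUF - 1);
}

int main(void)
{
	/* A reservation ending 100 bytes short of a boundary: no switch. */
	unsigned long end = 3 * SUBBUF_SIZE - 100;

	assert(subbuf_offset(end) != 0);	/* switch_new_end stays 0 */

	/* A reservation ending exactly on a boundary sets switch_new_end. */
	end = 3 * SUBBUF_SIZE;
	assert(subbuf_offset(end) == 0);

	/*
	 * end itself already belongs to the next subbuffer, which is why the
	 * switch code indexes with end - 1: that offset still lies in the
	 * subbuffer being closed.
	 */
	assert(subbuf_index(end) == 3 && subbuf_index(end - 1) == 2);

	/*
	 * The last record filled the subbuffer completely, so the data size
	 * is the whole subbuffer and no padding needs to be accounted.
	 */
	unsigned long data_size = subbuf_offset(end - 1) + 1;
	unsigned long padding_size = SUBBUF_SIZE - data_size;

	assert(data_size == SUBBUF_SIZE && padding_size == 0);
	printf("endidx=%lu data_size=%lu padding_size=%lu\n",
	       subbuf_index(end - 1), data_size, padding_size);
	return 0;
}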
	if (unlikely(offsets.switch_new_start))
		lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc);
+	if (unlikely(offsets.switch_new_end))
+		lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc);
+
	ctx->slot_size = offsets.size;
	ctx->pre_offset = offsets.begin;
	ctx->buf_offset = offsets.begin + offsets.pre_header_padding;
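/*
 * Standalone sketch, not part of the patch: illustrates the reservation
 * context fields set above with made-up numbers.  ctx->pre_offset marks the
 * start of the reserved slot, ctx->buf_offset skips the pre-header padding,
 * ctx->slot_size is the full reserved length, and offsets.end (begin + size)
 * is the value the new switch_new_end check inspects.
 */
#include <assert.h>

int main(void)
{
	unsigned long subbuf_size = 4096;		/* example subbuffer size */
	unsigned long begin = 2 * subbuf_size - 32;	/* offsets.begin */
	unsigned long pre_header_padding = 4;		/* example alignment padding */
	unsigned long size = 32;			/* offsets.size */

	unsigned long end = begin + size;			/* offsets.end */
	unsigned long slot_size = size;				/* ctx->slot_size */
	unsigned long pre_offset = begin;			/* ctx->pre_offset */
	unsigned long buf_offset = begin + pre_header_padding;	/* ctx->buf_offset */

	/* The payload area starts inside the reserved slot. */
	assert(pre_offset <= buf_offset &&
	       buf_offset < pre_offset + slot_size);
	/*
	 * This particular slot ends exactly on a subbuffer boundary, so the
	 * reserve path above would also call lib_ring_buffer_switch_new_end().
	 */
	assert((end & (subbuf_size - 1)) == 0);
	return 0;
}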