/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>

#include "hyperv_vmbus.h"

/* Mask host-to-guest interrupts while the guest drains the ring buffer. */
void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;

	/* Make sure the mask update is visible before the host checks it. */
	virt_mb();
}

/* Re-enable interrupts and report any bytes that arrived while masked. */
u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 0;
	virt_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and need to process new
	 * incoming messages.
	 */
	return hv_get_bytes_to_read(rbi);
}

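/*
 * hv_begin_read()/hv_end_read() bracket a batched-read loop. A minimal
 * sketch of a caller (hypothetical; packets_available() and
 * process_packet() are stand-ins for the real per-packet work):
 *
 *	for (;;) {
 *		hv_begin_read(rbi);		// mask while draining
 *		while (packets_available(rbi))
 *			process_packet(rbi);
 *		if (!hv_end_read(rbi))		// re-arm and re-check
 *			break;			// truly empty: wait for irq
 *	}
 */
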
/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 */

static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return false;

	/* check interrupt_mask before read_index */
	virt_rmb();

	/*
	 * This is the only case we need to signal: when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
		return true;

	return false;
}

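/*
 * Worked example of the check above: suppose read_index == write_index
 * == 4080 (ring empty) when the guest starts a write; old_write is the
 * write_index sampled before copying, i.e. 4080. If the host's
 * read_index is still 4080 after the write index is published, the host
 * saw an empty ring and went idle, so it must be signaled. If
 * read_index has moved, the host is still inside its read loop and the
 * protocol above guarantees it will drain the new data without an
 * interrupt. (The numbers are illustrative only.)
 */
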
/*
 * To optimize flow management on the send-side, when the sender is
 * blocked because of insufficient space in the ring buffer, the
 * consumer of the ring buffer can signal the producer. This is
 * controlled by the following parameters:
 *
 * 1. pending_send_sz: This is the size in bytes that the
 *    producer is trying to send.
 * 2. The feature bit feat_pending_send_sz, set to indicate that
 *    the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */

static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
{
	u32 cur_write_sz;
	u32 pending_sz;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * Here is the reason for having this barrier:
	 * If the read of pending_send_sz (in this function) were reordered
	 * before we commit the new read index (in the calling function),
	 * we could have a problem. If the host were to set pending_send_sz
	 * after we have sampled it, and then go to sleep before we commit
	 * the read index, we could miss sending the interrupt. Issue a
	 * full memory barrier to address this.
	 */
	virt_mb();

	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	/* If the other end is not blocked on write, don't bother. */
	if (pending_sz == 0)
		return false;

	cur_write_sz = hv_get_bytes_to_write(rbi);

	if (cur_write_sz >= pending_sz)
		return true;

	return false;
}

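/*
 * For illustration: if the host set pending_send_sz = 1024 because a
 * 1024-byte packet would not fit, and after our read
 * hv_get_bytes_to_write() reports 2048 free bytes, then 2048 >= 1024
 * and the producer is signaled that its pending packet now fits.
 * (Sizes are illustrative only.)
 */
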
/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}

/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip ahead by @offset bytes.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}

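/*
 * Example of the wrap-around arithmetic: with ring_datasize = 4096,
 * read_index = 4000 and offset = 200, next = (4000 + 200) % 4096 = 104,
 * i.e. the location wraps past the end of the data area back to its
 * start. (Numbers are illustrative only.)
 */
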
/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
}

/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}

/* Get the size of the ring buffer's data area. */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/* Get the read and write indices as a u64 for the specified ring buffer. */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

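/*
 * Only the write index is packed into the upper 32 bits here; the lower
 * 32 bits are left zero. For example, a write_index of 0x10 yields
 * 0x0000001000000000. This value is written after each packet as the
 * trailing prev_indices field (see hv_ringbuffer_write() below).
 */
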
/*
 * Helper routine to copy from the ring buffer into a destination buffer.
 * Assumes there is enough room. Handles wrap-around of the source only!
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	void				*dest,
	u32				destlen,
	u32				start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected at the src */
	if (destlen > ring_buffer_size - start_read_offset) {
		frag_len = ring_buffer_size - start_read_offset;

		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else
		memcpy(dest, ring_buffer + start_read_offset, destlen);

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}

/*
 * Helper routine to copy from a source buffer into the ring buffer.
 * Assumes there is enough room. Handles wrap-around of the destination only!
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	void				*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;
		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else
		memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}

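/*
 * Worked example of the wrap-around split in the two helpers above:
 * with ring_buffer_size = 4096 and start_write_offset = 4000, a
 * 200-byte copy does not fit contiguously (4096 - 4000 = 96 < 200), so
 * frag_len = 96: bytes 0..95 of src land at offsets 4000..4095 and
 * bytes 96..199 land at offsets 0..103. The returned offset is
 * (4000 + 200) % 4096 = 104. (Numbers are illustrative only.)
 */
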
/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       void *buffer, u32 buflen)
{
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
		return -EINVAL;

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

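/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * the backing buffer must begin with one full page for the
 * struct hv_ring_buffer header, so a page-based allocation is typical:
 *
 *	void *ring = (void *)__get_free_pages(GFP_KERNEL, order);
 *	struct hv_ring_buffer_info rbi;
 *	int ret = hv_ringbuffer_init(&rbi, ring, (1 << order) * PAGE_SIZE);
 *
 * On success (ret == 0), ring_datasize is the total length minus the
 * header page.
 */
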
/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
			struct kvec *kv_list, u32 kv_count,
			bool *signal, bool lock)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = 0;

	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags = 0;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	totalbytes_towrite += sizeof(u64);

	if (lock)
		spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		if (lock)
			spin_unlock_irqrestore(&outring_info->ring_lock,
					       flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	if (lock)
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	*signal = hv_need_to_signal(old_write, outring_info);
	return 0;
}

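/*
 * Usage sketch (hypothetical caller, not part of this file): a packet
 * is normally submitted as a kvec list of descriptor + payload:
 *
 *	struct kvec kv[2];
 *	bool signal = false;
 *
 *	kv[0].iov_base = &desc;		// struct vmpacket_descriptor
 *	kv[0].iov_len  = sizeof(desc);
 *	kv[1].iov_base = payload;
 *	kv[1].iov_len  = payload_len;
 *
 *	ret = hv_ringbuffer_write(&channel->outbound, kv, 2, &signal, lock);
 *	if (!ret && signal)
 *		vmbus_setevent(channel);	// interrupt the host
 *
 * This mirrors how vmbus_sendpacket() drives this function.
 */
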
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool *signal, bool raw)
{
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	struct vmpacket_descriptor desc;
	u32 offset;
	u32 packetlen;
	int ret = 0;

	if (buflen == 0)
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	bytes_avail_toread = hv_get_bytes_to_read(inring_info);
	/* Make sure there is something to read */
	if (bytes_avail_toread < sizeof(desc)) {
		/*
		 * No error is set when there is not even a header; drivers
		 * are supposed to analyze buffer_actual_len.
		 */
		return ret;
	}

	next_read_location = hv_get_next_read_location(inring_info);
	next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
						    sizeof(desc),
						    next_read_location);

	offset = raw ? 0 : (desc.offset8 << 3);
	packetlen = (desc.len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc.trans_id;

	if (bytes_avail_toread < packetlen + offset)
		return -EAGAIN;

	if (packetlen > buflen)
		return -ENOBUFS;

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    packetlen,
						    next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	*signal = hv_need_to_signal_on_read(inring_info);

	return ret;
}

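/*
 * Usage sketch (hypothetical caller, not part of this file):
 *
 *	u32 len = 0;
 *	u64 req_id;
 *	bool signal = false;
 *	u8 pkt[512];
 *
 *	ret = hv_ringbuffer_read(&channel->inbound, pkt, sizeof(pkt),
 *				 &len, &req_id, &signal, false);
 *	if (ret == 0 && len == 0)
 *		;	// ring was empty: no packet, not an error
 *	if (signal)
 *		vmbus_setevent(channel);	// unblock a waiting producer
 *
 * This mirrors how vmbus_recvpacket() drives this function.
 */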