Drivers: hv: remove code duplication between vmbus_recvpacket()/vmbus_recvpacket_raw()

/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>

#include "hyperv_vmbus.h"

/*
 * Mask host interrupts before the guest begins draining the
 * incoming ring buffer.
 */
void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	mb();
}

/*
 * Re-enable host interrupts once the drain is done; return the number
 * of bytes still available to read so the caller can detect (and
 * process) any message that raced in while the mask was being cleared.
 */
u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	u32 read;
	u32 write;

	rbi->ring_buffer->interrupt_mask = 0;
	mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	hv_get_ringbuffer_availbytes(rbi, &read, &write);

	return read;
}

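/*
 * A minimal consumer-loop sketch (illustrative only; the real loop
 * lives in the channel event-processing code, not in this file).
 * process_pending_packets() is hypothetical:
 *
 *	hv_begin_read(rbi);
 *	for (;;) {
 *		process_pending_packets(channel);
 *		if (hv_end_read(rbi) == 0)
 *			break;
 *		hv_begin_read(rbi);
 *	}
 */
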
/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 */

static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
	mb();
	if (rbi->ring_buffer->interrupt_mask)
		return false;

	/* check interrupt_mask before read_index */
	rmb();
	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == rbi->ring_buffer->read_index)
		return true;

	return false;
}

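/*
 * One way to read the barrier pairing above (an illustration, not
 * taken from the Hyper-V spec):
 *
 *	writer (this side)		reader (host)
 *	------------------		-------------
 *	copy packet into ring		interrupt_mask = 0
 *	mb()				mb()
 *	read interrupt_mask		re-check ring for data
 *
 * Whichever side acts last observes the other side's update: either
 * the writer sees interrupt_mask == 0 and signals, or the reader's
 * final re-check finds the new packet. Without the barriers, both
 * checks could read stale values and a packet could go unprocessed.
 */
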
/*
 * To optimize the flow management on the send-side,
 * when the sender is blocked because of lack of
 * sufficient space in the ring buffer, the consumer of
 * the ring buffer can potentially signal the producer.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: This is the size in bytes that the
 *    producer is trying to send.
 * 2. The feature bit feat_pending_send_sz is set to indicate
 *    whether the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */

static bool hv_need_to_signal_on_read(u32 prev_write_sz,
				      struct hv_ring_buffer_info *rbi)
{
	u32 cur_write_sz;
	u32 r_size;
	u32 write_loc = rbi->ring_buffer->write_index;
	u32 read_loc = rbi->ring_buffer->read_index;
	u32 pending_sz = rbi->ring_buffer->pending_send_sz;

	/* If the other end is not blocked on write don't bother. */
	if (pending_sz == 0)
		return false;

	r_size = rbi->ring_datasize;
	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
		read_loc - write_loc;

	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
		return true;

	return false;
}

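/*
 * Worked example for the free-space computation above (numbers are
 * illustrative only): with ring_datasize = 4096, write_index = 3000
 * and read_index = 1000, write >= read, so the writable space is
 * 4096 - (3000 - 1000) = 2096 bytes. With the indices swapped
 * (write_index = 1000, read_index = 3000) it is 3000 - 1000 = 2000
 * bytes. If pending_send_sz were 2048, only the first layout crosses
 * the threshold and would signal the blocked producer.
 */
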
/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}

/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip ahead by 'offset' bytes, e.g. over
 * packet header bytes that have already been consumed.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
}

/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 * Get the ring buffer indices packed as a u64: the write index is
 * placed in the upper 32 bits; the lower 32 bits (nominally the read
 * index) are left as zero.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from the ring buffer into a destination
 * buffer. Assumes there is enough room; handles wrap-around of the
 * source (ring buffer) only!
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	void *dest,
	u32 destlen,
	u32 start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	u32 frag_len;

	/* wrap-around detected at the src */
	if (destlen > ring_buffer_size - start_read_offset) {
		frag_len = ring_buffer_size - start_read_offset;

		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else
		memcpy(dest, ring_buffer + start_read_offset, destlen);

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}

/*
 * Helper routine to copy from a source buffer into the ring buffer.
 * Assumes there is enough room; handles wrap-around of the
 * destination (ring buffer) only!
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	u32 start_write_offset,
	void *src,
	u32 srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;
		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else
		memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}

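/*
 * Worked example of the wrap-around handling above (illustrative
 * numbers): with ring_buffer_size = 4096, start_write_offset = 4000
 * and srclen = 200, only 96 bytes fit before the end of the buffer,
 * so frag_len = 96; the first memcpy() fills offsets 4000..4095 and
 * the second places the remaining 104 bytes at offsets 0..103. The
 * returned offset is (4000 + 200) % 4096 = 104. hv_copyfrom_ringbuffer()
 * handles the mirror-image case on the read side.
 */
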
/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       void *buffer, u32 buflen)
{
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
		return -EINVAL;

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

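/*
 * A minimal setup sketch (illustrative only; in the real driver the
 * VMBus channel-open path does this pairing). The caller hands in a
 * page-aligned buffer whose first page becomes the struct
 * hv_ring_buffer control region; 'order' and 'channel' are
 * hypothetical:
 *
 *	void *ring = (void *)__get_free_pages(GFP_KERNEL, order);
 *
 *	if (ring)
 *		ret = hv_ringbuffer_init(&channel->outbound, ring,
 *					 (1 << order) * PAGE_SIZE);
 */
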
/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
			struct kvec *kv_list, u32 kv_count, bool *signal)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/*
	 * If there is only room for the packet, treat the ring as full:
	 * if we filled it completely, read index == write index and the
	 * next pass would mistake the ring buffer for an empty one.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	*signal = hv_need_to_signal(old_write, outring_info);
	return 0;
}

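/*
 * A minimal sender sketch (illustrative only; the real callers live in
 * channel.c). One kvec per fragment; signal the host only when
 * hv_ringbuffer_write() asks for it. 'desc' and 'payload' are
 * hypothetical:
 *
 *	struct kvec kv[2];
 *	bool signal = false;
 *	int ret;
 *
 *	kv[0].iov_base = &desc;
 *	kv[0].iov_len = sizeof(desc);
 *	kv[1].iov_base = payload;
 *	kv[1].iov_len = payload_len;
 *
 *	ret = hv_ringbuffer_write(&channel->outbound, kv, 2, &signal);
 *	if (ret == 0 && signal)
 *		vmbus_setevent(channel);
 */
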
static inline int __hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
				       void *buffer, u32 buflen, u32 offset,
				       bool *signal, bool advance)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	unsigned long flags;

	if (buflen <= 0)
		return -EINVAL;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);

		return -EAGAIN;
	}

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    buflen,
						    next_read_location);

	if (!advance)
		goto out_unlock;

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	*signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);

out_unlock:
	spin_unlock_irqrestore(&inring_info->ring_lock, flags);
	return 0;
}

/* Read from ring buffer without advancing the read index. */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
		       void *buffer, u32 buflen)
{
	return __hv_ringbuffer_read(inring_info, buffer, buflen,
				    0, NULL, false);
}

/* Read from ring buffer and advance the read index. */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
		       void *buffer, u32 buflen, u32 offset,
		       bool *signal)
{
	return __hv_ringbuffer_read(inring_info, buffer, buflen,
				    offset, signal, true);
}
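
/*
 * A minimal receiver sketch (illustrative only; vmbus_recvpacket() in
 * channel.c follows this shape, which is what makes the peek/read
 * split useful): peek at the fixed-size descriptor first, then read
 * the payload while skipping past the descriptor bytes:
 *
 *	struct vmpacket_descriptor desc;
 *	bool signal = false;
 *	int ret;
 *
 *	ret = hv_ringbuffer_peek(&channel->inbound, &desc, sizeof(desc));
 *	if (ret != 0)
 *		return ret;
 *
 *	ret = hv_ringbuffer_read(&channel->inbound, buffer,
 *				 (desc.len8 << 3) - (desc.offset8 << 3),
 *				 desc.offset8 << 3, &signal);
 *	if (ret == 0 && signal)
 *		vmbus_setevent(channel);
 */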