1 /*
2 *
3 * Copyright (c) 2011, Microsoft Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 *
18 * Authors:
19 * Haiyang Zhang <haiyangz@microsoft.com>
20 * Hank Janssen <hjanssen@microsoft.com>
21 * K. Y. Srinivasan <kys@microsoft.com>
22 *
23 */
24
25 #ifndef _HYPERV_H
26 #define _HYPERV_H
27
28 #include <linux/scatterlist.h>
29 #include <linux/list.h>
30 #include <linux/uuid.h>
31 #include <linux/timer.h>
32 #include <linux/workqueue.h>
33 #include <linux/completion.h>
34 #include <linux/device.h>
35 #include <linux/mod_devicetable.h>
36
37
38 #include <asm/hyperv.h>
39
40
41 #define MAX_PAGE_BUFFER_COUNT 16
42 #define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */
43
44 #pragma pack(push, 1)
45
46 /* Single-page buffer */
47 struct hv_page_buffer {
48 u32 len;
49 u32 offset;
50 u64 pfn;
51 };
52
53 /* Multiple-page buffer */
54 struct hv_multipage_buffer {
55 /* Length and Offset determine the number of pfns in the array */
56 u32 len;
57 u32 offset;
58 u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
59 };
60
61 /* 0x18 includes the proprietary packet header */
62 #define MAX_PAGE_BUFFER_PACKET (0x18 + \
63 (sizeof(struct hv_page_buffer) * \
64 MAX_PAGE_BUFFER_COUNT))
65 #define MAX_MULTIPAGE_BUFFER_PACKET (0x18 + \
66 sizeof(struct hv_multipage_buffer))
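/*
 * Worked example (editorial note, not part of the original header): with
 * the packed layouts above, sizeof(struct hv_page_buffer) is 16 bytes
 * (4 + 4 + 8), so
 *
 *	MAX_PAGE_BUFFER_PACKET      = 0x18 + 16 * 16            = 280 bytes
 *	MAX_MULTIPAGE_BUFFER_PACKET = 0x18 + (4 + 4 + 32 * 8)   = 288 bytes
 *
 * where 0x18 (24 bytes) is the proprietary packet header mentioned above.
 */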
67
68
69 #pragma pack(pop)
70
71 struct hv_ring_buffer {
72 /* Offset in bytes from the start of ring data below */
73 u32 write_index;
74
75 /* Offset in bytes from the start of ring data below */
76 u32 read_index;
77
78 u32 interrupt_mask;
79
80 /* Pad it to PAGE_SIZE so that data starts on a page boundary */
81 u8 reserved[4084];
82
83 /* NOTE:
84 * The interrupt_mask field is used only for channels but since our
85 * vmbus connection also uses this data structure and its data starts
86 * here, we commented out this field.
87 */
88
89 /*
90 * Ring data starts here + RingDataStartOffset
91 * !!! DO NOT place any fields below this !!!
92 */
93 u8 buffer[0];
94 } __packed;
95
96 struct hv_ring_buffer_info {
97 struct hv_ring_buffer *ring_buffer;
98 u32 ring_size; /* Includes the shared header */
99 spinlock_t ring_lock;
100
101 u32 ring_datasize; /* < ring_size */
102 u32 ring_data_startoffset;
103 };
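/*
 * Illustrative sketch (editorial addition, not part of this header): the
 * read/write indices above are byte offsets into the ring data area, so
 * the space available for reading can be derived with ordinary circular-
 * buffer arithmetic. The real helpers live in the ring buffer
 * implementation; hv_example_bytes_avail_toread() is a made-up name that
 * only approximates that logic.
 *
 *	static inline u32 hv_example_bytes_avail_toread(
 *				const struct hv_ring_buffer_info *rbi)
 *	{
 *		u32 read = rbi->ring_buffer->read_index;
 *		u32 write = rbi->ring_buffer->write_index;
 *
 *		if (write >= read)
 *			return write - read;
 *		return rbi->ring_datasize - read + write;
 *	}
 */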
104
105 struct hv_ring_buffer_debug_info {
106 u32 current_interrupt_mask;
107 u32 current_read_index;
108 u32 current_write_index;
109 u32 bytes_avail_toread;
110 u32 bytes_avail_towrite;
111 };
112
113 /*
114 * We use the same version numbering for all Hyper-V modules.
115 *
116 * Definition of versioning is as follows:
117 *
118 * Major Number Changes for these scenarios:
119 * 1. When a new version of Windows Hyper-V
120 * is released.
121 * 2. A major change has occurred in the
122 * Linux ICs.
123 * (For example, the first merge into the
124 * kernel.) Every time the Major Number
125 * changes, the Revision number is reset to 0.
126 * Minor Number Changes when new functionality is added
127 * to the Linux ICs that is not a bug fix.
128 *
129 * 3.1 - Added completed hv_utils driver. Shutdown/Heartbeat/Timesync
130 */
131 #define HV_DRV_VERSION "3.1"
132
133
134 /*
135 * A revision number of vmbus that is used for ensuring both ends on a
136 * partition are using compatible versions.
137 */
138 #define VMBUS_REVISION_NUMBER 13
139
140 /* Maximum size of a pipe payload is 16K */
141 #define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
142
143 /* Define PipeMode values. */
144 #define VMBUS_PIPE_TYPE_BYTE 0x00000000
145 #define VMBUS_PIPE_TYPE_MESSAGE 0x00000004
146
147 /* The size of the user defined data buffer for non-pipe offers. */
148 #define MAX_USER_DEFINED_BYTES 120
149
150 /* The size of the user defined data buffer for pipe offers. */
151 #define MAX_PIPE_USER_DEFINED_BYTES 116
152
153 /*
154 * At the center of the Channel Management library is the Channel Offer. This
155 * struct contains the fundamental information about an offer.
156 */
157 struct vmbus_channel_offer {
158 uuid_le if_type;
159 uuid_le if_instance;
160 u64 int_latency; /* in 100ns units */
161 u32 if_revision;
162 u32 server_ctx_size; /* in bytes */
163 u16 chn_flags;
164 u16 mmio_megabytes; /* in bytes * 1024 * 1024 */
165
166 union {
167 /* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
168 struct {
169 unsigned char user_def[MAX_USER_DEFINED_BYTES];
170 } std;
171
172 /*
173 * Pipes:
174 * The following structure is an integrated pipe protocol, which
175 * is implemented on top of standard user-defined data. Pipe
176 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
177 * use.
178 */
179 struct {
180 u32 pipe_mode;
181 unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
182 } pipe;
183 } u;
184 u32 padding;
185 } __packed;
186
187 /* Server Flags */
188 #define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 1
189 #define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES 2
190 #define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS 4
191 #define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x10
192 #define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100
193 #define VMBUS_CHANNEL_PARENT_OFFER 0x200
194 #define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400
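/*
 * Illustrative sketch (editorial addition): which member of the
 * user-defined union in struct vmbus_channel_offer applies is expected to
 * follow the VMBUS_CHANNEL_NAMED_PIPE_MODE flag. offer_is_pipe() below is
 * a hypothetical helper, not an API provided by this header.
 *
 *	static inline bool offer_is_pipe(const struct vmbus_channel_offer *o)
 *	{
 *		return (o->chn_flags & VMBUS_CHANNEL_NAMED_PIPE_MODE) != 0;
 *	}
 *
 * A pipe offer would then use o->u.pipe.pipe_mode and o->u.pipe.user_def,
 * while a non-pipe offer uses o->u.std.user_def.
 */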
195
196 struct vmpacket_descriptor {
197 u16 type;
198 u16 offset8;
199 u16 len8;
200 u16 flags;
201 u64 trans_id;
202 } __packed;
203
204 struct vmpacket_header {
205 u32 prev_pkt_start_offset;
206 struct vmpacket_descriptor descriptor;
207 } __packed;
208
209 struct vmtransfer_page_range {
210 u32 byte_count;
211 u32 byte_offset;
212 } __packed;
213
214 struct vmtransfer_page_packet_header {
215 struct vmpacket_descriptor d;
216 u16 xfer_pageset_id;
217 bool sender_owns_set;
218 u8 reserved;
219 u32 range_cnt;
220 struct vmtransfer_page_range ranges[1];
221 } __packed;
222
223 struct vmgpadl_packet_header {
224 struct vmpacket_descriptor d;
225 u32 gpadl;
226 u32 reserved;
227 } __packed;
228
229 struct vmadd_remove_transfer_page_set {
230 struct vmpacket_descriptor d;
231 u32 gpadl;
232 u16 xfer_pageset_id;
233 u16 reserved;
234 } __packed;
235
236 /*
237 * This structure defines a range in guest physical space that can be made to
238 * look virtually contiguous.
239 */
240 struct gpa_range {
241 u32 byte_count;
242 u32 byte_offset;
243 u64 pfn_array[0];
244 };
245
246 /*
247 * This is the format for an Establish Gpadl packet, which contains a handle by
248 * which this GPADL will be known and a set of GPA ranges associated with it.
249 * This can be converted to a MDL by the guest OS. If there are multiple GPA
250 * ranges, then the resulting MDL will be "chained," representing multiple VA
251 * ranges.
252 */
253 struct vmestablish_gpadl {
254 struct vmpacket_descriptor d;
255 u32 gpadl;
256 u32 range_cnt;
257 struct gpa_range range[1];
258 } __packed;
259
260 /*
261 * This is the format for a Teardown Gpadl packet, which indicates that the
262 * GPADL handle in the Establish Gpadl packet will never be referenced again.
263 */
264 struct vmteardown_gpadl {
265 struct vmpacket_descriptor d;
266 u32 gpadl;
267 u32 reserved; /* for alignment to an 8-byte boundary */
268 } __packed;
269
270 /*
271 * This is the format for a GPA-Direct packet, which contains a set of GPA
272 * ranges, in addition to commands and/or data.
273 */
274 struct vmdata_gpa_direct {
275 struct vmpacket_descriptor d;
276 u32 reserved;
277 u32 range_cnt;
278 struct gpa_range range[1];
279 } __packed;
280
281 /* This is the format for an Additional Data Packet. */
282 struct vmadditional_data {
283 struct vmpacket_descriptor d;
284 u64 total_bytes;
285 u32 offset;
286 u32 byte_cnt;
287 unsigned char data[1];
288 } __packed;
289
290 union vmpacket_largest_possible_header {
291 struct vmpacket_descriptor simple_hdr;
292 struct vmtransfer_page_packet_header xfer_page_hdr;
293 struct vmgpadl_packet_header gpadl_hdr;
294 struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
295 struct vmestablish_gpadl establish_gpadl_hdr;
296 struct vmteardown_gpadl teardown_gpadl_hdr;
297 struct vmdata_gpa_direct data_gpa_direct_hdr;
298 };
299
300 #define VMPACKET_DATA_START_ADDRESS(__packet) \
301 (void *)(((unsigned char *)(__packet)) + \
302 ((struct vmpacket_descriptor *)(__packet))->offset8 * 8)
303
304 #define VMPACKET_DATA_LENGTH(__packet) \
305 ((((struct vmpacket_descriptor *)(__packet))->len8 - \
306 ((struct vmpacket_descriptor *)(__packet))->offset8) * 8)
307
308 #define VMPACKET_TRANSFER_MODE(__packet) \
309 (((struct vmpacket_descriptor *)(__packet))->type)
310
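/*
 * Worked example (editorial addition): offset8 and len8 in struct
 * vmpacket_descriptor are expressed in 8-byte units, which is what the
 * macros above rely on. For a received packet descriptor desc:
 *
 *	void *payload   = (u8 *)desc + desc->offset8 * 8;
 *	u32 payload_len = (desc->len8 - desc->offset8) * 8;
 *
 * i.e. the header occupies offset8 * 8 bytes and the whole packet is
 * len8 * 8 bytes long.
 */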
311 enum vmbus_packet_type {
312 VM_PKT_INVALID = 0x0,
313 VM_PKT_SYNCH = 0x1,
314 VM_PKT_ADD_XFER_PAGESET = 0x2,
315 VM_PKT_RM_XFER_PAGESET = 0x3,
316 VM_PKT_ESTABLISH_GPADL = 0x4,
317 VM_PKT_TEARDOWN_GPADL = 0x5,
318 VM_PKT_DATA_INBAND = 0x6,
319 VM_PKT_DATA_USING_XFER_PAGES = 0x7,
320 VM_PKT_DATA_USING_GPADL = 0x8,
321 VM_PKT_DATA_USING_GPA_DIRECT = 0x9,
322 VM_PKT_CANCEL_REQUEST = 0xa,
323 VM_PKT_COMP = 0xb,
324 VM_PKT_DATA_USING_ADDITIONAL_PKT = 0xc,
325 VM_PKT_ADDITIONAL_DATA = 0xd
326 };
327
328 #define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED 1
329
330
331 /* Version 1 messages */
332 enum vmbus_channel_message_type {
333 CHANNELMSG_INVALID = 0,
334 CHANNELMSG_OFFERCHANNEL = 1,
335 CHANNELMSG_RESCIND_CHANNELOFFER = 2,
336 CHANNELMSG_REQUESTOFFERS = 3,
337 CHANNELMSG_ALLOFFERS_DELIVERED = 4,
338 CHANNELMSG_OPENCHANNEL = 5,
339 CHANNELMSG_OPENCHANNEL_RESULT = 6,
340 CHANNELMSG_CLOSECHANNEL = 7,
341 CHANNELMSG_GPADL_HEADER = 8,
342 CHANNELMSG_GPADL_BODY = 9,
343 CHANNELMSG_GPADL_CREATED = 10,
344 CHANNELMSG_GPADL_TEARDOWN = 11,
345 CHANNELMSG_GPADL_TORNDOWN = 12,
346 CHANNELMSG_RELID_RELEASED = 13,
347 CHANNELMSG_INITIATE_CONTACT = 14,
348 CHANNELMSG_VERSION_RESPONSE = 15,
349 CHANNELMSG_UNLOAD = 16,
350 #ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
351 CHANNELMSG_VIEWRANGE_ADD = 17,
352 CHANNELMSG_VIEWRANGE_REMOVE = 18,
353 #endif
354 CHANNELMSG_COUNT
355 };
356
357 struct vmbus_channel_message_header {
358 enum vmbus_channel_message_type msgtype;
359 u32 padding;
360 } __packed;
361
362 /* Query VMBus Version parameters */
363 struct vmbus_channel_query_vmbus_version {
364 struct vmbus_channel_message_header header;
365 u32 version;
366 } __packed;
367
368 /* VMBus Version Supported parameters */
369 struct vmbus_channel_version_supported {
370 struct vmbus_channel_message_header header;
371 bool version_supported;
372 } __packed;
373
374 /* Offer Channel parameters */
375 struct vmbus_channel_offer_channel {
376 struct vmbus_channel_message_header header;
377 struct vmbus_channel_offer offer;
378 u32 child_relid;
379 u8 monitorid;
380 bool monitor_allocated;
381 } __packed;
382
383 /* Rescind Offer parameters */
384 struct vmbus_channel_rescind_offer {
385 struct vmbus_channel_message_header header;
386 u32 child_relid;
387 } __packed;
388
389 /*
390 * Request Offer -- no parameters, SynIC message contains the partition ID
391 * Set Snoop -- no parameters, SynIC message contains the partition ID
392 * Clear Snoop -- no parameters, SynIC message contains the partition ID
393 * All Offers Delivered -- no parameters, SynIC message contains the partition
394 * ID
395 * Flush Client -- no parameters, SynIC message contains the partition ID
396 */
397
398 /* Open Channel parameters */
399 struct vmbus_channel_open_channel {
400 struct vmbus_channel_message_header header;
401
402 /* Identifies the specific VMBus channel that is being opened. */
403 u32 child_relid;
404
405 /* ID making a particular open request at a channel offer unique. */
406 u32 openid;
407
408 /* GPADL for the channel's ring buffer. */
409 u32 ringbuffer_gpadlhandle;
410
411 /* GPADL for the channel's server context save area. */
412 u32 server_contextarea_gpadlhandle;
413
414 /*
415 * The upstream ring buffer begins at offset zero in the memory
416 * described by RingBufferGpadlHandle. The downstream ring buffer
417 * follows it at this offset (in pages).
418 */
419 u32 downstream_ringbuffer_pageoffset;
420
421 /* User-specific data to be passed along to the server endpoint. */
422 unsigned char userdata[MAX_USER_DEFINED_BYTES];
423 } __packed;
424
425 /* Open Channel Result parameters */
426 struct vmbus_channel_open_result {
427 struct vmbus_channel_message_header header;
428 u32 child_relid;
429 u32 openid;
430 u32 status;
431 } __packed;
432
433 /* Close Channel parameters */
434 struct vmbus_channel_close_channel {
435 struct vmbus_channel_message_header header;
436 u32 child_relid;
437 } __packed;
438
439 /* Channel Message GPADL */
440 #define GPADL_TYPE_RING_BUFFER 1
441 #define GPADL_TYPE_SERVER_SAVE_AREA 2
442 #define GPADL_TYPE_TRANSACTION 8
443
444 /*
445 * The number of PFNs in a GPADL message is defined by the number of
446 * pages that would be spanned by ByteCount and ByteOffset. If the
447 * implied number of PFNs won't fit in this packet, there will be a
448 * follow-up packet that contains more.
449 */
450 struct vmbus_channel_gpadl_header {
451 struct vmbus_channel_message_header header;
452 u32 child_relid;
453 u32 gpadl;
454 u16 range_buflen;
455 u16 rangecount;
456 struct gpa_range range[0];
457 } __packed;
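/*
 * Worked example (editorial addition): the number of PFNs implied by a
 * struct gpa_range is the number of pages spanned by byte_offset and
 * byte_count, e.g.
 *
 *	u32 pfn_count = (range->byte_offset + range->byte_count +
 *			 PAGE_SIZE - 1) >> PAGE_SHIFT;
 *
 * If the resulting pfn_array does not fit in this message, the remaining
 * PFNs follow in struct vmbus_channel_gpadl_body messages.
 */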
458
459 /* This is the follow-up packet that contains more PFNs. */
460 struct vmbus_channel_gpadl_body {
461 struct vmbus_channel_message_header header;
462 u32 msgnumber;
463 u32 gpadl;
464 u64 pfn[0];
465 } __packed;
466
467 struct vmbus_channel_gpadl_created {
468 struct vmbus_channel_message_header header;
469 u32 child_relid;
470 u32 gpadl;
471 u32 creation_status;
472 } __packed;
473
474 struct vmbus_channel_gpadl_teardown {
475 struct vmbus_channel_message_header header;
476 u32 child_relid;
477 u32 gpadl;
478 } __packed;
479
480 struct vmbus_channel_gpadl_torndown {
481 struct vmbus_channel_message_header header;
482 u32 gpadl;
483 } __packed;
484
485 #ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
486 struct vmbus_channel_view_range_add {
487 struct vmbus_channel_message_header header;
488 PHYSICAL_ADDRESS viewrange_base;
489 u64 viewrange_length;
490 u32 child_relid;
491 } __packed;
492
493 struct vmbus_channel_view_range_remove {
494 struct vmbus_channel_message_header header;
495 PHYSICAL_ADDRESS viewrange_base;
496 u32 child_relid;
497 } __packed;
498 #endif
499
500 struct vmbus_channel_relid_released {
501 struct vmbus_channel_message_header header;
502 u32 child_relid;
503 } __packed;
504
505 struct vmbus_channel_initiate_contact {
506 struct vmbus_channel_message_header header;
507 u32 vmbus_version_requested;
508 u32 padding2;
509 u64 interrupt_page;
510 u64 monitor_page1;
511 u64 monitor_page2;
512 } __packed;
513
514 struct vmbus_channel_version_response {
515 struct vmbus_channel_message_header header;
516 bool version_supported;
517 } __packed;
518
519 enum vmbus_channel_state {
520 CHANNEL_OFFER_STATE,
521 CHANNEL_OPENING_STATE,
522 CHANNEL_OPEN_STATE,
523 };
524
525 struct vmbus_channel_debug_info {
526 u32 relid;
527 enum vmbus_channel_state state;
528 uuid_le interfacetype;
529 uuid_le interface_instance;
530 u32 monitorid;
531 u32 servermonitor_pending;
532 u32 servermonitor_latency;
533 u32 servermonitor_connectionid;
534 u32 clientmonitor_pending;
535 u32 clientmonitor_latency;
536 u32 clientmonitor_connectionid;
537
538 struct hv_ring_buffer_debug_info inbound;
539 struct hv_ring_buffer_debug_info outbound;
540 };
541
542 /*
543 * Represents each channel msg on the vmbus connection. This is a
544 * variable-size data structure whose layout depends on the msg type.
545 */
546 struct vmbus_channel_msginfo {
547 /* Bookkeeping stuff */
548 struct list_head msglistentry;
549
550 /* So far, this is only used to handle gpadl body message */
551 struct list_head submsglist;
552
553 /* Synchronize the request/response if needed */
554 struct completion waitevent;
555 union {
556 struct vmbus_channel_version_supported version_supported;
557 struct vmbus_channel_open_result open_result;
558 struct vmbus_channel_gpadl_torndown gpadl_torndown;
559 struct vmbus_channel_gpadl_created gpadl_created;
560 struct vmbus_channel_version_response version_response;
561 } response;
562
563 u32 msgsize;
564 /*
565 * The channel message that goes out on the "wire".
566 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
567 */
568 unsigned char msg[0];
569 };
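/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): because msg[] is a zero-length trailing array, a msginfo for a
 * message of payload_size bytes would typically be allocated in one piece:
 *
 *	struct vmbus_channel_msginfo *info;
 *
 *	info = kzalloc(sizeof(*info) + payload_size, GFP_KERNEL);
 *	if (!info)
 *		return -ENOMEM;
 *	info->msgsize = payload_size;
 *
 * payload_size here is a placeholder for the size of the concrete channel
 * message being sent.
 */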
570
571 struct vmbus_close_msg {
572 struct vmbus_channel_msginfo info;
573 struct vmbus_channel_close_channel msg;
574 };
575
576 struct vmbus_channel {
577 struct list_head listentry;
578
579 struct hv_device *device_obj;
580
581 struct work_struct work;
582
583 enum vmbus_channel_state state;
584
585 struct vmbus_channel_offer_channel offermsg;
586 /*
587 * These are based on the OfferMsg.MonitorId.
588 * Save them here for easy access.
589 */
590 u8 monitor_grp;
591 u8 monitor_bit;
592
593 u32 ringbuffer_gpadlhandle;
594
595 /* Allocated memory for ring buffer */
596 void *ringbuffer_pages;
597 u32 ringbuffer_pagecount;
598 struct hv_ring_buffer_info outbound; /* send to parent */
599 struct hv_ring_buffer_info inbound; /* receive from parent */
600 spinlock_t inbound_lock;
601 struct workqueue_struct *controlwq;
602
603 struct vmbus_close_msg close_msg;
604
605 /* Channel callbacks are invoked in this workqueue context */
606 /* HANDLE dataWorkQueue; */
607
608 void (*onchannel_callback)(void *context);
609 void *channel_callback_context;
610 };
611
612 void free_channel(struct vmbus_channel *channel);
613
614 void vmbus_onmessage(void *context);
615
616 int vmbus_request_offers(void);
617
618 /* The format must be the same as struct vmdata_gpa_direct */
619 struct vmbus_channel_packet_page_buffer {
620 u16 type;
621 u16 dataoffset8;
622 u16 length8;
623 u16 flags;
624 u64 transactionid;
625 u32 reserved;
626 u32 rangecount;
627 struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
628 } __packed;
629
630 /* The format must be the same as struct vmdata_gpa_direct */
631 struct vmbus_channel_packet_multipage_buffer {
632 u16 type;
633 u16 dataoffset8;
634 u16 length8;
635 u16 flags;
636 u64 transactionid;
637 u32 reserved;
638 u32 rangecount; /* Always 1 in this case */
639 struct hv_multipage_buffer range;
640 } __packed;
641
642
643 extern int vmbus_open(struct vmbus_channel *channel,
644 u32 send_ringbuffersize,
645 u32 recv_ringbuffersize,
646 void *userdata,
647 u32 userdatalen,
648 void(*onchannel_callback)(void *context),
649 void *context);
650
651 extern void vmbus_close(struct vmbus_channel *channel);
652
653 extern int vmbus_sendpacket(struct vmbus_channel *channel,
654 const void *buffer,
655 u32 bufferlen,
656 u64 requestid,
657 enum vmbus_packet_type type,
658 u32 flags);
659
660 extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
661 struct hv_page_buffer pagebuffers[],
662 u32 pagecount,
663 void *buffer,
664 u32 bufferlen,
665 u64 requestid);
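/*
 * Illustrative sketch (editorial addition, not a documented usage
 * contract): a caller of vmbus_sendpacket_pagebuffer() describes the data
 * pages with hv_page_buffer entries, roughly along these lines for a
 * single kernel buffer "data" of length "len" within one page:
 *
 *	struct hv_page_buffer pb[1];
 *
 *	pb[0].pfn    = virt_to_phys(data) >> PAGE_SHIFT;
 *	pb[0].offset = offset_in_page(data);
 *	pb[0].len    = len;
 *
 *	ret = vmbus_sendpacket_pagebuffer(channel, pb, 1,
 *					  &hdr, sizeof(hdr), req_id);
 *
 * "hdr" and "req_id" are placeholders for the driver-specific packet
 * header and transaction id.
 */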
666
667 extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
668 struct hv_multipage_buffer *mpb,
669 void *buffer,
670 u32 bufferlen,
671 u64 requestid);
672
673 extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
674 void *kbuffer,
675 u32 size,
676 u32 *gpadl_handle);
677
678 extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
679 u32 gpadl_handle);
680
681 extern int vmbus_recvpacket(struct vmbus_channel *channel,
682 void *buffer,
683 u32 bufferlen,
684 u32 *buffer_actual_len,
685 u64 *requestid);
686
687 extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
688 void *buffer,
689 u32 bufferlen,
690 u32 *buffer_actual_len,
691 u64 *requestid);
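/*
 * Illustrative usage sketch (editorial addition): a typical VMBus driver
 * opens its channel in probe(), drains packets from its channel callback,
 * and closes the channel on remove. Names prefixed with "my_" and
 * handle_payload() are placeholders, and the 16K ring sizes are only an
 * example.
 *
 *	static void my_channel_cb(void *context)
 *	{
 *		struct vmbus_channel *chan = context;
 *		u8 buf[256];
 *		u32 actual;
 *		u64 req_id;
 *
 *		while (vmbus_recvpacket(chan, buf, sizeof(buf),
 *					&actual, &req_id) == 0 && actual)
 *			handle_payload(buf, actual);
 *	}
 *
 *	ret = vmbus_open(dev->channel, 16 * 1024, 16 * 1024,
 *			 NULL, 0, my_channel_cb, dev->channel);
 *	...
 *	vmbus_close(dev->channel);
 */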
692
693
694 extern void vmbus_get_debug_info(struct vmbus_channel *channel,
695 struct vmbus_channel_debug_info *debug);
696
697 extern void vmbus_ontimer(unsigned long data);
698
699
700 #define LOWORD(dw) ((unsigned short)(dw))
701 #define HIWORD(dw) ((unsigned short)(((unsigned int) (dw) >> 16) & 0xFFFF))
702
703
704 #define VMBUS 0x0001
705 #define STORVSC 0x0002
706 #define NETVSC 0x0004
707 #define INPUTVSC 0x0008
708 #define BLKVSC 0x0010
709 #define VMBUS_DRV 0x0100
710 #define STORVSC_DRV 0x0200
711 #define NETVSC_DRV 0x0400
712 #define INPUTVSC_DRV 0x0800
713 #define BLKVSC_DRV 0x1000
714
715 #define ALL_MODULES (VMBUS |\
716 STORVSC |\
717 NETVSC |\
718 INPUTVSC |\
719 BLKVSC |\
720 VMBUS_DRV |\
721 STORVSC_DRV |\
722 NETVSC_DRV |\
723 INPUTVSC_DRV|\
724 BLKVSC_DRV)
725
726 /* Logging Level */
727 #define ERROR_LVL 3
728 #define WARNING_LVL 4
729 #define INFO_LVL 6
730 #define DEBUG_LVL 7
731 #define DEBUG_LVL_ENTEREXIT 8
732 #define DEBUG_RING_LVL 9
733
734 extern unsigned int vmbus_loglevel;
735
736 #define DPRINT(mod, lvl, fmt, args...) do {\
737 if ((mod & (HIWORD(vmbus_loglevel))) && \
738 (lvl <= LOWORD(vmbus_loglevel))) \
739 printk(KERN_DEBUG #mod": %s() " fmt "\n", __func__, ## args);\
740 } while (0)
741
742 #define DPRINT_DBG(mod, fmt, args...) do {\
743 if ((mod & (HIWORD(vmbus_loglevel))) && \
744 (DEBUG_LVL <= LOWORD(vmbus_loglevel))) \
745 printk(KERN_DEBUG #mod": %s() " fmt "\n", __func__, ## args);\
746 } while (0)
747
748 #define DPRINT_INFO(mod, fmt, args...) do {\
749 if ((mod & (HIWORD(vmbus_loglevel))) && \
750 (INFO_LVL <= LOWORD(vmbus_loglevel))) \
751 printk(KERN_INFO #mod": " fmt "\n", ## args);\
752 } while (0)
753
754 #define DPRINT_WARN(mod, fmt, args...) do {\
755 if ((mod & (HIWORD(vmbus_loglevel))) && \
756 (WARNING_LVL <= LOWORD(vmbus_loglevel))) \
757 printk(KERN_WARNING #mod": WARNING! " fmt "\n", ## args);\
758 } while (0)
759
760 #define DPRINT_ERR(mod, fmt, args...) do {\
761 if ((mod & (HIWORD(vmbus_loglevel))) && \
762 (ERROR_LVL <= LOWORD(vmbus_loglevel))) \
763 printk(KERN_ERR #mod": %s() ERROR!! " fmt "\n", \
764 __func__, ## args);\
765 } while (0)
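/*
 * Worked example (editorial addition): the DPRINT macros above treat the
 * high 16 bits of vmbus_loglevel as a module mask and the low 16 bits as
 * the verbosity threshold. For instance, enabling INFO-level messages for
 * the VMBUS and NETVSC modules would correspond to:
 *
 *	vmbus_loglevel = ((VMBUS | NETVSC) << 16) | INFO_LVL;
 *
 * so that HIWORD(vmbus_loglevel) == (VMBUS | NETVSC) and
 * LOWORD(vmbus_loglevel) == INFO_LVL.
 */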
766
767
768
769 struct hv_driver;
770 struct hv_device;
771
772 struct hv_dev_port_info {
773 u32 int_mask;
774 u32 read_idx;
775 u32 write_idx;
776 u32 bytes_avail_toread;
777 u32 bytes_avail_towrite;
778 };
779
780 struct hv_device_info {
781 u32 chn_id;
782 u32 chn_state;
783 uuid_le chn_type;
784 uuid_le chn_instance;
785
786 u32 monitor_id;
787 u32 server_monitor_pending;
788 u32 server_monitor_latency;
789 u32 server_monitor_conn_id;
790 u32 client_monitor_pending;
791 u32 client_monitor_latency;
792 u32 client_monitor_conn_id;
793
794 struct hv_dev_port_info inbound;
795 struct hv_dev_port_info outbound;
796 };
797
798 /* Base driver object */
799 struct hv_driver {
800 const char *name;
801
802 /* the device type supported by this driver */
803 uuid_le dev_type;
804 const struct hv_vmbus_device_id *id_table;
805
806 struct device_driver driver;
807
808 int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
809 int (*remove)(struct hv_device *);
810 void (*shutdown)(struct hv_device *);
811
812 };
813
814 /* Base device object */
815 struct hv_device {
816 /* the device type id of this device */
817 uuid_le dev_type;
818
819 /* the device instance id of this device */
820 uuid_le dev_instance;
821
822 struct device device;
823
824 struct vmbus_channel *channel;
825 };
826
827
828 static inline struct hv_device *device_to_hv_device(struct device *d)
829 {
830 return container_of(d, struct hv_device, device);
831 }
832
833 static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
834 {
835 return container_of(d, struct hv_driver, driver);
836 }
837
838 static inline void hv_set_drvdata(struct hv_device *dev, void *data)
839 {
840 dev_set_drvdata(&dev->device, data);
841 }
842
843 static inline void *hv_get_drvdata(struct hv_device *dev)
844 {
845 return dev_get_drvdata(&dev->device);
846 }
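/*
 * Illustrative sketch (editorial addition): hv_set_drvdata() and
 * hv_get_drvdata() are the usual way to attach per-device driver state,
 * mirroring dev_set_drvdata()/dev_get_drvdata(). "struct my_dev_ctx" is a
 * placeholder type.
 *
 *	static int my_probe(struct hv_device *dev,
 *			    const struct hv_vmbus_device_id *id)
 *	{
 *		struct my_dev_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *
 *		if (!ctx)
 *			return -ENOMEM;
 *		hv_set_drvdata(dev, ctx);
 *		return 0;
 *	}
 *
 *	static int my_remove(struct hv_device *dev)
 *	{
 *		kfree(hv_get_drvdata(dev));
 *		return 0;
 *	}
 */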
847
848 /* Vmbus interface */
849 #define vmbus_driver_register(driver) \
850 __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
851 int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
852 struct module *owner,
853 const char *mod_name);
854 void vmbus_driver_unregister(struct hv_driver *hv_driver);
855
856 /**
857 * VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device
858 *
859 * This macro is used to create a struct hv_vmbus_device_id that matches a
860 * specific device.
861 */
862 #define VMBUS_DEVICE(g0, g1, g2, g3, g4, g5, g6, g7, \
863 g8, g9, ga, gb, gc, gd, ge, gf) \
864 .guid = { g0, g1, g2, g3, g4, g5, g6, g7, \
865 g8, g9, ga, gb, gc, gd, ge, gf },
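/*
 * Illustrative sketch (editorial addition): a driver would typically pair
 * VMBUS_DEVICE() with an id table and a struct hv_driver, then register it
 * with vmbus_driver_register(). The GUID bytes below are dummies, and
 * my_probe/my_remove are placeholders.
 *
 *	static const struct hv_vmbus_device_id my_id_table[] = {
 *		{ VMBUS_DEVICE(0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
 *			       0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10) },
 *		{ },
 *	};
 *
 *	static struct hv_driver my_drv = {
 *		.name = "my_drv",
 *		.id_table = my_id_table,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	ret = vmbus_driver_register(&my_drv);
 *	...
 *	vmbus_driver_unregister(&my_drv);
 */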
866
867 /*
868 * Common header for Hyper-V ICs
869 */
870
871 #define ICMSGTYPE_NEGOTIATE 0
872 #define ICMSGTYPE_HEARTBEAT 1
873 #define ICMSGTYPE_KVPEXCHANGE 2
874 #define ICMSGTYPE_SHUTDOWN 3
875 #define ICMSGTYPE_TIMESYNC 4
876 #define ICMSGTYPE_VSS 5
877
878 #define ICMSGHDRFLAG_TRANSACTION 1
879 #define ICMSGHDRFLAG_REQUEST 2
880 #define ICMSGHDRFLAG_RESPONSE 4
881
882 #define HV_S_OK 0x00000000
883 #define HV_E_FAIL 0x80004005
884 #define HV_ERROR_NOT_SUPPORTED 0x80070032
885 #define HV_ERROR_MACHINE_LOCKED 0x800704F7
886
887 /*
888 * While we want to handle util services as regular devices,
889 * there is only one instance of each of these services, so
890 * we statically allocate the service-specific state.
891 */
892
893 struct hv_util_service {
894 u8 *recv_buffer;
895 void (*util_cb)(void *);
896 int (*util_init)(struct hv_util_service *);
897 void (*util_deinit)(void);
898 };
899
900 struct vmbuspipe_hdr {
901 u32 flags;
902 u32 msgsize;
903 } __packed;
904
905 struct ic_version {
906 u16 major;
907 u16 minor;
908 } __packed;
909
910 struct icmsg_hdr {
911 struct ic_version icverframe;
912 u16 icmsgtype;
913 struct ic_version icvermsg;
914 u16 icmsgsize;
915 u32 status;
916 u8 ictransaction_id;
917 u8 icflags;
918 u8 reserved[2];
919 } __packed;
920
921 struct icmsg_negotiate {
922 u16 icframe_vercnt;
923 u16 icmsg_vercnt;
924 u32 reserved;
925 struct ic_version icversion_data[1]; /* any size array */
926 } __packed;
927
928 struct shutdown_msg_data {
929 u32 reason_code;
930 u32 timeout_seconds;
931 u32 flags;
932 u8 display_message[2048];
933 } __packed;
934
935 struct heartbeat_msg_data {
936 u64 seq_num;
937 u32 reserved[8];
938 } __packed;
939
940 /* Time Sync IC defs */
941 #define ICTIMESYNCFLAG_PROBE 0
942 #define ICTIMESYNCFLAG_SYNC 1
943 #define ICTIMESYNCFLAG_SAMPLE 2
944
945 #ifdef __x86_64__
946 #define WLTIMEDELTA 116444736000000000L /* in 100ns unit */
947 #else
948 #define WLTIMEDELTA 116444736000000000LL
949 #endif
950
951 struct ictimesync_data {
952 u64 parenttime;
953 u64 childtime;
954 u64 roundtriptime;
955 u8 flags;
956 } __packed;
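/*
 * Worked example (editorial addition): parenttime is a Windows-style
 * timestamp in 100ns units since January 1, 1601, and WLTIMEDELTA is the
 * offset between that epoch and the Unix epoch in the same units, so a
 * time-sync consumer can convert roughly as follows:
 *
 *	u64 host_100ns_since_1970 = ts->parenttime - WLTIMEDELTA;
 *	u64 host_ns_since_1970    = host_100ns_since_1970 * 100;
 *
 * where ts is a struct ictimesync_data received from the host.
 */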
957
958 struct hyperv_service_callback {
959 u8 msg_type;
960 char *log_msg;
961 uuid_le data;
962 struct vmbus_channel *channel;
963 void (*callback) (void *context);
964 };
965
966 extern void prep_negotiate_resp(struct icmsg_hdr *,
967 struct icmsg_negotiate *, u8 *);
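/*
 * Illustrative sketch (editorial addition): an IC ("util") message arrives
 * over the channel framed as a struct vmbuspipe_hdr followed by a struct
 * icmsg_hdr and then the message body. A negotiate request is commonly
 * answered by handing the headers to prep_negotiate_resp(); the buffer
 * handling below is only an approximation of that pattern.
 *
 *	struct icmsg_hdr *ichdr;
 *	struct icmsg_negotiate *neg;
 *
 *	ichdr = (struct icmsg_hdr *)&buf[sizeof(struct vmbuspipe_hdr)];
 *	if (ichdr->icmsgtype == ICMSGTYPE_NEGOTIATE) {
 *		neg = (struct icmsg_negotiate *)
 *			&buf[sizeof(struct vmbuspipe_hdr) +
 *			     sizeof(struct icmsg_hdr)];
 *		prep_negotiate_resp(ichdr, neg, buf);
 *	}
 *
 * "buf" is the raw receive buffer holding the packet payload.
 */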
968
969 #endif /* _HYPERV_H */