/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"

#define VMWGFX_DRIVER_DATE "20160210"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 10
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)

#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
#define VMW_PL_MOB TTM_PL_PRIV1
#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4

struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
	bool gb_aware;
};

struct vmw_dma_buffer {
	struct ttm_buffer_object base;
	struct list_head res_list;
	s32 pin_count;
	/* Not ref-counted. Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
};

/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 *
 * This structure also contains driver private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	bool validate_as_mob;
};

struct vmw_res_func;
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	bool avail;
	unsigned long backup_size;
	bool res_dirty; /* Protected by backup buffer reserved */
	bool backup_dirty; /* Protected by backup buffer reserved */
	struct vmw_dma_buffer *backup;
	unsigned long backup_offset;
	unsigned long pin_count; /* Protected by resource reserved */
	const struct vmw_res_func *func;
	struct list_head lru_head; /* Protected by the resource lock */
	struct list_head mob_head; /* Protected by @backup reserved */
	struct list_head binding_head; /* Protected by binding_mutex */
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};


/*
 * Resources that are managed using ioctls.
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_dx_context,
	vmw_res_cotable,
	vmw_res_view,
	vmw_res_max
};

/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_shader,
	vmw_cmdbuf_res_view
};

struct vmw_cmdbuf_res_manager;

struct vmw_cursor_snooper {
	struct drm_crtc *crtc;
	size_t age;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

struct vmw_surface {
	struct vmw_resource res;
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;
	bool scanout;
	uint32_t array_size;
	/* TODO: so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	SVGA3dTextureFilter autogen_filter;
	uint32_t multisample_count;
	struct list_head view_list;
};

struct vmw_marker_queue {
	struct list_head head;
	u64 lag;
	u64 lag_time;
	spinlock_t lock;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	u32 *dynamic_buffer;
	u32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_marker_queue marker_queue;
	bool dx;
};

struct vmw_relocation {
	SVGAMobId *mob_loc;
	SVGAGuestPtr *location;
	uint32_t index;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 *
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	bool valid;
	uint32_t handle;
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
};

/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
 */
enum vmw_dma_map_mode {
	vmw_dma_phys,           /* Use physical page addresses */
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
	vmw_dma_map_max
};

/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_regions;
	unsigned long num_pages;
};

/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
	struct page *(*page)(struct vmw_piter *);
};

/*
 * enum vmw_display_unit_type - Describes the display unit
 */
enum vmw_display_unit_type {
	vmw_du_invalid = 0,
	vmw_du_legacy,
	vmw_du_screen_object,
	vmw_du_screen_target
};


struct vmw_sw_context {
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
	bool kernel; /**< is the call made from the kernel */
	struct vmw_fpriv *fp;
	struct list_head validate_nodes;
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	uint32_t cur_reloc;
	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
	uint32_t cur_val_buf;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct list_head resource_list;
	struct list_head ctx_resource_list; /* For contexts and cotables */
	struct vmw_dma_buffer *cur_query_bo;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_resource *error_resource;
	struct vmw_ctx_binding_state *staged_bindings;
	bool staged_bindings_inuse;
	struct list_head staged_cmd_res;
	struct vmw_resource_val_node *dx_ctx_node;
	struct vmw_dma_buffer *dx_query_mob;
	struct vmw_resource *dx_query_ctx;
	struct vmw_cmdbuf_res_manager *man;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_master {
	struct ttm_lock lock;
};

struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};


/*
 * struct vmw_otable - Guest Memory Object table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
	bool enabled;
};

struct vmw_otable_batch {
	unsigned num_otables;
	struct vmw_otable *otables;
	struct vmw_resource *context;
	struct ttm_buffer_object *otable_bo;
};

struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t prim_bb_mem;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t texture_max_width;
	uint32_t texture_max_height;
	uint32_t stdu_max_width;
	uint32_t stdu_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	u32 *mmio_virt;
	uint32_t capabilities;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t max_mob_pages;
	uint32_t max_mob_size;
	uint32_t memory_size;
	bool has_gmr;
	bool has_mob;
	spinlock_t hw_lock;
	spinlock_t cap_lock;
	bool has_dx;
	bool assume_16bpp;

	/*
	 * VGA registers.
	 */

	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_bpp;
	uint32_t vga_bpl;
	uint32_t vga_pitchlock;

	uint32_t num_displays;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	enum vmw_display_unit_type active_display_unit;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_overlay *overlay_priv;
	struct drm_property *hotplug_mode_update_property;
	struct drm_property *implicit_placement_property;
	unsigned num_implicit;
	struct vmw_framebuffer *implicit_fb;
	struct mutex global_kms_state_mutex;

	/*
	 * Context and surface management.
	 */

	rwlock_t resource_lock;
	struct idr res_idr[vmw_res_max];
	/*
	 * Block lastclose from racing with firstopen.
	 */

	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	spinlock_t waiter_lock;
	int fence_queue_waiters; /* Protected by waiter_lock */
	int goal_queue_waiters; /* Protected by waiter_lock */
	int cmdbuf_waiters; /* Protected by waiter_lock */
	int error_waiters; /* Protected by waiter_lock */
	int fifo_queue_waiters; /* Protected by waiter_lock */
	uint32_t last_read_seqno;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask; /* Updates protected by waiter_lock */

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;
	struct mutex binding_mutex;

	/**
	 * Operating mode.
	 */

	bool stealth;
	bool enable_fb;
	spinlock_t svga_lock;

	/**
	 * Master management.
	 */

	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
	struct notifier_block pm_nb;
	bool suspended;
	bool refuse_hibernation;

	struct mutex release_mutex;
	atomic_t num_fifo_resources;

	/*
	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
	 */
	struct ttm_lock reservation_sem;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct vmw_dma_buffer *dummy_query_bo;
	struct vmw_dma_buffer *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	/*
	 * DMA mapping stuff.
	 */
	enum vmw_dma_map_mode map_mode;

	/*
	 * Guest Backed stuff
	 */
	struct vmw_otable_batch otable_batch;

	struct vmw_cmdbuf_man *cman;
};


static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

static inline struct vmw_master *vmw_master(struct drm_master *master)
{
	return (struct vmw_master *) master->driver_priv;
}

/*
 * The locking here is fine-grained, so that it is performed once
 * for every read- and write operation. This is of course costly, but we
 * don't perform much register access in the timing-critical paths anyway.
 * Instead we have the extra benefit of being sure that we don't forget
 * the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	unsigned long irq_flags;
	u32 val;

	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);

	return val;
}

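/*
 * Illustrative sketch (editor's addition, not part of the driver): a
 * read-modify-write of an SVGA register built on the accessors above.
 * Each accessor takes and drops hw_lock internally, so the read and the
 * write below are individually atomic but the pair is not; callers that
 * need an atomic update must provide their own serialization.
 */
static inline void vmw_write_masked_example(struct vmw_private *dev_priv,
					    unsigned int offset,
					    u32 mask, u32 bits)
{
	u32 val = vmw_read(dev_priv, offset);

	vmw_write(dev_priv, offset, (val & ~mask) | (bits & mask));
}
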
extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);


/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			const struct vmw_sg_table *vsgt,
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
				bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t handle,
				  struct vmw_surface **out_surf,
				  struct vmw_dma_buffer **out_buf);
extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
			   struct vmw_dma_buffer *vmw_bo,
			   size_t size, struct ttm_placement *placement,
			   bool interruptible,
			   void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
					 struct ttm_object_file *tfile);
extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
				 struct ttm_object_file *tfile,
				 uint32_t size,
				 bool shareable,
				 uint32_t *handle,
				 struct vmw_dma_buffer **p_dma_buf,
				 struct ttm_base_object **p_base);
extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
				     struct vmw_dma_buffer *dma_buf,
				     uint32_t *handle);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
					 struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
					 uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
				  uint32_t id, struct vmw_dma_buffer **out,
				  struct ttm_base_object **base);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
				   bool switch_backup,
				   struct vmw_dma_buffer *new_backup,
				   unsigned long new_backup_offset);
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
				     struct ttm_mem_reg *mem);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);
extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
				struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);

JB
674/**
675 * DMA buffer helper routines - vmwgfx_dmabuf.c
676 */
459d0fa7 677extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
d991ef03 678 struct vmw_dma_buffer *bo,
459d0fa7
TH
679 struct ttm_placement *placement,
680 bool interruptible);
681extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
682 struct vmw_dma_buffer *buf,
683 bool interruptible);
684extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
685 struct vmw_dma_buffer *buf,
686 bool interruptible);
687extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
688 struct vmw_dma_buffer *bo,
689 bool interruptible);
d991ef03
JB
690extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
691 struct vmw_dma_buffer *bo,
692 bool interruptible);
b37a6b9a
TH
693extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
694 SVGAGuestPtr *ptr);
459d0fa7 695extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
fb1d9738
JB
696
697/**
698 * Misc Ioctl functionality - vmwgfx_ioctl.c
699 */
700
701extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
702 struct drm_file *file_priv);
f63f6a59
TH
703extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
704 struct drm_file *file_priv);
2fcd5a73
JB
705extern int vmw_present_ioctl(struct drm_device *dev, void *data,
706 struct drm_file *file_priv);
707extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
708 struct drm_file *file_priv);
5438ae88
TH
709extern unsigned int vmw_fops_poll(struct file *filp,
710 struct poll_table_struct *wait);
711extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
712 size_t count, loff_t *offset);
fb1d9738
JB
713
714/**
715 * Fifo utilities - vmwgfx_fifo.c
716 */
717
718extern int vmw_fifo_init(struct vmw_private *dev_priv,
719 struct vmw_fifo_state *fifo);
720extern void vmw_fifo_release(struct vmw_private *dev_priv,
721 struct vmw_fifo_state *fifo);
722extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
d80efd5c
TH
723extern void *
724vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
fb1d9738 725extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
d80efd5c 726extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
fb1d9738 727extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
6bcd8d3c 728 uint32_t *seqno);
2298e804 729extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
fb1d9738 730extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
8e19a951 731extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
d7e1958d 732extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
e2fa3a76
TH
733extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
734 uint32_t cid);
3eab3d9e
TH
735extern int vmw_fifo_flush(struct vmw_private *dev_priv,
736 bool interruptible);
fb1d9738
JB
737
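/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * usual reserve/fill/commit pattern for submitting a command through the
 * FIFO. The command id and body layout here are placeholders; real
 * callers use the SVGA_3D_CMD_* definitions from the device headers.
 */
static inline int vmw_fifo_cmd_example(struct vmw_private *dev_priv,
				       uint32_t cmd_id,
				       const void *body, size_t body_size)
{
	size_t bytes = sizeof(uint32_t) + body_size;
	uint32_t *cmd = vmw_fifo_reserve(dev_priv, bytes);

	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd[0] = cmd_id;		/* Placeholder command id. */
	memcpy(&cmd[1], body, body_size);
	vmw_fifo_commit(dev_priv, bytes);

	return 0;
}
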
/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM buffer object driver - vmwgfx_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);

/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->page(viter);
}
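
/*
 * Illustrative sketch (editor's addition, not part of the driver): walking
 * every page of a bound buffer with the page iterator. This assumes the
 * iterator starts one position before the first page, so vmw_piter_next()
 * is called before each access, and that @vsgt was obtained via
 * vmw_bo_sg_table().
 */
static inline void vmw_piter_walk_example(const struct vmw_sg_table *vsgt)
{
	struct vmw_piter viter;

	vmw_piter_start(&viter, vsgt, 0);
	while (vmw_piter_next(&viter)) {
		dma_addr_t addr = vmw_piter_dma_addr(&viter);

		/* Program @addr into a device page table entry here. */
		(void) addr;
	}
}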

/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
			     struct drm_file *file_priv, size_t size);
extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
			       void *kernel_commands,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       uint32_t dx_context_handle,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					    struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
					struct vmw_fpriv *vmw_fp,
					int ret,
					struct drm_vmw_fence_rep __user
					*user_fence_rep,
					struct vmw_fence_obj *fence,
					uint32_t fence_handle);
extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo,
				      bool interruptible,
				      bool validate_as_mob);


/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern irqreturn_t vmw_irq_handler(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
			  uint32_t seqno, bool interruptible,
			  unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			     uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
				   int *waiter_count);
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
				      u32 flag, int *waiter_count);

/**
 * Rudimentary fence-like objects currently used only for throttling -
 * vmwgfx_marker.c
 */

extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
			   uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
			   uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
			struct vmw_marker_queue *queue, uint32_t us);

/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid, int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);

int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);

int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);

/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * GMR Id manager
 */

extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;

/**
 * Prime - vmwgfx_prime.c
 */

extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
				  struct drm_file *file_priv,
				  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);

/*
 * Memory object (MOB) management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
			const struct vmw_sg_table *vsgt,
			unsigned long num_data_pages, int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
			   struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);

/*
 * Context management - vmwgfx_context.c
 */

extern const struct vmw_user_resource_conv *user_context_converter;

extern int vmw_context_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     int id,
			     struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
						SVGACOTableType cotable_type);
struct vmw_ctx_binding_state;
extern struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
					  bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
				     struct vmw_dma_buffer *mob);
extern struct vmw_dma_buffer *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);


/*
 * Surface management - vmwgfx_surface.c
 */

extern const struct vmw_user_resource_conv *user_surface_converter;

extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
				struct vmw_surface *srf);
int vmw_surface_gb_priv_define(struct drm_device *dev,
			       uint32_t user_accounting_size,
			       uint32_t svga3d_flags,
			       SVGA3dSurfaceFormat format,
			       bool for_scanout,
			       uint32_t num_mip_levels,
			       uint32_t multisample_count,
			       uint32_t array_size,
			       struct drm_vmw_size size,
			       struct vmw_surface **srf_out);

/*
 * Shader management - vmwgfx_shader.c
 */

extern const struct vmw_user_resource_conv *user_shader_converter;

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
				 struct vmw_cmdbuf_res_manager *man,
				 u32 user_key, const void *bytecode,
				 SVGA3dShaderType shader_type,
				 size_t size,
				 struct list_head *list);
extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
			     u32 user_key, SVGA3dShaderType shader_type,
			     struct list_head *list);
extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
			     struct vmw_resource *ctx,
			     u32 user_key,
			     SVGA3dShaderType shader_type,
			     struct list_head *list);
extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
					     struct list_head *list,
					     bool readback);

extern struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
		  u32 user_key, SVGA3dShaderType shader_type);

/*
 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
 */

extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		      enum vmw_cmdbuf_res_type res_type,
		      u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
			      enum vmw_cmdbuf_res_type res_type,
			      u32 user_key,
			      struct vmw_resource *res,
			      struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
				 enum vmw_cmdbuf_res_type res_type,
				 u32 user_key,
				 struct list_head *list,
				 struct vmw_resource **res);

/*
 * COTable management - vmwgfx_cotable.c
 */
extern const SVGACOTableType vmw_cotable_scrub_order[];
extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
					      struct vmw_resource *ctx,
					      u32 type);
extern int vmw_cotable_notify(struct vmw_resource *res, int id);
extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
				     struct list_head *head);

/*
 * Command buffer management - vmwgfx_cmdbuf.c
 */
struct vmw_cmdbuf_man;
struct vmw_cmdbuf_header;

extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
				    size_t size, size_t default_size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
			   unsigned long timeout);
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
				int ctx_id, bool interruptible,
				struct vmw_cmdbuf_header *header);
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
			      struct vmw_cmdbuf_header *header,
			      bool flush);
extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
			      size_t size, bool interruptible,
			      struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
				bool interruptible);


/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
	struct vmw_dma_buffer *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf != NULL) {
		struct ttm_buffer_object *bo = &tmp_buf->base;

		ttm_bo_unref(&bo);
	}
}

static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
	if (ttm_bo_reference(&buf->base))
		return buf;
	return NULL;
}

static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}

static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
	atomic_inc(&dev_priv->num_fifo_resources);
}

static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
	atomic_dec(&dev_priv->num_fifo_resources);
}

/**
 * vmw_mmio_read - Perform a MMIO read from volatile memory
 *
 * @addr: The address to read from
 *
 * This function is intended to be equivalent to ioread32() on
 * memremap'd memory, but without byteswapping.
 */
static inline u32 vmw_mmio_read(u32 *addr)
{
	return READ_ONCE(*addr);
}

/**
 * vmw_mmio_write - Perform a MMIO write to volatile memory
 *
 * @addr: The address to write to
 *
 * This function is intended to be equivalent to iowrite32 on
 * memremap'd memory, but without byteswapping.
 */
static inline void vmw_mmio_write(u32 value, u32 *addr)
{
	WRITE_ONCE(*addr, value);
}

/**
 * Host messaging - vmwgfx_msg.c
 */
extern int vmw_host_log(const char *log);

#endif