/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"

#define VMWGFX_DRIVER_DATE "20160210"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 10
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
                        VMWGFX_NUM_GB_SHADER +\
                        VMWGFX_NUM_GB_SURFACE +\
                        VMWGFX_NUM_GB_SCREEN_TARGET)

#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
#define VMW_PL_MOB TTM_PL_PRIV1
#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4

struct vmw_fpriv {
        struct drm_master *locked_master;
        struct ttm_object_file *tfile;
        bool gb_aware;
};

struct vmw_dma_buffer {
        struct ttm_buffer_object base;
        struct list_head res_list;
        s32 pin_count;
        /* Not ref-counted. Protected by binding_mutex */
        struct vmw_resource *dx_query_ctx;
};

/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 *
 * This structure contains also driver private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
        struct ttm_validate_buffer base;
        struct drm_hash_item hash;
        bool validate_as_mob;
};

struct vmw_res_func;
struct vmw_resource {
        struct kref kref;
        struct vmw_private *dev_priv;
        int id;
        bool avail;
        unsigned long backup_size;
        bool res_dirty; /* Protected by backup buffer reserved */
        bool backup_dirty; /* Protected by backup buffer reserved */
        struct vmw_dma_buffer *backup;
        unsigned long backup_offset;
        unsigned long pin_count; /* Protected by resource reserved */
        const struct vmw_res_func *func;
        struct list_head lru_head; /* Protected by the resource lock */
        struct list_head mob_head; /* Protected by @backup reserved */
        struct list_head binding_head; /* Protected by binding_mutex */
        void (*res_free) (struct vmw_resource *res);
        void (*hw_destroy) (struct vmw_resource *res);
};


/*
 * Resources that are managed using ioctls.
 */
enum vmw_res_type {
        vmw_res_context,
        vmw_res_surface,
        vmw_res_stream,
        vmw_res_shader,
        vmw_res_dx_context,
        vmw_res_cotable,
        vmw_res_view,
        vmw_res_max
};

/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
        vmw_cmdbuf_res_shader,
        vmw_cmdbuf_res_view
};

struct vmw_cmdbuf_res_manager;

struct vmw_cursor_snooper {
        struct drm_crtc *crtc;
        size_t age;
        uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

struct vmw_surface {
        struct vmw_resource res;
        uint32_t flags;
        uint32_t format;
        uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
        struct drm_vmw_size base_size;
        struct drm_vmw_size *sizes;
        uint32_t num_sizes;
        bool scanout;
        uint32_t array_size;
        /* TODO: so far just an extra pointer */
        struct vmw_cursor_snooper snooper;
        struct vmw_surface_offset *offsets;
        SVGA3dTextureFilter autogen_filter;
        uint32_t multisample_count;
        struct list_head view_list;
};

struct vmw_marker_queue {
        struct list_head head;
        u64 lag;
        u64 lag_time;
        spinlock_t lock;
};

struct vmw_fifo_state {
        unsigned long reserved_size;
        u32 *dynamic_buffer;
        u32 *static_buffer;
        unsigned long static_buffer_size;
        bool using_bounce_buffer;
        uint32_t capabilities;
        struct mutex fifo_mutex;
        struct rw_semaphore rwsem;
        struct vmw_marker_queue marker_queue;
        bool dx;
};

struct vmw_relocation {
        SVGAMobId *mob_loc;
        SVGAGuestPtr *location;
        uint32_t index;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 *
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
        bool valid;
        uint32_t handle;
        struct vmw_resource *res;
        struct vmw_resource_val_node *node;
};

/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
 */
enum vmw_dma_map_mode {
        vmw_dma_phys,           /* Use physical page addresses */
        vmw_dma_alloc_coherent, /* Use TTM coherent pages */
        vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
        vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
        vmw_dma_map_max
};
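
/*
 * A minimal usage sketch for branching on the chosen mode (illustrative
 * only; the mode itself is picked once at load time by vmw_dma_select_mode()
 * in vmwgfx_drv.c, and example_needs_dma_unmap() below is a hypothetical
 * helper, not part of the driver):
 *
 *      static bool example_needs_dma_unmap(enum vmw_dma_map_mode mode)
 *      {
 *              return mode == vmw_dma_map_populate ||
 *                     mode == vmw_dma_map_bind;
 *      }
 *
 * Only the two map_* modes keep DMA mappings around that must later be
 * torn down at unpopulate or unbind time.
 */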

/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 */
struct vmw_sg_table {
        enum vmw_dma_map_mode mode;
        struct page **pages;
        const dma_addr_t *addrs;
        struct sg_table *sgt;
        unsigned long num_regions;
        unsigned long num_pages;
};

/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 */
struct vmw_piter {
        struct page **pages;
        const dma_addr_t *addrs;
        struct sg_page_iter iter;
        unsigned long i;
        unsigned long num_pages;
        bool (*next)(struct vmw_piter *);
        dma_addr_t (*dma_address)(struct vmw_piter *);
        struct page *(*page)(struct vmw_piter *);
};

/*
 * enum vmw_display_unit_type - Describes the display unit
 */
enum vmw_display_unit_type {
        vmw_du_invalid = 0,
        vmw_du_legacy,
        vmw_du_screen_object,
        vmw_du_screen_target
};


struct vmw_sw_context {
        struct drm_open_hash res_ht;
        bool res_ht_initialized;
        bool kernel; /**< whether the call was made from the kernel */
        struct vmw_fpriv *fp;
        struct list_head validate_nodes;
        struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
        uint32_t cur_reloc;
        struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
        uint32_t cur_val_buf;
        uint32_t *cmd_bounce;
        uint32_t cmd_bounce_size;
        struct list_head resource_list;
        struct list_head ctx_resource_list; /* For contexts and cotables */
        struct vmw_dma_buffer *cur_query_bo;
        struct list_head res_relocations;
        uint32_t *buf_start;
        struct vmw_res_cache_entry res_cache[vmw_res_max];
        struct vmw_resource *last_query_ctx;
        bool needs_post_query_barrier;
        struct vmw_resource *error_resource;
        struct vmw_ctx_binding_state *staged_bindings;
        bool staged_bindings_inuse;
        struct list_head staged_cmd_res;
        struct vmw_resource_val_node *dx_ctx_node;
        struct vmw_dma_buffer *dx_query_mob;
        struct vmw_resource *dx_query_ctx;
        struct vmw_cmdbuf_res_manager *man;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_master {
        struct ttm_lock lock;
};

struct vmw_vga_topology_state {
        uint32_t width;
        uint32_t height;
        uint32_t primary;
        uint32_t pos_x;
        uint32_t pos_y;
};


/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 */
struct vmw_otable {
        unsigned long size;
        struct vmw_mob *page_table;
        bool enabled;
};

struct vmw_otable_batch {
        unsigned num_otables;
        struct vmw_otable *otables;
        struct vmw_resource *context;
        struct ttm_buffer_object *otable_bo;
};

struct vmw_private {
        struct ttm_bo_device bdev;
        struct ttm_bo_global_ref bo_global_ref;
        struct drm_global_reference mem_global_ref;

        struct vmw_fifo_state fifo;

        struct drm_device *dev;
        unsigned long vmw_chipset;
        unsigned int io_start;
        uint32_t vram_start;
        uint32_t vram_size;
        uint32_t prim_bb_mem;
        uint32_t mmio_start;
        uint32_t mmio_size;
        uint32_t fb_max_width;
        uint32_t fb_max_height;
        uint32_t texture_max_width;
        uint32_t texture_max_height;
        uint32_t stdu_max_width;
        uint32_t stdu_max_height;
        uint32_t initial_width;
        uint32_t initial_height;
        u32 *mmio_virt;
        uint32_t capabilities;
        uint32_t max_gmr_ids;
        uint32_t max_gmr_pages;
        uint32_t max_mob_pages;
        uint32_t max_mob_size;
        uint32_t memory_size;
        bool has_gmr;
        bool has_mob;
        spinlock_t hw_lock;
        spinlock_t cap_lock;
        bool has_dx;

        /*
         * VGA registers.
         */

        struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
        uint32_t vga_width;
        uint32_t vga_height;
        uint32_t vga_bpp;
        uint32_t vga_bpl;
        uint32_t vga_pitchlock;

        uint32_t num_displays;

        /*
         * Framebuffer info.
         */

        void *fb_info;
        enum vmw_display_unit_type active_display_unit;
        struct vmw_legacy_display *ldu_priv;
        struct vmw_overlay *overlay_priv;
        struct drm_property *hotplug_mode_update_property;
        struct drm_property *implicit_placement_property;
        unsigned num_implicit;
        struct vmw_framebuffer *implicit_fb;

        /*
         * Context and surface management.
         */

        rwlock_t resource_lock;
        struct idr res_idr[vmw_res_max];
        /*
         * Block lastclose from racing with firstopen.
         */

        struct mutex init_mutex;

        /*
         * A resource manager for kernel-only surfaces and
         * contexts.
         */

        struct ttm_object_device *tdev;

        /*
         * Fencing and IRQs.
         */

        atomic_t marker_seq;
        wait_queue_head_t fence_queue;
        wait_queue_head_t fifo_queue;
        spinlock_t waiter_lock;
        int fence_queue_waiters; /* Protected by waiter_lock */
        int goal_queue_waiters; /* Protected by waiter_lock */
        int cmdbuf_waiters; /* Protected by waiter_lock */
        int error_waiters; /* Protected by waiter_lock */
        int fifo_queue_waiters; /* Protected by waiter_lock */
        uint32_t last_read_seqno;
        struct vmw_fence_manager *fman;
        uint32_t irq_mask; /* Updates protected by waiter_lock */

        /*
         * Device state
         */

        uint32_t traces_state;
        uint32_t enable_state;
        uint32_t config_done_state;

        /**
         * Execbuf
         */
        /**
         * Protected by the cmdbuf mutex.
         */

        struct vmw_sw_context ctx;
        struct mutex cmdbuf_mutex;
        struct mutex binding_mutex;

        /**
         * Operating mode.
         */

        bool stealth;
        bool enable_fb;
        spinlock_t svga_lock;

        /**
         * Master management.
         */

        struct vmw_master *active_master;
        struct vmw_master fbdev_master;
        struct notifier_block pm_nb;
        bool suspended;
        bool refuse_hibernation;

        struct mutex release_mutex;
        atomic_t num_fifo_resources;

        /*
         * Replace this with an rwsem as soon as we have down_xx_interruptible()
         */
        struct ttm_lock reservation_sem;

        /*
         * Query processing. These members
         * are protected by the cmdbuf mutex.
         */

        struct vmw_dma_buffer *dummy_query_bo;
        struct vmw_dma_buffer *pinned_bo;
        uint32_t query_cid;
        uint32_t query_cid_valid;
        bool dummy_query_bo_pinned;

        /*
         * Surface swapping. The "surface_lru" list is protected by the
         * resource lock in order to be able to destroy a surface and take
         * it off the lru atomically. "used_memory_size" is currently
         * protected by the cmdbuf mutex for simplicity.
         */

        struct list_head res_lru[vmw_res_max];
        uint32_t used_memory_size;

        /*
         * DMA mapping stuff.
         */
        enum vmw_dma_map_mode map_mode;

        /*
         * Guest Backed stuff
         */
        struct vmw_otable_batch otable_batch;

        struct vmw_cmdbuf_man *cman;
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
        return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
        return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
        return (struct vmw_fpriv *)file_priv->driver_priv;
}

static inline struct vmw_master *vmw_master(struct drm_master *master)
{
        return (struct vmw_master *) master->driver_priv;
}

/*
 * The locking here is fine-grained, so that it is performed once
 * for every read- and write operation. This is of course costly, but we
 * don't perform much register access in the timing critical paths anyway.
 * Instead we have the extra benefit of being sure that we don't forget
 * the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
                             unsigned int offset, uint32_t value)
{
        unsigned long irq_flags;

        spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
        outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
        outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
        spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
                                unsigned int offset)
{
        unsigned long irq_flags;
        u32 val;

        spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
        outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
        val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
        spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);

        return val;
}

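/*
 * Usage sketch for the register accessors above (illustrative only;
 * SVGA_REG_ID is one of the register indices defined by the device
 * headers):
 *
 *      u32 svga_id = vmw_read(dev_priv, SVGA_REG_ID);
 *
 *      vmw_write(dev_priv, SVGA_REG_ID, svga_id);
 *
 * Each call takes and drops the hw lock internally, so callers never wrap
 * individual accesses in their own locking; by the same token, a
 * read-modify-write sequence is not atomic as a whole.
 */
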
extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);


/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
                        const struct vmw_sg_table *vsgt,
                        unsigned long num_pages,
                        int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
                                bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                                  struct ttm_object_file *tfile,
                                  uint32_t handle,
                                  struct vmw_surface **out_surf,
                                  struct vmw_dma_buffer **out_buf);
extern int vmw_user_resource_lookup_handle(
        struct vmw_private *dev_priv,
        struct ttm_object_file *tfile,
        uint32_t handle,
        const struct vmw_user_resource_conv *converter,
        struct vmw_resource **p_res);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
                           struct vmw_dma_buffer *vmw_bo,
                           size_t size, struct ttm_placement *placement,
                           bool interruptible,
                           void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
                                         struct ttm_object_file *tfile);
extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
                                 struct ttm_object_file *tfile,
                                 uint32_t size,
                                 bool shareable,
                                 uint32_t *handle,
                                 struct vmw_dma_buffer **p_dma_buf,
                                 struct ttm_base_object **p_base);
extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
                                     struct vmw_dma_buffer *dma_buf,
                                     uint32_t *handle);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
                                         struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
                                         uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                                  uint32_t id, struct vmw_dma_buffer **out,
                                  struct ttm_base_object **base);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                                  struct ttm_object_file *tfile,
                                  uint32_t *inout_id,
                                  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
                                   bool switch_backup,
                                   struct vmw_dma_buffer *new_backup,
                                   unsigned long new_backup_offset);
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
                                     struct ttm_mem_reg *mem);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem);
extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
                                struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);

/**
 * DMA buffer helper routines - vmwgfx_dmabuf.c
 */
extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
                                       struct vmw_dma_buffer *bo,
                                       struct ttm_placement *placement,
                                       bool interruptible);
extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
                                  struct vmw_dma_buffer *buf,
                                  bool interruptible);
extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
                                         struct vmw_dma_buffer *buf,
                                         bool interruptible);
extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
                                           struct vmw_dma_buffer *bo,
                                           bool interruptible);
extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
                            struct vmw_dma_buffer *bo,
                            bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
                                 SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);

/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv);
extern unsigned int vmw_fops_poll(struct file *filp,
                                  struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
                             size_t count, loff_t *offset);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
                         struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
                             struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void *
vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
                               uint32_t *seqno);
extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
                                     uint32_t cid);
extern int vmw_fifo_flush(struct vmw_private *dev_priv,
                          bool interruptible);

/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM buffer object driver - vmwgfx_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern void vmw_piter_start(struct vmw_piter *viter,
                            const struct vmw_sg_table *vsgt,
                            unsigned long p_offs);

/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
        return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
        return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
        return viter->page(viter);
}
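
/*
 * Usage sketch for the page iterator above (illustrative only; "vsgt" is
 * assumed to come from vmw_bo_sg_table() on a populated buffer object).
 * Note that vmw_piter_next() must be called once to advance to the first
 * page, which is also how vmw_gmr_bind() drives the iterator:
 *
 *      struct vmw_piter viter;
 *      dma_addr_t addr;
 *
 *      vmw_piter_start(&viter, vsgt, 0);
 *      while (vmw_piter_next(&viter)) {
 *              addr = vmw_piter_dma_addr(&viter);
 *              ... use addr for this page ...
 *      }
 */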

/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
                             struct drm_file *file_priv, size_t size);
extern int vmw_execbuf_process(struct drm_file *file_priv,
                               struct vmw_private *dev_priv,
                               void __user *user_commands,
                               void *kernel_commands,
                               uint32_t command_size,
                               uint64_t throttle_us,
                               uint32_t dx_context_handle,
                               struct drm_vmw_fence_rep __user
                               *user_fence_rep,
                               struct vmw_fence_obj **out_fence);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
                                            struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
                                      struct vmw_private *dev_priv,
                                      struct vmw_fence_obj **p_fence,
                                      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
                                        struct vmw_fpriv *vmw_fp,
                                        int ret,
                                        struct drm_vmw_fence_rep __user
                                        *user_fence_rep,
                                        struct vmw_fence_obj *fence,
                                        uint32_t fence_handle);
extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
                                      struct ttm_buffer_object *bo,
                                      bool interruptible,
                                      bool validate_as_mob);


/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern irqreturn_t vmw_irq_handler(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
                          uint32_t seqno, bool interruptible,
                          unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
                             uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
                             bool lazy,
                             bool fifo_idle,
                             uint32_t seqno,
                             bool interruptible,
                             unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
                             struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
                                   int *waiter_count);
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
                                      u32 flag, int *waiter_count);

/**
 * Rudimentary fence-like objects currently used only for throttling -
 * vmwgfx_marker.c
 */

extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
                           uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
                           uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
                        struct vmw_marker_queue *queue, uint32_t us);

/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
                          struct ttm_object_file *tfile,
                          struct ttm_buffer_object *bo,
                          SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
                       unsigned width, unsigned height, unsigned pitch,
                       unsigned bpp, unsigned depth);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
                                uint32_t pitch,
                                uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
int vmw_kms_present(struct vmw_private *dev_priv,
                    struct drm_file *file_priv,
                    struct vmw_framebuffer *vfb,
                    struct vmw_surface *surface,
                    uint32_t sid, int32_t destX, int32_t destY,
                    struct drm_vmw_rect *clips,
                    uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);

int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args);

int vmw_dumb_map_offset(struct drm_file *file_priv,
                        struct drm_device *dev, uint32_t handle,
                        uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
                     struct drm_device *dev,
                     uint32_t handle);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);

/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * GMR Id manager
 */

extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;

/**
 * Prime - vmwgfx_prime.c
 */

extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
                                  struct drm_file *file_priv,
                                  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
                                  struct drm_file *file_priv,
                                  uint32_t handle, uint32_t flags,
                                  int *prime_fd);

/*
 * Memory OBject (MOB) management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
                        const struct vmw_sg_table *vsgt,
                        unsigned long num_data_pages, int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
                           struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);

/*
 * Context management - vmwgfx_context.c
 */

extern const struct vmw_user_resource_conv *user_context_converter;

extern int vmw_context_check(struct vmw_private *dev_priv,
                             struct ttm_object_file *tfile,
                             int id,
                             struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
                                             struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
                                                SVGACOTableType cotable_type);
struct vmw_ctx_binding_state;
extern struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
                                          bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
                                     struct vmw_dma_buffer *mob);
extern struct vmw_dma_buffer *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);


/*
 * Surface management - vmwgfx_surface.c
 */

extern const struct vmw_user_resource_conv *user_surface_converter;

extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
                                          struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
                             struct ttm_object_file *tfile,
                             uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
                                struct vmw_surface *srf);
int vmw_surface_gb_priv_define(struct drm_device *dev,
                               uint32_t user_accounting_size,
                               uint32_t svga3d_flags,
                               SVGA3dSurfaceFormat format,
                               bool for_scanout,
                               uint32_t num_mip_levels,
                               uint32_t multisample_count,
                               uint32_t array_size,
                               struct drm_vmw_size size,
                               struct vmw_surface **srf_out);

/*
 * Shader management - vmwgfx_shader.c
 */

extern const struct vmw_user_resource_conv *user_shader_converter;

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
                                 struct vmw_cmdbuf_res_manager *man,
                                 u32 user_key, const void *bytecode,
                                 SVGA3dShaderType shader_type,
                                 size_t size,
                                 struct list_head *list);
extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
                             u32 user_key, SVGA3dShaderType shader_type,
                             struct list_head *list);
extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
                             struct vmw_resource *ctx,
                             u32 user_key,
                             SVGA3dShaderType shader_type,
                             struct list_head *list);
extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
                                             struct list_head *list,
                                             bool readback);

extern struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
                  u32 user_key, SVGA3dShaderType shader_type);

/*
 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
 */

extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
                      enum vmw_cmdbuf_res_type res_type,
                      u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
                              enum vmw_cmdbuf_res_type res_type,
                              u32 user_key,
                              struct vmw_resource *res,
                              struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
                                 enum vmw_cmdbuf_res_type res_type,
                                 u32 user_key,
                                 struct list_head *list,
                                 struct vmw_resource **res);

/*
 * COTable management - vmwgfx_cotable.c
 */
extern const SVGACOTableType vmw_cotable_scrub_order[];
extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
                                              struct vmw_resource *ctx,
                                              u32 type);
extern int vmw_cotable_notify(struct vmw_resource *res, int id);
extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
                                     struct list_head *head);

/*
 * Command buffer management - vmwgfx_cmdbuf.c
 */
struct vmw_cmdbuf_man;
struct vmw_cmdbuf_header;

extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
                                    size_t size, size_t default_size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
                           unsigned long timeout);
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
                                int ctx_id, bool interruptible,
                                struct vmw_cmdbuf_header *header);
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
                              struct vmw_cmdbuf_header *header,
                              bool flush);
extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
                              size_t size, bool interruptible,
                              struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
                                bool interruptible);


/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
        struct vmw_surface *tmp_srf = *srf;
        struct vmw_resource *res = &tmp_srf->res;
        *srf = NULL;

        vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
        (void) vmw_resource_reference(&srf->res);
        return srf;
}

static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
        struct vmw_dma_buffer *tmp_buf = *buf;

        *buf = NULL;
        if (tmp_buf != NULL) {
                struct ttm_buffer_object *bo = &tmp_buf->base;

                ttm_bo_unref(&bo);
        }
}

static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
        if (ttm_bo_reference(&buf->base))
                return buf;
        return NULL;
}
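
/*
 * Usage sketch for the reference helpers above (illustrative only).  The
 * unreference variants take the address of the caller's pointer and clear
 * it, which guards against accidental use after the reference is dropped:
 *
 *      struct vmw_dma_buffer *buf = vmw_dmabuf_reference(some_buf);
 *
 *      ... use buf ...
 *
 *      vmw_dmabuf_unreference(&buf);
 *      ... buf is now NULL ...
 */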

static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
        return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}

static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
        atomic_inc(&dev_priv->num_fifo_resources);
}

static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
        atomic_dec(&dev_priv->num_fifo_resources);
}

/**
 * vmw_mmio_read - Perform a MMIO read from volatile memory
 *
 * @addr: The address to read from
 *
 * This function is intended to be equivalent to ioread32() on
 * memremap'd memory, but without byteswapping.
 */
static inline u32 vmw_mmio_read(u32 *addr)
{
        return READ_ONCE(*addr);
}

/**
 * vmw_mmio_write - Perform a MMIO write to volatile memory
 *
 * @addr: The address to write to
 *
 * This function is intended to be equivalent to iowrite32 on
 * memremap'd memory, but without byteswapping.
 */
static inline void vmw_mmio_write(u32 value, u32 *addr)
{
        WRITE_ONCE(*addr, value);
}
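
/*
 * Usage sketch for the MMIO helpers above (illustrative only; SVGA_FIFO_MIN
 * is one of the FIFO register indices from the device headers, and
 * dev_priv->mmio_virt is the memremap'd FIFO memory):
 *
 *      u32 *fifo_mem = dev_priv->mmio_virt;
 *      u32 min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
 *
 * READ_ONCE()/WRITE_ONCE() only keep the compiler from caching, tearing or
 * reordering the access; they provide no ordering guarantees against the
 * device or other CPUs.
 */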
#endif