drm/vmwgfx: Implement the cursor_set2 callback v2
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h [deliverable/linux.git]
1 /**************************************************************************
2 *
3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #ifndef _VMWGFX_DRV_H_
29 #define _VMWGFX_DRV_H_
30
31 #include "vmwgfx_reg.h"
32 #include <drm/drmP.h>
33 #include <drm/vmwgfx_drm.h>
34 #include <drm/drm_hashtab.h>
35 #include <linux/suspend.h>
36 #include <drm/ttm/ttm_bo_driver.h>
37 #include <drm/ttm/ttm_object.h>
38 #include <drm/ttm/ttm_lock.h>
39 #include <drm/ttm/ttm_execbuf_util.h>
40 #include <drm/ttm/ttm_module.h>
41 #include "vmwgfx_fence.h"
42
43 #define VMWGFX_DRIVER_DATE "20150810"
44 #define VMWGFX_DRIVER_MAJOR 2
45 #define VMWGFX_DRIVER_MINOR 9
46 #define VMWGFX_DRIVER_PATCHLEVEL 0
47 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
48 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
49 #define VMWGFX_MAX_RELOCATIONS 2048
50 #define VMWGFX_MAX_VALIDATIONS 2048
51 #define VMWGFX_MAX_DISPLAYS 16
52 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
53 #define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
54
55 /*
56 * Perhaps we should have sysfs entries for these.
57 */
58 #define VMWGFX_NUM_GB_CONTEXT 256
59 #define VMWGFX_NUM_GB_SHADER 20000
60 #define VMWGFX_NUM_GB_SURFACE 32768
61 #define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
62 #define VMWGFX_NUM_DXCONTEXT 256
63 #define VMWGFX_NUM_DXQUERY 512
64 #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
65 VMWGFX_NUM_GB_SHADER +\
66 VMWGFX_NUM_GB_SURFACE +\
67 VMWGFX_NUM_GB_SCREEN_TARGET)
68
69 #define VMW_PL_GMR TTM_PL_PRIV0
70 #define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
71 #define VMW_PL_MOB TTM_PL_PRIV1
72 #define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1
73
74 #define VMW_RES_CONTEXT ttm_driver_type0
75 #define VMW_RES_SURFACE ttm_driver_type1
76 #define VMW_RES_STREAM ttm_driver_type2
77 #define VMW_RES_FENCE ttm_driver_type3
78 #define VMW_RES_SHADER ttm_driver_type4
79
80 struct vmw_fpriv {
81 struct drm_master *locked_master;
82 struct ttm_object_file *tfile;
83 struct list_head fence_events;
84 bool gb_aware;
85 };
86
87 struct vmw_dma_buffer {
88 struct ttm_buffer_object base;
89 struct list_head res_list;
90 s32 pin_count;
91 /* Not ref-counted. Protected by binding_mutex */
92 struct vmw_resource *dx_query_ctx;
93 };
94
95 /**
96 * struct vmw_validate_buffer - Carries validation info about buffers.
97 *
98 * @base: Validation info for TTM.
99 * @hash: Hash entry for quick lookup of the TTM buffer object.
100 *
101 * This structure also contains driver-private validation info
102 * on top of the info needed by TTM.
103 */
104 struct vmw_validate_buffer {
105 struct ttm_validate_buffer base;
106 struct drm_hash_item hash;
107 bool validate_as_mob;
108 };
109
110 struct vmw_res_func;
111 struct vmw_resource {
112 struct kref kref;
113 struct vmw_private *dev_priv;
114 int id;
115 bool avail;
116 unsigned long backup_size;
117 bool res_dirty; /* Protected by backup buffer reserved */
118 bool backup_dirty; /* Protected by backup buffer reserved */
119 struct vmw_dma_buffer *backup;
120 unsigned long backup_offset;
121 unsigned long pin_count; /* Protected by resource reserved */
122 const struct vmw_res_func *func;
123 struct list_head lru_head; /* Protected by the resource lock */
124 struct list_head mob_head; /* Protected by @backup reserved */
125 struct list_head binding_head; /* Protected by binding_mutex */
126 void (*res_free) (struct vmw_resource *res);
127 void (*hw_destroy) (struct vmw_resource *res);
128 };
129
130
131 /*
132 * Resources that are managed using ioctls.
133 */
134 enum vmw_res_type {
135 vmw_res_context,
136 vmw_res_surface,
137 vmw_res_stream,
138 vmw_res_shader,
139 vmw_res_dx_context,
140 vmw_res_cotable,
141 vmw_res_view,
142 vmw_res_max
143 };
144
145 /*
146 * Resources that are managed using command streams.
147 */
148 enum vmw_cmdbuf_res_type {
149 vmw_cmdbuf_res_shader,
150 vmw_cmdbuf_res_view
151 };
152
153 struct vmw_cmdbuf_res_manager;
154
155 struct vmw_cursor_snooper {
156 struct drm_crtc *crtc;
157 size_t age;
158 uint32_t *image;
159 };
160
161 struct vmw_framebuffer;
162 struct vmw_surface_offset;
163
164 struct vmw_surface {
165 struct vmw_resource res;
166 uint32_t flags;
167 uint32_t format;
168 uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
169 struct drm_vmw_size base_size;
170 struct drm_vmw_size *sizes;
171 uint32_t num_sizes;
172 bool scanout;
173 uint32_t array_size;
174 /* TODO: so far just an extra pointer */
175 struct vmw_cursor_snooper snooper;
176 struct vmw_surface_offset *offsets;
177 SVGA3dTextureFilter autogen_filter;
178 uint32_t multisample_count;
179 struct list_head view_list;
180 };
181
182 struct vmw_marker_queue {
183 struct list_head head;
184 u64 lag;
185 u64 lag_time;
186 spinlock_t lock;
187 };
188
189 struct vmw_fifo_state {
190 unsigned long reserved_size;
191 u32 *dynamic_buffer;
192 u32 *static_buffer;
193 unsigned long static_buffer_size;
194 bool using_bounce_buffer;
195 uint32_t capabilities;
196 struct mutex fifo_mutex;
197 struct rw_semaphore rwsem;
198 struct vmw_marker_queue marker_queue;
199 bool dx;
200 };
201
202 struct vmw_relocation {
203 SVGAMobId *mob_loc;
204 SVGAGuestPtr *location;
205 uint32_t index;
206 };
207
208 /**
209 * struct vmw_res_cache_entry - resource information cache entry
210 *
211 * @valid: Whether the entry is valid, which also implies that the execbuf
212 * code holds a reference to the resource, and it's placed on the
213 * validation list.
214 * @handle: User-space handle of a resource.
215 * @res: Non-ref-counted pointer to the resource.
216 *
217 * Used to avoid frequent repeated user-space handle lookups of the
218 * same resource.
219 */
220 struct vmw_res_cache_entry {
221 bool valid;
222 uint32_t handle;
223 struct vmw_resource *res;
224 struct vmw_resource_val_node *node;
225 };
226
227 /**
228 * enum vmw_dma_map_mode - indicates how to perform TTM page DMA mappings.
229 */
230 enum vmw_dma_map_mode {
231 vmw_dma_phys, /* Use physical page addresses */
232 vmw_dma_alloc_coherent, /* Use TTM coherent pages */
233 vmw_dma_map_populate, /* Unmap from DMA just after unpopulate */
234 vmw_dma_map_bind, /* Unmap from DMA just before unbind */
235 vmw_dma_map_max
236 };
237
238 /**
239 * struct vmw_sg_table - Scatter/gather table for binding, with additional
240 * device-specific information.
241 *
242 * @sgt: Pointer to a struct sg_table with binding information
243 * @num_regions: Number of regions with device-address contiguous pages
244 */
245 struct vmw_sg_table {
246 enum vmw_dma_map_mode mode;
247 struct page **pages;
248 const dma_addr_t *addrs;
249 struct sg_table *sgt;
250 unsigned long num_regions;
251 unsigned long num_pages;
252 };
253
254 /**
255 * struct vmw_piter - Page iterator that iterates over a list of pages
256 * and DMA addresses that could be either a scatter-gather list or
257 * arrays
258 *
259 * @pages: Array of page pointers to the pages.
260 * @addrs: DMA addresses to the pages if coherent pages are used.
261 * @iter: Scatter-gather page iterator. Current position in SG list.
262 * @i: Current position in arrays.
263 * @num_pages: Number of pages total.
264 * @next: Function to advance the iterator. Returns false if past the list
265 * of pages, true otherwise.
266 * @dma_address: Function to return the DMA address of the current page.
267 */
268 struct vmw_piter {
269 struct page **pages;
270 const dma_addr_t *addrs;
271 struct sg_page_iter iter;
272 unsigned long i;
273 unsigned long num_pages;
274 bool (*next)(struct vmw_piter *);
275 dma_addr_t (*dma_address)(struct vmw_piter *);
276 struct page *(*page)(struct vmw_piter *);
277 };
278
279 /*
280 * enum vmw_display_unit_type - Describes the display unit
281 */
282 enum vmw_display_unit_type {
283 vmw_du_invalid = 0,
284 vmw_du_legacy,
285 vmw_du_screen_object,
286 vmw_du_screen_target
287 };
288
289
290 struct vmw_sw_context {
291 struct drm_open_hash res_ht;
292 bool res_ht_initialized;
293 bool kernel; /**< is the call made from the kernel */
294 struct vmw_fpriv *fp;
295 struct list_head validate_nodes;
296 struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
297 uint32_t cur_reloc;
298 struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
299 uint32_t cur_val_buf;
300 uint32_t *cmd_bounce;
301 uint32_t cmd_bounce_size;
302 struct list_head resource_list;
303 struct list_head ctx_resource_list; /* For contexts and cotables */
304 struct vmw_dma_buffer *cur_query_bo;
305 struct list_head res_relocations;
306 uint32_t *buf_start;
307 struct vmw_res_cache_entry res_cache[vmw_res_max];
308 struct vmw_resource *last_query_ctx;
309 bool needs_post_query_barrier;
310 struct vmw_resource *error_resource;
311 struct vmw_ctx_binding_state *staged_bindings;
312 bool staged_bindings_inuse;
313 struct list_head staged_cmd_res;
314 struct vmw_resource_val_node *dx_ctx_node;
315 struct vmw_dma_buffer *dx_query_mob;
316 struct vmw_resource *dx_query_ctx;
317 struct vmw_cmdbuf_res_manager *man;
318 };
319
320 struct vmw_legacy_display;
321 struct vmw_overlay;
322
323 struct vmw_master {
324 struct ttm_lock lock;
325 };
326
327 struct vmw_vga_topology_state {
328 uint32_t width;
329 uint32_t height;
330 uint32_t primary;
331 uint32_t pos_x;
332 uint32_t pos_y;
333 };
334
335
336 /*
337 * struct vmw_otable - Guest Memory Object table metadata
338 *
339 * @size: Size of the table (page-aligned).
340 * @page_table: Pointer to a struct vmw_mob holding the page table.
341 */
342 struct vmw_otable {
343 unsigned long size;
344 struct vmw_mob *page_table;
345 bool enabled;
346 };
347
348 struct vmw_otable_batch {
349 unsigned num_otables;
350 struct vmw_otable *otables;
351 struct vmw_resource *context;
352 struct ttm_buffer_object *otable_bo;
353 };
354
355 struct vmw_private {
356 struct ttm_bo_device bdev;
357 struct ttm_bo_global_ref bo_global_ref;
358 struct drm_global_reference mem_global_ref;
359
360 struct vmw_fifo_state fifo;
361
362 struct drm_device *dev;
363 unsigned long vmw_chipset;
364 unsigned int io_start;
365 uint32_t vram_start;
366 uint32_t vram_size;
367 uint32_t prim_bb_mem;
368 uint32_t mmio_start;
369 uint32_t mmio_size;
370 uint32_t fb_max_width;
371 uint32_t fb_max_height;
372 uint32_t texture_max_width;
373 uint32_t texture_max_height;
374 uint32_t stdu_max_width;
375 uint32_t stdu_max_height;
376 uint32_t initial_width;
377 uint32_t initial_height;
378 u32 *mmio_virt;
379 uint32_t capabilities;
380 uint32_t max_gmr_ids;
381 uint32_t max_gmr_pages;
382 uint32_t max_mob_pages;
383 uint32_t max_mob_size;
384 uint32_t memory_size;
385 bool has_gmr;
386 bool has_mob;
387 spinlock_t hw_lock;
388 spinlock_t cap_lock;
389 bool has_dx;
390
391 /*
392 * VGA registers.
393 */
394
395 struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
396 uint32_t vga_width;
397 uint32_t vga_height;
398 uint32_t vga_bpp;
399 uint32_t vga_bpl;
400 uint32_t vga_pitchlock;
401
402 uint32_t num_displays;
403
404 /*
405 * Framebuffer info.
406 */
407
408 void *fb_info;
409 enum vmw_display_unit_type active_display_unit;
410 struct vmw_legacy_display *ldu_priv;
411 struct vmw_screen_object_display *sou_priv;
412 struct vmw_overlay *overlay_priv;
413
414 /*
415 * Context and surface management.
416 */
417
418 rwlock_t resource_lock;
419 struct idr res_idr[vmw_res_max];
420 /*
421 * Block lastclose from racing with firstopen.
422 */
423
424 struct mutex init_mutex;
425
426 /*
427 * A resource manager for kernel-only surfaces and
428 * contexts.
429 */
430
431 struct ttm_object_device *tdev;
432
433 /*
434 * Fencing and IRQs.
435 */
436
437 atomic_t marker_seq;
438 wait_queue_head_t fence_queue;
439 wait_queue_head_t fifo_queue;
440 spinlock_t waiter_lock;
441 int fence_queue_waiters; /* Protected by waiter_lock */
442 int goal_queue_waiters; /* Protected by waiter_lock */
443 int cmdbuf_waiters; /* Protected by waiter_lock */
444 int error_waiters; /* Protected by waiter_lock */
445 int fifo_queue_waiters; /* Protected by waiter_lock */
446 uint32_t last_read_seqno;
447 struct vmw_fence_manager *fman;
448 uint32_t irq_mask; /* Updates protected by waiter_lock */
449
450 /*
451 * Device state
452 */
453
454 uint32_t traces_state;
455 uint32_t enable_state;
456 uint32_t config_done_state;
457
458 /**
459 * Execbuf
460 */
461 /**
462 * Protected by the cmdbuf mutex.
463 */
464
465 struct vmw_sw_context ctx;
466 struct mutex cmdbuf_mutex;
467 struct mutex binding_mutex;
468
469 /**
470 * Operating mode.
471 */
472
473 bool stealth;
474 bool enable_fb;
475 spinlock_t svga_lock;
476
477 /**
478 * Master management.
479 */
480
481 struct vmw_master *active_master;
482 struct vmw_master fbdev_master;
483 struct notifier_block pm_nb;
484 bool suspended;
485 bool refuse_hibernation;
486
487 struct mutex release_mutex;
488 atomic_t num_fifo_resources;
489
490 /*
491 * Replace this with an rwsem as soon as we have down_xx_interruptible()
492 */
493 struct ttm_lock reservation_sem;
494
495 /*
496 * Query processing. These members
497 * are protected by the cmdbuf mutex.
498 */
499
500 struct vmw_dma_buffer *dummy_query_bo;
501 struct vmw_dma_buffer *pinned_bo;
502 uint32_t query_cid;
503 uint32_t query_cid_valid;
504 bool dummy_query_bo_pinned;
505
506 /*
507 * Surface swapping. The "surface_lru" list is protected by the
508 * resource lock in order to be able to destroy a surface and take
509 * it off the lru atomically. "used_memory_size" is currently
510 * protected by the cmdbuf mutex for simplicity.
511 */
512
513 struct list_head res_lru[vmw_res_max];
514 uint32_t used_memory_size;
515
516 /*
517 * DMA mapping stuff.
518 */
519 enum vmw_dma_map_mode map_mode;
520
521 /*
522 * Guest Backed stuff
523 */
524 struct vmw_otable_batch otable_batch;
525
526 struct vmw_cmdbuf_man *cman;
527 };
528
529 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
530 {
531 return container_of(res, struct vmw_surface, res);
532 }
533
534 static inline struct vmw_private *vmw_priv(struct drm_device *dev)
535 {
536 return (struct vmw_private *)dev->dev_private;
537 }
538
539 static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
540 {
541 return (struct vmw_fpriv *)file_priv->driver_priv;
542 }
543
544 static inline struct vmw_master *vmw_master(struct drm_master *master)
545 {
546 return (struct vmw_master *) master->driver_priv;
547 }
548
549 /*
550 * The locking here is fine-grained: the hw lock is taken and released
551 * around every read and write operation. This is of course costly, but we
552 * don't perform much register access in timing-critical paths anyway. In
553 * return we can be sure that we never forget the hw lock around register
554 * accesses.
555 */
556 static inline void vmw_write(struct vmw_private *dev_priv,
557 unsigned int offset, uint32_t value)
558 {
559 unsigned long irq_flags;
560
561 spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
562 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
563 outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
564 spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
565 }
566
567 static inline uint32_t vmw_read(struct vmw_private *dev_priv,
568 unsigned int offset)
569 {
570 unsigned long irq_flags;
571 u32 val;
572
573 spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
574 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
575 val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
576 spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
577
578 return val;
579 }
580
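/*
 * Illustrative register-access sketch (not part of this file): each
 * vmw_read()/vmw_write() call takes the hw lock internally, so a typical
 * caller can simply do, e.g.:
 *
 *	u32 traces = vmw_read(dev_priv, SVGA_REG_TRACES);
 *	vmw_write(dev_priv, SVGA_REG_TRACES, 0);
 */
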
581 extern void vmw_svga_enable(struct vmw_private *dev_priv);
582 extern void vmw_svga_disable(struct vmw_private *dev_priv);
583
584
585 /**
586 * GMR utilities - vmwgfx_gmr.c
587 */
588
589 extern int vmw_gmr_bind(struct vmw_private *dev_priv,
590 const struct vmw_sg_table *vsgt,
591 unsigned long num_pages,
592 int gmr_id);
593 extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
594
595 /**
596 * Resource utilities - vmwgfx_resource.c
597 */
598 struct vmw_user_resource_conv;
599
600 extern void vmw_resource_unreference(struct vmw_resource **p_res);
601 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
602 extern struct vmw_resource *
603 vmw_resource_reference_unless_doomed(struct vmw_resource *res);
604 extern int vmw_resource_validate(struct vmw_resource *res);
605 extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
606 bool no_backup);
607 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
608 extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
609 struct ttm_object_file *tfile,
610 uint32_t handle,
611 struct vmw_surface **out_surf,
612 struct vmw_dma_buffer **out_buf);
613 extern int vmw_user_resource_lookup_handle(
614 struct vmw_private *dev_priv,
615 struct ttm_object_file *tfile,
616 uint32_t handle,
617 const struct vmw_user_resource_conv *converter,
618 struct vmw_resource **p_res);
619 extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
620 extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
621 struct vmw_dma_buffer *vmw_bo,
622 size_t size, struct ttm_placement *placement,
623 bool interruptible,
624 void (*bo_free) (struct ttm_buffer_object *bo));
625 extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
626 struct ttm_object_file *tfile);
627 extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
628 struct ttm_object_file *tfile,
629 uint32_t size,
630 bool shareable,
631 uint32_t *handle,
632 struct vmw_dma_buffer **p_dma_buf,
633 struct ttm_base_object **p_base);
634 extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
635 struct vmw_dma_buffer *dma_buf,
636 uint32_t *handle);
637 extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
638 struct drm_file *file_priv);
639 extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
640 struct drm_file *file_priv);
641 extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
642 struct drm_file *file_priv);
643 extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
644 uint32_t cur_validate_node);
645 extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
646 extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
647 uint32_t id, struct vmw_dma_buffer **out,
648 struct ttm_base_object **base);
649 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
650 struct drm_file *file_priv);
651 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
652 struct drm_file *file_priv);
653 extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
654 struct ttm_object_file *tfile,
655 uint32_t *inout_id,
656 struct vmw_resource **out);
657 extern void vmw_resource_unreserve(struct vmw_resource *res,
658 bool switch_backup,
659 struct vmw_dma_buffer *new_backup,
660 unsigned long new_backup_offset);
661 extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
662 struct ttm_mem_reg *mem);
663 extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
664 struct ttm_mem_reg *mem);
665 extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
666 extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
667 struct vmw_fence_obj *fence);
668 extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
669
670 /**
671 * DMA buffer helper routines - vmwgfx_dmabuf.c
672 */
673 extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
674 struct vmw_dma_buffer *bo,
675 struct ttm_placement *placement,
676 bool interruptible);
677 extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
678 struct vmw_dma_buffer *buf,
679 bool interruptible);
680 extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
681 struct vmw_dma_buffer *buf,
682 bool interruptible);
683 extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
684 struct vmw_dma_buffer *bo,
685 bool interruptible);
686 extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
687 struct vmw_dma_buffer *bo,
688 bool interruptible);
689 extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
690 SVGAGuestPtr *ptr);
691 extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
692
693 /**
694 * Misc Ioctl functionality - vmwgfx_ioctl.c
695 */
696
697 extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
698 struct drm_file *file_priv);
699 extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
700 struct drm_file *file_priv);
701 extern int vmw_present_ioctl(struct drm_device *dev, void *data,
702 struct drm_file *file_priv);
703 extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
704 struct drm_file *file_priv);
705 extern unsigned int vmw_fops_poll(struct file *filp,
706 struct poll_table_struct *wait);
707 extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
708 size_t count, loff_t *offset);
709
710 /**
711 * Fifo utilities - vmwgfx_fifo.c
712 */
713
714 extern int vmw_fifo_init(struct vmw_private *dev_priv,
715 struct vmw_fifo_state *fifo);
716 extern void vmw_fifo_release(struct vmw_private *dev_priv,
717 struct vmw_fifo_state *fifo);
718 extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
719 extern void *
720 vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
721 extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
722 extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
723 extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
724 uint32_t *seqno);
725 extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
726 extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
727 extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
728 extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
729 extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
730 uint32_t cid);
731 extern int vmw_fifo_flush(struct vmw_private *dev_priv,
732 bool interruptible);
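/*
 * Typical reserve/commit submission pattern (illustrative sketch only;
 * real callers build an SVGA command at @cmd):
 *
 *	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *	... fill in sizeof(*cmd) bytes of command data ...
 *	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 */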
733
734 /**
735 * TTM glue - vmwgfx_ttm_glue.c
736 */
737
738 extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
739 extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
740 extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
741
742 /**
743 * TTM buffer object driver - vmwgfx_buffer.c
744 */
745
746 extern const size_t vmw_tt_size;
747 extern struct ttm_placement vmw_vram_placement;
748 extern struct ttm_placement vmw_vram_ne_placement;
749 extern struct ttm_placement vmw_vram_sys_placement;
750 extern struct ttm_placement vmw_vram_gmr_placement;
751 extern struct ttm_placement vmw_vram_gmr_ne_placement;
752 extern struct ttm_placement vmw_sys_placement;
753 extern struct ttm_placement vmw_sys_ne_placement;
754 extern struct ttm_placement vmw_evictable_placement;
755 extern struct ttm_placement vmw_srf_placement;
756 extern struct ttm_placement vmw_mob_placement;
757 extern struct ttm_placement vmw_mob_ne_placement;
758 extern struct ttm_bo_driver vmw_bo_driver;
759 extern int vmw_dma_quiescent(struct drm_device *dev);
760 extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
761 extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
762 extern const struct vmw_sg_table *
763 vmw_bo_sg_table(struct ttm_buffer_object *bo);
764 extern void vmw_piter_start(struct vmw_piter *viter,
765 const struct vmw_sg_table *vsgt,
766 unsigned long p_offs);
767
768 /**
769 * vmw_piter_next - Advance the iterator one page.
770 *
771 * @viter: Pointer to the iterator to advance.
772 *
773 * Returns false if past the list of pages, true otherwise.
774 */
775 static inline bool vmw_piter_next(struct vmw_piter *viter)
776 {
777 return viter->next(viter);
778 }
779
780 /**
781 * vmw_piter_dma_addr - Return the DMA address of the current page.
782 *
783 * @viter: Pointer to the iterator
784 *
785 * Returns the DMA address of the page pointed to by @viter.
786 */
787 static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
788 {
789 return viter->dma_address(viter);
790 }
791
792 /**
793 * vmw_piter_page - Return a pointer to the current page.
794 *
795 * @viter: Pointer to the iterator
796 *
797 * Returns a pointer to the page pointed to by @viter.
798 */
799 static inline struct page *vmw_piter_page(struct vmw_piter *viter)
800 {
801 return viter->page(viter);
802 }
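/*
 * Illustrative iteration sketch (assumes a populated struct vmw_sg_table
 * pointer @vsgt; names are examples only):
 *
 *	struct vmw_piter iter;
 *
 *	vmw_piter_start(&iter, vsgt, 0);
 *	while (vmw_piter_next(&iter)) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *		...
 *	}
 */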
803
804 /**
805 * Command submission - vmwgfx_execbuf.c
806 */
807
808 extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
809 struct drm_file *file_priv, size_t size);
810 extern int vmw_execbuf_process(struct drm_file *file_priv,
811 struct vmw_private *dev_priv,
812 void __user *user_commands,
813 void *kernel_commands,
814 uint32_t command_size,
815 uint64_t throttle_us,
816 uint32_t dx_context_handle,
817 struct drm_vmw_fence_rep __user
818 *user_fence_rep,
819 struct vmw_fence_obj **out_fence);
820 extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
821 struct vmw_fence_obj *fence);
822 extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
823
824 extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
825 struct vmw_private *dev_priv,
826 struct vmw_fence_obj **p_fence,
827 uint32_t *p_handle);
828 extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
829 struct vmw_fpriv *vmw_fp,
830 int ret,
831 struct drm_vmw_fence_rep __user
832 *user_fence_rep,
833 struct vmw_fence_obj *fence,
834 uint32_t fence_handle);
835 extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
836 struct ttm_buffer_object *bo,
837 bool interruptible,
838 bool validate_as_mob);
839
840
841 /**
842 * IRQs and waiting - vmwgfx_irq.c
843 */
844
845 extern irqreturn_t vmw_irq_handler(int irq, void *arg);
846 extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
847 uint32_t seqno, bool interruptible,
848 unsigned long timeout);
849 extern void vmw_irq_preinstall(struct drm_device *dev);
850 extern int vmw_irq_postinstall(struct drm_device *dev);
851 extern void vmw_irq_uninstall(struct drm_device *dev);
852 extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
853 uint32_t seqno);
854 extern int vmw_fallback_wait(struct vmw_private *dev_priv,
855 bool lazy,
856 bool fifo_idle,
857 uint32_t seqno,
858 bool interruptible,
859 unsigned long timeout);
860 extern void vmw_update_seqno(struct vmw_private *dev_priv,
861 struct vmw_fifo_state *fifo_state);
862 extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
863 extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
864 extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
865 extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
866 extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
867 int *waiter_count);
868 extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
869 u32 flag, int *waiter_count);
870
871 /**
872 * Rudimentary fence-like objects currently used only for throttling -
873 * vmwgfx_marker.c
874 */
875
876 extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
877 extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
878 extern int vmw_marker_push(struct vmw_marker_queue *queue,
879 uint32_t seqno);
880 extern int vmw_marker_pull(struct vmw_marker_queue *queue,
881 uint32_t signaled_seqno);
882 extern int vmw_wait_lag(struct vmw_private *dev_priv,
883 struct vmw_marker_queue *queue, uint32_t us);
884
885 /**
886 * Kernel framebuffer - vmwgfx_fb.c
887 */
888
889 int vmw_fb_init(struct vmw_private *vmw_priv);
890 int vmw_fb_close(struct vmw_private *dev_priv);
891 int vmw_fb_off(struct vmw_private *vmw_priv);
892 int vmw_fb_on(struct vmw_private *vmw_priv);
893
894 /**
895 * Kernel modesetting - vmwgfx_kms.c
896 */
897
898 int vmw_kms_init(struct vmw_private *dev_priv);
899 int vmw_kms_close(struct vmw_private *dev_priv);
900 int vmw_kms_save_vga(struct vmw_private *vmw_priv);
901 int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
902 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
903 struct drm_file *file_priv);
904 void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
905 void vmw_kms_cursor_snoop(struct vmw_surface *srf,
906 struct ttm_object_file *tfile,
907 struct ttm_buffer_object *bo,
908 SVGA3dCmdHeader *header);
909 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
910 unsigned width, unsigned height, unsigned pitch,
911 unsigned bpp, unsigned depth);
912 void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
913 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
914 uint32_t pitch,
915 uint32_t height);
916 u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
917 int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
918 void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
919 int vmw_kms_present(struct vmw_private *dev_priv,
920 struct drm_file *file_priv,
921 struct vmw_framebuffer *vfb,
922 struct vmw_surface *surface,
923 uint32_t sid, int32_t destX, int32_t destY,
924 struct drm_vmw_rect *clips,
925 uint32_t num_clips);
926 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
927 struct drm_file *file_priv);
928 void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
929
930 int vmw_dumb_create(struct drm_file *file_priv,
931 struct drm_device *dev,
932 struct drm_mode_create_dumb *args);
933
934 int vmw_dumb_map_offset(struct drm_file *file_priv,
935 struct drm_device *dev, uint32_t handle,
936 uint64_t *offset);
937 int vmw_dumb_destroy(struct drm_file *file_priv,
938 struct drm_device *dev,
939 uint32_t handle);
940 extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
941 extern void vmw_resource_unpin(struct vmw_resource *res);
942 extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
943
944 /**
945 * Overlay control - vmwgfx_overlay.c
946 */
947
948 int vmw_overlay_init(struct vmw_private *dev_priv);
949 int vmw_overlay_close(struct vmw_private *dev_priv);
950 int vmw_overlay_ioctl(struct drm_device *dev, void *data,
951 struct drm_file *file_priv);
952 int vmw_overlay_stop_all(struct vmw_private *dev_priv);
953 int vmw_overlay_resume_all(struct vmw_private *dev_priv);
954 int vmw_overlay_pause_all(struct vmw_private *dev_priv);
955 int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
956 int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
957 int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
958 int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
959
960 /**
961 * GMR Id manager
962 */
963
964 extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
965
966 /**
967 * Prime - vmwgfx_prime.c
968 */
969
970 extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
971 extern int vmw_prime_fd_to_handle(struct drm_device *dev,
972 struct drm_file *file_priv,
973 int fd, u32 *handle);
974 extern int vmw_prime_handle_to_fd(struct drm_device *dev,
975 struct drm_file *file_priv,
976 uint32_t handle, uint32_t flags,
977 int *prime_fd);
978
979 /*
980 * Memory object (MOB) management - vmwgfx_mob.c
981 */
982 struct vmw_mob;
983 extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
984 const struct vmw_sg_table *vsgt,
985 unsigned long num_data_pages, int32_t mob_id);
986 extern void vmw_mob_unbind(struct vmw_private *dev_priv,
987 struct vmw_mob *mob);
988 extern void vmw_mob_destroy(struct vmw_mob *mob);
989 extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
990 extern int vmw_otables_setup(struct vmw_private *dev_priv);
991 extern void vmw_otables_takedown(struct vmw_private *dev_priv);
992
993 /*
994 * Context management - vmwgfx_context.c
995 */
996
997 extern const struct vmw_user_resource_conv *user_context_converter;
998
999 extern int vmw_context_check(struct vmw_private *dev_priv,
1000 struct ttm_object_file *tfile,
1001 int id,
1002 struct vmw_resource **p_res);
1003 extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
1004 struct drm_file *file_priv);
1005 extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
1006 struct drm_file *file_priv);
1007 extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
1008 struct drm_file *file_priv);
1009 extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
1010 extern struct vmw_cmdbuf_res_manager *
1011 vmw_context_res_man(struct vmw_resource *ctx);
1012 extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
1013 SVGACOTableType cotable_type);
1014 extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
1015 struct vmw_ctx_binding_state;
1016 extern struct vmw_ctx_binding_state *
1017 vmw_context_binding_state(struct vmw_resource *ctx);
1018 extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
1019 bool readback);
1020 extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
1021 struct vmw_dma_buffer *mob);
1022 extern struct vmw_dma_buffer *
1023 vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
1024
1025
1026 /*
1027 * Surface management - vmwgfx_surface.c
1028 */
1029
1030 extern const struct vmw_user_resource_conv *user_surface_converter;
1031
1032 extern void vmw_surface_res_free(struct vmw_resource *res);
1033 extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
1034 struct drm_file *file_priv);
1035 extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
1036 struct drm_file *file_priv);
1037 extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
1038 struct drm_file *file_priv);
1039 extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
1040 struct drm_file *file_priv);
1041 extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
1042 struct drm_file *file_priv);
1043 extern int vmw_surface_check(struct vmw_private *dev_priv,
1044 struct ttm_object_file *tfile,
1045 uint32_t handle, int *id);
1046 extern int vmw_surface_validate(struct vmw_private *dev_priv,
1047 struct vmw_surface *srf);
1048 int vmw_surface_gb_priv_define(struct drm_device *dev,
1049 uint32_t user_accounting_size,
1050 uint32_t svga3d_flags,
1051 SVGA3dSurfaceFormat format,
1052 bool for_scanout,
1053 uint32_t num_mip_levels,
1054 uint32_t multisample_count,
1055 uint32_t array_size,
1056 struct drm_vmw_size size,
1057 struct vmw_surface **srf_out);
1058
1059 /*
1060 * Shader management - vmwgfx_shader.c
1061 */
1062
1063 extern const struct vmw_user_resource_conv *user_shader_converter;
1064
1065 extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
1066 struct drm_file *file_priv);
1067 extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
1068 struct drm_file *file_priv);
1069 extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
1070 struct vmw_cmdbuf_res_manager *man,
1071 u32 user_key, const void *bytecode,
1072 SVGA3dShaderType shader_type,
1073 size_t size,
1074 struct list_head *list);
1075 extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
1076 u32 user_key, SVGA3dShaderType shader_type,
1077 struct list_head *list);
1078 extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
1079 struct vmw_resource *ctx,
1080 u32 user_key,
1081 SVGA3dShaderType shader_type,
1082 struct list_head *list);
1083 extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
1084 struct list_head *list,
1085 bool readback);
1086
1087 extern struct vmw_resource *
1088 vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
1089 u32 user_key, SVGA3dShaderType shader_type);
1090
1091 /*
1092 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
1093 */
1094
1095 extern struct vmw_cmdbuf_res_manager *
1096 vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
1097 extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
1098 extern size_t vmw_cmdbuf_res_man_size(void);
1099 extern struct vmw_resource *
1100 vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
1101 enum vmw_cmdbuf_res_type res_type,
1102 u32 user_key);
1103 extern void vmw_cmdbuf_res_revert(struct list_head *list);
1104 extern void vmw_cmdbuf_res_commit(struct list_head *list);
1105 extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
1106 enum vmw_cmdbuf_res_type res_type,
1107 u32 user_key,
1108 struct vmw_resource *res,
1109 struct list_head *list);
1110 extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
1111 enum vmw_cmdbuf_res_type res_type,
1112 u32 user_key,
1113 struct list_head *list,
1114 struct vmw_resource **res);
1115
1116 /*
1117 * COTable management - vmwgfx_cotable.c
1118 */
1119 extern const SVGACOTableType vmw_cotable_scrub_order[];
1120 extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
1121 struct vmw_resource *ctx,
1122 u32 type);
1123 extern int vmw_cotable_notify(struct vmw_resource *res, int id);
1124 extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
1125 extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
1126 struct list_head *head);
1127
1128 /*
1129 * Command buffer management - vmwgfx_cmdbuf.c
1130 */
1131 struct vmw_cmdbuf_man;
1132 struct vmw_cmdbuf_header;
1133
1134 extern struct vmw_cmdbuf_man *
1135 vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
1136 extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
1137 size_t size, size_t default_size);
1138 extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
1139 extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
1140 extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
1141 unsigned long timeout);
1142 extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
1143 int ctx_id, bool interruptible,
1144 struct vmw_cmdbuf_header *header);
1145 extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
1146 struct vmw_cmdbuf_header *header,
1147 bool flush);
1148 extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
1149 extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
1150 size_t size, bool interruptible,
1151 struct vmw_cmdbuf_header **p_header);
1152 extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
1153 extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
1154 bool interruptible);
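/*
 * Command buffer usage mirrors the FIFO reserve/commit pattern
 * (illustrative sketch only, with a NULL header so the manager
 * allocates one internally):
 *
 *	cmd = vmw_cmdbuf_reserve(man, size, ctx_id, true, NULL);
 *	if (IS_ERR_OR_NULL(cmd))
 *		return -ENOMEM;
 *	... build the device command at @cmd ...
 *	vmw_cmdbuf_commit(man, size, NULL, false);
 */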
1155
1156
1157 /**
1158 * Inline helper functions
1159 */
1160
1161 static inline void vmw_surface_unreference(struct vmw_surface **srf)
1162 {
1163 struct vmw_surface *tmp_srf = *srf;
1164 struct vmw_resource *res = &tmp_srf->res;
1165 *srf = NULL;
1166
1167 vmw_resource_unreference(&res);
1168 }
1169
1170 static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
1171 {
1172 (void) vmw_resource_reference(&srf->res);
1173 return srf;
1174 }
1175
1176 static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
1177 {
1178 struct vmw_dma_buffer *tmp_buf = *buf;
1179
1180 *buf = NULL;
1181 if (tmp_buf != NULL) {
1182 struct ttm_buffer_object *bo = &tmp_buf->base;
1183
1184 ttm_bo_unref(&bo);
1185 }
1186 }
1187
1188 static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
1189 {
1190 if (ttm_bo_reference(&buf->base))
1191 return buf;
1192 return NULL;
1193 }
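/*
 * Illustrative refcounting sketch for the helpers above (names are
 * examples only):
 *
 *	struct vmw_dma_buffer *buf = vmw_dmabuf_reference(src_buf);
 *	...
 *	vmw_dmabuf_unreference(&buf);
 */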
1194
1195 static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
1196 {
1197 return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
1198 }
1199
1200 static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
1201 {
1202 atomic_inc(&dev_priv->num_fifo_resources);
1203 }
1204
1205 static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
1206 {
1207 atomic_dec(&dev_priv->num_fifo_resources);
1208 }
1209
1210 /**
1211 * vmw_mmio_read - Perform an MMIO read from volatile memory
1212 *
1213 * @addr: The address to read from
1214 *
1215 * This function is intended to be equivalent to ioread32() on
1216 * memremap'd memory, but without byteswapping.
1217 */
1218 static inline u32 vmw_mmio_read(u32 *addr)
1219 {
1220 return READ_ONCE(*addr);
1221 }
1222
1223 /**
1224 * vmw_mmio_write - Perform an MMIO write to volatile memory
1225 *
1226 * @addr: The address to write to
1227 *
1228 * This function is intended to be equivalent to iowrite32 on
1229 * memremap'd memory, but without byteswapping.
1230 */
1231 static inline void vmw_mmio_write(u32 value, u32 *addr)
1232 {
1233 WRITE_ONCE(*addr, value);
1234 }
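/*
 * Illustrative sketch: reading a FIFO register through the memremap'd
 * area pointed to by dev_priv->mmio_virt (names as used elsewhere in
 * the driver):
 *
 *	u32 *fifo_mem = dev_priv->mmio_virt;
 *	u32 fifo_min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
 */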
1235 #endif