Commit | Line | Data |
---|---|---|
fb1d9738 JB |
1 | /************************************************************************** |
2 | * | |
3 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA | |
4 | * All Rights Reserved. | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | |
7 | * copy of this software and associated documentation files (the | |
8 | * "Software"), to deal in the Software without restriction, including | |
9 | * without limitation the rights to use, copy, modify, merge, publish, | |
10 | * distribute, sub license, and/or sell copies of the Software, and to | |
11 | * permit persons to whom the Software is furnished to do so, subject to | |
12 | * the following conditions: | |
13 | * | |
14 | * The above copyright notice and this permission notice (including the | |
15 | * next paragraph) shall be included in all copies or substantial portions | |
16 | * of the Software. | |
17 | * | |
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | |
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | |
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | |
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | |
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | |
25 | * | |
26 | **************************************************************************/ | |
27 | ||
28 | #include "drmP.h" | |
29 | #include "vmwgfx_drv.h" | |
30 | #include "ttm/ttm_placement.h" | |
31 | #include "ttm/ttm_bo_driver.h" | |
32 | #include "ttm/ttm_object.h" | |
33 | #include "ttm/ttm_module.h" | |
34 | ||
35 | #define VMWGFX_DRIVER_NAME "vmwgfx" | |
36 | #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" | |
37 | #define VMWGFX_CHIP_SVGAII 0 | |
38 | #define VMW_FB_RESERVATION 0 | |
39 | ||
40 | /** | |
41 | * Fully encoded drm commands. Might move to vmw_drm.h | |
42 | */ | |
43 | ||
44 | #define DRM_IOCTL_VMW_GET_PARAM \ | |
45 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \ | |
46 | struct drm_vmw_getparam_arg) | |
47 | #define DRM_IOCTL_VMW_ALLOC_DMABUF \ | |
48 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \ | |
49 | union drm_vmw_alloc_dmabuf_arg) | |
50 | #define DRM_IOCTL_VMW_UNREF_DMABUF \ | |
51 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \ | |
52 | struct drm_vmw_unref_dmabuf_arg) | |
53 | #define DRM_IOCTL_VMW_CURSOR_BYPASS \ | |
54 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \ | |
55 | struct drm_vmw_cursor_bypass_arg) | |
56 | ||
57 | #define DRM_IOCTL_VMW_CONTROL_STREAM \ | |
58 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \ | |
59 | struct drm_vmw_control_stream_arg) | |
60 | #define DRM_IOCTL_VMW_CLAIM_STREAM \ | |
61 | DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \ | |
62 | struct drm_vmw_stream_arg) | |
63 | #define DRM_IOCTL_VMW_UNREF_STREAM \ | |
64 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \ | |
65 | struct drm_vmw_stream_arg) | |
66 | ||
67 | #define DRM_IOCTL_VMW_CREATE_CONTEXT \ | |
68 | DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \ | |
69 | struct drm_vmw_context_arg) | |
70 | #define DRM_IOCTL_VMW_UNREF_CONTEXT \ | |
71 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \ | |
72 | struct drm_vmw_context_arg) | |
73 | #define DRM_IOCTL_VMW_CREATE_SURFACE \ | |
74 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \ | |
75 | union drm_vmw_surface_create_arg) | |
76 | #define DRM_IOCTL_VMW_UNREF_SURFACE \ | |
77 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \ | |
78 | struct drm_vmw_surface_arg) | |
79 | #define DRM_IOCTL_VMW_REF_SURFACE \ | |
80 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \ | |
81 | union drm_vmw_surface_reference_arg) | |
82 | #define DRM_IOCTL_VMW_EXECBUF \ | |
83 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \ | |
84 | struct drm_vmw_execbuf_arg) | |
85 | #define DRM_IOCTL_VMW_FIFO_DEBUG \ | |
86 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FIFO_DEBUG, \ | |
87 | struct drm_vmw_fifo_debug_arg) | |
88 | #define DRM_IOCTL_VMW_FENCE_WAIT \ | |
89 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ | |
90 | struct drm_vmw_fence_wait_arg) | |
d8bd19d2 JB |
91 | #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ |
92 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ | |
93 | struct drm_vmw_update_layout_arg) | |
fb1d9738 JB |
94 | |
95 | ||
96 | /** | |
97 | * The core DRM version of this macro doesn't account for | |
98 | * DRM_COMMAND_BASE. | |
99 | */ | |
100 | ||
101 | #define VMW_IOCTL_DEF(ioctl, func, flags) \ | |
1b2f1489 | 102 | [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl} |
fb1d9738 JB |
103 | |
104 | /** | |
105 | * Ioctl definitions. | |
106 | */ | |
107 | ||
108 | static struct drm_ioctl_desc vmw_ioctls[] = { | |
1b2f1489 | 109 | VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, |
e1f78003 | 110 | DRM_AUTH | DRM_UNLOCKED), |
1b2f1489 | 111 | VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, |
e1f78003 | 112 | DRM_AUTH | DRM_UNLOCKED), |
1b2f1489 | 113 | VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, |
e1f78003 | 114 | DRM_AUTH | DRM_UNLOCKED), |
1b2f1489 | 115 | VMW_IOCTL_DEF(VMW_CURSOR_BYPASS, |
e1f78003 TH |
116 | vmw_kms_cursor_bypass_ioctl, |
117 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), | |
fb1d9738 | 118 | |
1b2f1489 | 119 | VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl, |
e1f78003 | 120 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
1b2f1489 | 121 | VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, |
e1f78003 | 122 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
1b2f1489 | 123 | VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl, |
e1f78003 | 124 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
fb1d9738 | 125 | |
1b2f1489 | 126 | VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, |
e1f78003 | 127 | DRM_AUTH | DRM_UNLOCKED), |
1b2f1489 | 128 | VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, |
e1f78003 | 129 | DRM_AUTH | DRM_UNLOCKED), |
1b2f1489 | 130 | VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl, |
e1f78003 | 131 | DRM_AUTH | DRM_UNLOCKED), |
1b2f1489 | 132 | VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, |
e1f78003 | 133 | DRM_AUTH | DRM_UNLOCKED), |
1b2f1489 | 134 | VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, |
e1f78003 | 135 | DRM_AUTH | DRM_UNLOCKED), |
1b2f1489 | 136 | VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, |
e1f78003 | 137 | DRM_AUTH | DRM_UNLOCKED), |
1b2f1489 | 138 | VMW_IOCTL_DEF(VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, |
e1f78003 | 139 | DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED), |
1b2f1489 | 140 | VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl, |
d8bd19d2 | 141 | DRM_AUTH | DRM_UNLOCKED), |
1b2f1489 | 142 | VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl, |
d8bd19d2 | 143 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED) |
fb1d9738 JB |
144 | }; |
145 | ||
146 | static struct pci_device_id vmw_pci_id_list[] = { | |
147 | {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII}, | |
148 | {0, 0, 0} | |
149 | }; | |
150 | ||
30c78bb8 | 151 | static int enable_fbdev; |
fb1d9738 JB |
152 | |
153 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); | |
154 | static void vmw_master_init(struct vmw_master *); | |
d9f36a00 TH |
155 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, |
156 | void *ptr); | |
fb1d9738 | 157 | |
30c78bb8 TH |
158 | MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); |
159 | module_param_named(enable_fbdev, enable_fbdev, int, 0600); | |
160 | ||
fb1d9738 JB |
161 | static void vmw_print_capabilities(uint32_t capabilities) |
162 | { | |
163 | DRM_INFO("Capabilities:\n"); | |
164 | if (capabilities & SVGA_CAP_RECT_COPY) | |
165 | DRM_INFO(" Rect copy.\n"); | |
166 | if (capabilities & SVGA_CAP_CURSOR) | |
167 | DRM_INFO(" Cursor.\n"); | |
168 | if (capabilities & SVGA_CAP_CURSOR_BYPASS) | |
169 | DRM_INFO(" Cursor bypass.\n"); | |
170 | if (capabilities & SVGA_CAP_CURSOR_BYPASS_2) | |
171 | DRM_INFO(" Cursor bypass 2.\n"); | |
172 | if (capabilities & SVGA_CAP_8BIT_EMULATION) | |
173 | DRM_INFO(" 8bit emulation.\n"); | |
174 | if (capabilities & SVGA_CAP_ALPHA_CURSOR) | |
175 | DRM_INFO(" Alpha cursor.\n"); | |
176 | if (capabilities & SVGA_CAP_3D) | |
177 | DRM_INFO(" 3D.\n"); | |
178 | if (capabilities & SVGA_CAP_EXTENDED_FIFO) | |
179 | DRM_INFO(" Extended Fifo.\n"); | |
180 | if (capabilities & SVGA_CAP_MULTIMON) | |
181 | DRM_INFO(" Multimon.\n"); | |
182 | if (capabilities & SVGA_CAP_PITCHLOCK) | |
183 | DRM_INFO(" Pitchlock.\n"); | |
184 | if (capabilities & SVGA_CAP_IRQMASK) | |
185 | DRM_INFO(" Irq mask.\n"); | |
186 | if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) | |
187 | DRM_INFO(" Display Topology.\n"); | |
188 | if (capabilities & SVGA_CAP_GMR) | |
189 | DRM_INFO(" GMR.\n"); | |
190 | if (capabilities & SVGA_CAP_TRACES) | |
191 | DRM_INFO(" Traces.\n"); | |
192 | } | |
193 | ||
194 | static int vmw_request_device(struct vmw_private *dev_priv) | |
195 | { | |
196 | int ret; | |
197 | ||
fb1d9738 JB |
198 | ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); |
199 | if (unlikely(ret != 0)) { | |
200 | DRM_ERROR("Unable to initialize FIFO.\n"); | |
201 | return ret; | |
202 | } | |
203 | ||
204 | return 0; | |
205 | } | |
206 | ||
207 | static void vmw_release_device(struct vmw_private *dev_priv) | |
208 | { | |
209 | vmw_fifo_release(dev_priv, &dev_priv->fifo); | |
30c78bb8 TH |
210 | } |
211 | ||
212 | int vmw_3d_resource_inc(struct vmw_private *dev_priv) | |
213 | { | |
214 | int ret = 0; | |
215 | ||
216 | mutex_lock(&dev_priv->release_mutex); | |
217 | if (unlikely(dev_priv->num_3d_resources++ == 0)) { | |
218 | ret = vmw_request_device(dev_priv); | |
219 | if (unlikely(ret != 0)) | |
220 | --dev_priv->num_3d_resources; | |
221 | } | |
222 | mutex_unlock(&dev_priv->release_mutex); | |
223 | return ret; | |
fb1d9738 JB |
224 | } |
225 | ||
226 | ||
30c78bb8 TH |
227 | void vmw_3d_resource_dec(struct vmw_private *dev_priv) |
228 | { | |
229 | int32_t n3d; | |
230 | ||
231 | mutex_lock(&dev_priv->release_mutex); | |
232 | if (unlikely(--dev_priv->num_3d_resources == 0)) | |
233 | vmw_release_device(dev_priv); | |
234 | n3d = (int32_t) dev_priv->num_3d_resources; | |
235 | mutex_unlock(&dev_priv->release_mutex); | |
236 | ||
237 | BUG_ON(n3d < 0); | |
238 | } | |
239 | ||
fb1d9738 JB |
240 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) |
241 | { | |
242 | struct vmw_private *dev_priv; | |
243 | int ret; | |
c188660f | 244 | uint32_t svga_id; |
fb1d9738 JB |
245 | |
246 | dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); | |
247 | if (unlikely(dev_priv == NULL)) { | |
248 | DRM_ERROR("Failed allocating a device private struct.\n"); | |
249 | return -ENOMEM; | |
250 | } | |
251 | memset(dev_priv, 0, sizeof(*dev_priv)); | |
252 | ||
253 | dev_priv->dev = dev; | |
254 | dev_priv->vmw_chipset = chipset; | |
7704befb | 255 | dev_priv->last_read_sequence = (uint32_t) -100; |
fb1d9738 JB |
256 | mutex_init(&dev_priv->hw_mutex); |
257 | mutex_init(&dev_priv->cmdbuf_mutex); | |
30c78bb8 | 258 | mutex_init(&dev_priv->release_mutex); |
fb1d9738 JB |
259 | rwlock_init(&dev_priv->resource_lock); |
260 | idr_init(&dev_priv->context_idr); | |
261 | idr_init(&dev_priv->surface_idr); | |
262 | idr_init(&dev_priv->stream_idr); | |
fb1d9738 JB |
263 | mutex_init(&dev_priv->init_mutex); |
264 | init_waitqueue_head(&dev_priv->fence_queue); | |
265 | init_waitqueue_head(&dev_priv->fifo_queue); | |
266 | atomic_set(&dev_priv->fence_queue_waiters, 0); | |
267 | atomic_set(&dev_priv->fifo_queue_waiters, 0); | |
fb1d9738 JB |
268 | |
269 | dev_priv->io_start = pci_resource_start(dev->pdev, 0); | |
270 | dev_priv->vram_start = pci_resource_start(dev->pdev, 1); | |
271 | dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); | |
272 | ||
30c78bb8 TH |
273 | dev_priv->enable_fb = enable_fbdev; |
274 | ||
fb1d9738 | 275 | mutex_lock(&dev_priv->hw_mutex); |
c188660f PH |
276 | |
277 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); | |
278 | svga_id = vmw_read(dev_priv, SVGA_REG_ID); | |
279 | if (svga_id != SVGA_ID_2) { | |
280 | ret = -ENOSYS; | |
281 | DRM_ERROR("Unsuported SVGA ID 0x%x\n", svga_id); | |
282 | mutex_unlock(&dev_priv->hw_mutex); | |
283 | goto out_err0; | |
284 | } | |
285 | ||
fb1d9738 JB |
286 | dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); |
287 | ||
288 | if (dev_priv->capabilities & SVGA_CAP_GMR) { | |
289 | dev_priv->max_gmr_descriptors = | |
290 | vmw_read(dev_priv, | |
291 | SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH); | |
292 | dev_priv->max_gmr_ids = | |
293 | vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); | |
294 | } | |
295 | ||
296 | dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE); | |
297 | dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); | |
298 | dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH); | |
299 | dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT); | |
300 | ||
301 | mutex_unlock(&dev_priv->hw_mutex); | |
302 | ||
303 | vmw_print_capabilities(dev_priv->capabilities); | |
304 | ||
305 | if (dev_priv->capabilities & SVGA_CAP_GMR) { | |
306 | DRM_INFO("Max GMR ids is %u\n", | |
307 | (unsigned)dev_priv->max_gmr_ids); | |
308 | DRM_INFO("Max GMR descriptors is %u\n", | |
309 | (unsigned)dev_priv->max_gmr_descriptors); | |
310 | } | |
311 | DRM_INFO("VRAM at 0x%08x size is %u kiB\n", | |
312 | dev_priv->vram_start, dev_priv->vram_size / 1024); | |
313 | DRM_INFO("MMIO at 0x%08x size is %u kiB\n", | |
314 | dev_priv->mmio_start, dev_priv->mmio_size / 1024); | |
315 | ||
316 | ret = vmw_ttm_global_init(dev_priv); | |
317 | if (unlikely(ret != 0)) | |
318 | goto out_err0; | |
319 | ||
320 | ||
321 | vmw_master_init(&dev_priv->fbdev_master); | |
322 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); | |
323 | dev_priv->active_master = &dev_priv->fbdev_master; | |
324 | ||
a2c06ee2 | 325 | |
fb1d9738 JB |
326 | ret = ttm_bo_device_init(&dev_priv->bdev, |
327 | dev_priv->bo_global_ref.ref.object, | |
328 | &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET, | |
329 | false); | |
330 | if (unlikely(ret != 0)) { | |
331 | DRM_ERROR("Failed initializing TTM buffer object driver.\n"); | |
332 | goto out_err1; | |
333 | } | |
334 | ||
335 | ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, | |
336 | (dev_priv->vram_size >> PAGE_SHIFT)); | |
337 | if (unlikely(ret != 0)) { | |
338 | DRM_ERROR("Failed initializing memory manager for VRAM.\n"); | |
339 | goto out_err2; | |
340 | } | |
341 | ||
135cba0d TH |
342 | dev_priv->has_gmr = true; |
343 | if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, | |
344 | dev_priv->max_gmr_ids) != 0) { | |
345 | DRM_INFO("No GMR memory available. " | |
346 | "Graphics memory resources are very limited.\n"); | |
347 | dev_priv->has_gmr = false; | |
348 | } | |
349 | ||
fb1d9738 JB |
350 | dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start, |
351 | dev_priv->mmio_size, DRM_MTRR_WC); | |
352 | ||
353 | dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start, | |
354 | dev_priv->mmio_size); | |
355 | ||
356 | if (unlikely(dev_priv->mmio_virt == NULL)) { | |
357 | ret = -ENOMEM; | |
358 | DRM_ERROR("Failed mapping MMIO.\n"); | |
359 | goto out_err3; | |
360 | } | |
361 | ||
d7e1958d JB |
362 | /* Need mmio memory to check for fifo pitchlock cap. */ |
363 | if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && | |
364 | !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) && | |
365 | !vmw_fifo_have_pitchlock(dev_priv)) { | |
366 | ret = -ENOSYS; | |
367 | DRM_ERROR("Hardware has no pitchlock\n"); | |
368 | goto out_err4; | |
369 | } | |
370 | ||
fb1d9738 JB |
371 | dev_priv->tdev = ttm_object_device_init |
372 | (dev_priv->mem_global_ref.object, 12); | |
373 | ||
374 | if (unlikely(dev_priv->tdev == NULL)) { | |
375 | DRM_ERROR("Unable to initialize TTM object management.\n"); | |
376 | ret = -ENOMEM; | |
377 | goto out_err4; | |
378 | } | |
379 | ||
380 | dev->dev_private = dev_priv; | |
381 | ||
fb1d9738 JB |
382 | ret = pci_request_regions(dev->pdev, "vmwgfx probe"); |
383 | dev_priv->stealth = (ret != 0); | |
384 | if (dev_priv->stealth) { | |
385 | /** | |
386 | * Request at least the mmio PCI resource. | |
387 | */ | |
388 | ||
389 | DRM_INFO("It appears like vesafb is loaded. " | |
f2d12b8e | 390 | "Ignore above error if any.\n"); |
fb1d9738 JB |
391 | ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe"); |
392 | if (unlikely(ret != 0)) { | |
393 | DRM_ERROR("Failed reserving the SVGA MMIO resource.\n"); | |
394 | goto out_no_device; | |
395 | } | |
fb1d9738 | 396 | } |
7a1c2f6c TH |
397 | ret = vmw_kms_init(dev_priv); |
398 | if (unlikely(ret != 0)) | |
399 | goto out_no_kms; | |
f2d12b8e | 400 | vmw_overlay_init(dev_priv); |
30c78bb8 TH |
401 | if (dev_priv->enable_fb) { |
402 | ret = vmw_3d_resource_inc(dev_priv); | |
403 | if (unlikely(ret != 0)) | |
404 | goto out_no_fifo; | |
405 | vmw_kms_save_vga(dev_priv); | |
406 | vmw_fb_init(dev_priv); | |
407 | DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? | |
408 | "Detected device 3D availability.\n" : | |
409 | "Detected no device 3D availability.\n"); | |
410 | } else { | |
411 | DRM_INFO("Delayed 3D detection since we're not " | |
412 | "running the device in SVGA mode yet.\n"); | |
413 | } | |
fb1d9738 | 414 | |
7a1c2f6c TH |
415 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { |
416 | ret = drm_irq_install(dev); | |
417 | if (unlikely(ret != 0)) { | |
418 | DRM_ERROR("Failed installing irq: %d\n", ret); | |
419 | goto out_no_irq; | |
420 | } | |
421 | } | |
422 | ||
d9f36a00 TH |
423 | dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; |
424 | register_pm_notifier(&dev_priv->pm_nb); | |
425 | ||
fb1d9738 JB |
426 | return 0; |
427 | ||
7a1c2f6c TH |
428 | out_no_irq: |
429 | if (dev_priv->enable_fb) { | |
430 | vmw_fb_close(dev_priv); | |
431 | vmw_kms_restore_vga(dev_priv); | |
432 | vmw_3d_resource_dec(dev_priv); | |
433 | } | |
30c78bb8 TH |
434 | out_no_fifo: |
435 | vmw_overlay_close(dev_priv); | |
436 | vmw_kms_close(dev_priv); | |
7a1c2f6c | 437 | out_no_kms: |
30c78bb8 TH |
438 | if (dev_priv->stealth) |
439 | pci_release_region(dev->pdev, 2); | |
440 | else | |
441 | pci_release_regions(dev->pdev); | |
fb1d9738 | 442 | out_no_device: |
fb1d9738 JB |
443 | ttm_object_device_release(&dev_priv->tdev); |
444 | out_err4: | |
445 | iounmap(dev_priv->mmio_virt); | |
446 | out_err3: | |
447 | drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, | |
448 | dev_priv->mmio_size, DRM_MTRR_WC); | |
135cba0d TH |
449 | if (dev_priv->has_gmr) |
450 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | |
fb1d9738 JB |
451 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); |
452 | out_err2: | |
453 | (void)ttm_bo_device_release(&dev_priv->bdev); | |
454 | out_err1: | |
455 | vmw_ttm_global_release(dev_priv); | |
456 | out_err0: | |
fb1d9738 JB |
457 | idr_destroy(&dev_priv->surface_idr); |
458 | idr_destroy(&dev_priv->context_idr); | |
459 | idr_destroy(&dev_priv->stream_idr); | |
460 | kfree(dev_priv); | |
461 | return ret; | |
462 | } | |
463 | ||
464 | static int vmw_driver_unload(struct drm_device *dev) | |
465 | { | |
466 | struct vmw_private *dev_priv = vmw_priv(dev); | |
467 | ||
d9f36a00 TH |
468 | unregister_pm_notifier(&dev_priv->pm_nb); |
469 | ||
be38ab6e TH |
470 | if (dev_priv->ctx.cmd_bounce) |
471 | vfree(dev_priv->ctx.cmd_bounce); | |
7a1c2f6c TH |
472 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
473 | drm_irq_uninstall(dev_priv->dev); | |
30c78bb8 TH |
474 | if (dev_priv->enable_fb) { |
475 | vmw_fb_close(dev_priv); | |
476 | vmw_kms_restore_vga(dev_priv); | |
477 | vmw_3d_resource_dec(dev_priv); | |
478 | } | |
f2d12b8e TH |
479 | vmw_kms_close(dev_priv); |
480 | vmw_overlay_close(dev_priv); | |
f2d12b8e | 481 | if (dev_priv->stealth) |
fb1d9738 | 482 | pci_release_region(dev->pdev, 2); |
f2d12b8e TH |
483 | else |
484 | pci_release_regions(dev->pdev); | |
485 | ||
fb1d9738 JB |
486 | ttm_object_device_release(&dev_priv->tdev); |
487 | iounmap(dev_priv->mmio_virt); | |
488 | drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, | |
489 | dev_priv->mmio_size, DRM_MTRR_WC); | |
135cba0d TH |
490 | if (dev_priv->has_gmr) |
491 | (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | |
fb1d9738 JB |
492 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); |
493 | (void)ttm_bo_device_release(&dev_priv->bdev); | |
494 | vmw_ttm_global_release(dev_priv); | |
fb1d9738 JB |
495 | idr_destroy(&dev_priv->surface_idr); |
496 | idr_destroy(&dev_priv->context_idr); | |
497 | idr_destroy(&dev_priv->stream_idr); | |
498 | ||
499 | kfree(dev_priv); | |
500 | ||
501 | return 0; | |
502 | } | |
503 | ||
504 | static void vmw_postclose(struct drm_device *dev, | |
505 | struct drm_file *file_priv) | |
506 | { | |
507 | struct vmw_fpriv *vmw_fp; | |
508 | ||
509 | vmw_fp = vmw_fpriv(file_priv); | |
510 | ttm_object_file_release(&vmw_fp->tfile); | |
511 | if (vmw_fp->locked_master) | |
512 | drm_master_put(&vmw_fp->locked_master); | |
513 | kfree(vmw_fp); | |
514 | } | |
515 | ||
516 | static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) | |
517 | { | |
518 | struct vmw_private *dev_priv = vmw_priv(dev); | |
519 | struct vmw_fpriv *vmw_fp; | |
520 | int ret = -ENOMEM; | |
521 | ||
522 | vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL); | |
523 | if (unlikely(vmw_fp == NULL)) | |
524 | return ret; | |
525 | ||
526 | vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10); | |
527 | if (unlikely(vmw_fp->tfile == NULL)) | |
528 | goto out_no_tfile; | |
529 | ||
530 | file_priv->driver_priv = vmw_fp; | |
531 | ||
532 | if (unlikely(dev_priv->bdev.dev_mapping == NULL)) | |
533 | dev_priv->bdev.dev_mapping = | |
534 | file_priv->filp->f_path.dentry->d_inode->i_mapping; | |
535 | ||
536 | return 0; | |
537 | ||
538 | out_no_tfile: | |
539 | kfree(vmw_fp); | |
540 | return ret; | |
541 | } | |
542 | ||
543 | static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, | |
544 | unsigned long arg) | |
545 | { | |
546 | struct drm_file *file_priv = filp->private_data; | |
547 | struct drm_device *dev = file_priv->minor->dev; | |
548 | unsigned int nr = DRM_IOCTL_NR(cmd); | |
fb1d9738 JB |
549 | |
550 | /* | |
e1f78003 | 551 | * Do extra checking on driver private ioctls. |
fb1d9738 JB |
552 | */ |
553 | ||
554 | if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) | |
555 | && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { | |
556 | struct drm_ioctl_desc *ioctl = | |
557 | &vmw_ioctls[nr - DRM_COMMAND_BASE]; | |
558 | ||
2854eeda | 559 | if (unlikely(ioctl->cmd_drv != cmd)) { |
fb1d9738 JB |
560 | DRM_ERROR("Invalid command format, ioctl %d\n", |
561 | nr - DRM_COMMAND_BASE); | |
562 | return -EINVAL; | |
563 | } | |
fb1d9738 JB |
564 | } |
565 | ||
e1f78003 | 566 | return drm_ioctl(filp, cmd, arg); |
fb1d9738 JB |
567 | } |
568 | ||
569 | static int vmw_firstopen(struct drm_device *dev) | |
570 | { | |
571 | struct vmw_private *dev_priv = vmw_priv(dev); | |
572 | dev_priv->is_opened = true; | |
573 | ||
574 | return 0; | |
575 | } | |
576 | ||
577 | static void vmw_lastclose(struct drm_device *dev) | |
578 | { | |
579 | struct vmw_private *dev_priv = vmw_priv(dev); | |
580 | struct drm_crtc *crtc; | |
581 | struct drm_mode_set set; | |
582 | int ret; | |
583 | ||
584 | /** | |
585 | * Do nothing on the lastclose call from drm_unload. | |
586 | */ | |
587 | ||
588 | if (!dev_priv->is_opened) | |
589 | return; | |
590 | ||
591 | dev_priv->is_opened = false; | |
592 | set.x = 0; | |
593 | set.y = 0; | |
594 | set.fb = NULL; | |
595 | set.mode = NULL; | |
596 | set.connectors = NULL; | |
597 | set.num_connectors = 0; | |
598 | ||
599 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | |
600 | set.crtc = crtc; | |
601 | ret = crtc->funcs->set_config(&set); | |
602 | WARN_ON(ret != 0); | |
603 | } | |
604 | ||
605 | } | |
606 | ||
607 | static void vmw_master_init(struct vmw_master *vmaster) | |
608 | { | |
609 | ttm_lock_init(&vmaster->lock); | |
3a939a5e TH |
610 | INIT_LIST_HEAD(&vmaster->fb_surf); |
611 | mutex_init(&vmaster->fb_surf_mutex); | |
fb1d9738 JB |
612 | } |
613 | ||
614 | static int vmw_master_create(struct drm_device *dev, | |
615 | struct drm_master *master) | |
616 | { | |
617 | struct vmw_master *vmaster; | |
618 | ||
fb1d9738 JB |
619 | vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); |
620 | if (unlikely(vmaster == NULL)) | |
621 | return -ENOMEM; | |
622 | ||
3a939a5e | 623 | vmw_master_init(vmaster); |
fb1d9738 JB |
624 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); |
625 | master->driver_priv = vmaster; | |
626 | ||
627 | return 0; | |
628 | } | |
629 | ||
630 | static void vmw_master_destroy(struct drm_device *dev, | |
631 | struct drm_master *master) | |
632 | { | |
633 | struct vmw_master *vmaster = vmw_master(master); | |
634 | ||
fb1d9738 JB |
635 | master->driver_priv = NULL; |
636 | kfree(vmaster); | |
637 | } | |
638 | ||
639 | ||
640 | static int vmw_master_set(struct drm_device *dev, | |
641 | struct drm_file *file_priv, | |
642 | bool from_open) | |
643 | { | |
644 | struct vmw_private *dev_priv = vmw_priv(dev); | |
645 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); | |
646 | struct vmw_master *active = dev_priv->active_master; | |
647 | struct vmw_master *vmaster = vmw_master(file_priv->master); | |
648 | int ret = 0; | |
649 | ||
30c78bb8 TH |
650 | if (!dev_priv->enable_fb) { |
651 | ret = vmw_3d_resource_inc(dev_priv); | |
652 | if (unlikely(ret != 0)) | |
653 | return ret; | |
654 | vmw_kms_save_vga(dev_priv); | |
655 | mutex_lock(&dev_priv->hw_mutex); | |
656 | vmw_write(dev_priv, SVGA_REG_TRACES, 0); | |
657 | mutex_unlock(&dev_priv->hw_mutex); | |
658 | } | |
659 | ||
fb1d9738 JB |
660 | if (active) { |
661 | BUG_ON(active != &dev_priv->fbdev_master); | |
662 | ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); | |
663 | if (unlikely(ret != 0)) | |
664 | goto out_no_active_lock; | |
665 | ||
666 | ttm_lock_set_kill(&active->lock, true, SIGTERM); | |
667 | ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); | |
668 | if (unlikely(ret != 0)) { | |
669 | DRM_ERROR("Unable to clean VRAM on " | |
670 | "master drop.\n"); | |
671 | } | |
672 | ||
673 | dev_priv->active_master = NULL; | |
674 | } | |
675 | ||
676 | ttm_lock_set_kill(&vmaster->lock, false, SIGTERM); | |
677 | if (!from_open) { | |
678 | ttm_vt_unlock(&vmaster->lock); | |
679 | BUG_ON(vmw_fp->locked_master != file_priv->master); | |
680 | drm_master_put(&vmw_fp->locked_master); | |
681 | } | |
682 | ||
683 | dev_priv->active_master = vmaster; | |
684 | ||
685 | return 0; | |
686 | ||
687 | out_no_active_lock: | |
30c78bb8 TH |
688 | if (!dev_priv->enable_fb) { |
689 | mutex_lock(&dev_priv->hw_mutex); | |
690 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); | |
691 | mutex_unlock(&dev_priv->hw_mutex); | |
692 | vmw_kms_restore_vga(dev_priv); | |
693 | vmw_3d_resource_dec(dev_priv); | |
694 | } | |
fb1d9738 JB |
695 | return ret; |
696 | } | |
697 | ||
698 | static void vmw_master_drop(struct drm_device *dev, | |
699 | struct drm_file *file_priv, | |
700 | bool from_release) | |
701 | { | |
702 | struct vmw_private *dev_priv = vmw_priv(dev); | |
703 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); | |
704 | struct vmw_master *vmaster = vmw_master(file_priv->master); | |
705 | int ret; | |
706 | ||
fb1d9738 JB |
707 | /** |
708 | * Make sure the master doesn't disappear while we have | |
709 | * it locked. | |
710 | */ | |
711 | ||
712 | vmw_fp->locked_master = drm_master_get(file_priv->master); | |
713 | ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); | |
3a939a5e | 714 | vmw_kms_idle_workqueues(vmaster); |
fb1d9738 JB |
715 | |
716 | if (unlikely((ret != 0))) { | |
717 | DRM_ERROR("Unable to lock TTM at VT switch.\n"); | |
718 | drm_master_put(&vmw_fp->locked_master); | |
719 | } | |
720 | ||
721 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); | |
722 | ||
30c78bb8 TH |
723 | if (!dev_priv->enable_fb) { |
724 | ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); | |
725 | if (unlikely(ret != 0)) | |
726 | DRM_ERROR("Unable to clean VRAM on master drop.\n"); | |
727 | mutex_lock(&dev_priv->hw_mutex); | |
728 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); | |
729 | mutex_unlock(&dev_priv->hw_mutex); | |
730 | vmw_kms_restore_vga(dev_priv); | |
731 | vmw_3d_resource_dec(dev_priv); | |
732 | } | |
733 | ||
fb1d9738 JB |
734 | dev_priv->active_master = &dev_priv->fbdev_master; |
735 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); | |
736 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); | |
737 | ||
30c78bb8 TH |
738 | if (dev_priv->enable_fb) |
739 | vmw_fb_on(dev_priv); | |
fb1d9738 JB |
740 | } |
741 | ||
742 | ||
743 | static void vmw_remove(struct pci_dev *pdev) | |
744 | { | |
745 | struct drm_device *dev = pci_get_drvdata(pdev); | |
746 | ||
747 | drm_put_dev(dev); | |
748 | } | |
749 | ||
d9f36a00 TH |
750 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, |
751 | void *ptr) | |
752 | { | |
753 | struct vmw_private *dev_priv = | |
754 | container_of(nb, struct vmw_private, pm_nb); | |
755 | struct vmw_master *vmaster = dev_priv->active_master; | |
756 | ||
757 | switch (val) { | |
758 | case PM_HIBERNATION_PREPARE: | |
759 | case PM_SUSPEND_PREPARE: | |
760 | ttm_suspend_lock(&vmaster->lock); | |
761 | ||
762 | /** | |
763 | * This empties VRAM and unbinds all GMR bindings. | |
764 | * Buffer contents is moved to swappable memory. | |
765 | */ | |
766 | ttm_bo_swapout_all(&dev_priv->bdev); | |
094e0fa8 | 767 | |
d9f36a00 TH |
768 | break; |
769 | case PM_POST_HIBERNATION: | |
770 | case PM_POST_SUSPEND: | |
094e0fa8 | 771 | case PM_POST_RESTORE: |
d9f36a00 | 772 | ttm_suspend_unlock(&vmaster->lock); |
094e0fa8 | 773 | |
d9f36a00 TH |
774 | break; |
775 | case PM_RESTORE_PREPARE: | |
776 | break; | |
d9f36a00 TH |
777 | default: |
778 | break; | |
779 | } | |
780 | return 0; | |
781 | } | |
782 | ||
783 | /** | |
784 | * These might not be needed with the virtual SVGA device. | |
785 | */ | |
786 | ||
7fbd721a | 787 | static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) |
d9f36a00 | 788 | { |
094e0fa8 TH |
789 | struct drm_device *dev = pci_get_drvdata(pdev); |
790 | struct vmw_private *dev_priv = vmw_priv(dev); | |
791 | ||
792 | if (dev_priv->num_3d_resources != 0) { | |
793 | DRM_INFO("Can't suspend or hibernate " | |
794 | "while 3D resources are active.\n"); | |
795 | return -EBUSY; | |
796 | } | |
797 | ||
d9f36a00 TH |
798 | pci_save_state(pdev); |
799 | pci_disable_device(pdev); | |
800 | pci_set_power_state(pdev, PCI_D3hot); | |
801 | return 0; | |
802 | } | |
803 | ||
7fbd721a | 804 | static int vmw_pci_resume(struct pci_dev *pdev) |
d9f36a00 TH |
805 | { |
806 | pci_set_power_state(pdev, PCI_D0); | |
807 | pci_restore_state(pdev); | |
808 | return pci_enable_device(pdev); | |
809 | } | |
810 | ||
7fbd721a TH |
811 | static int vmw_pm_suspend(struct device *kdev) |
812 | { | |
813 | struct pci_dev *pdev = to_pci_dev(kdev); | |
814 | struct pm_message dummy; | |
815 | ||
816 | dummy.event = 0; | |
817 | ||
818 | return vmw_pci_suspend(pdev, dummy); | |
819 | } | |
820 | ||
821 | static int vmw_pm_resume(struct device *kdev) | |
822 | { | |
823 | struct pci_dev *pdev = to_pci_dev(kdev); | |
824 | ||
825 | return vmw_pci_resume(pdev); | |
826 | } | |
827 | ||
/*
 * vmw_pm_prepare - dev_pm_ops .prepare callback.
 *
 * Marks the device as suspended and releases the 3D resource reference
 * held on behalf of fbdev, which may stop the fifo.  If other 3D
 * resources are still active after that, the reference and flag are
 * restored and the suspend is refused.
 *
 * Returns 0 on success, -EBUSY if 3D resources remain in use.
 */
static int vmw_pm_prepare(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	/*
	 * Release 3d reference held by fbdev and potentially
	 * stop fifo.  Must happen before the busy check below, since it
	 * lowers num_3d_resources.
	 */
	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_3d_resource_dec(dev_priv);

	if (dev_priv->num_3d_resources != 0) {

		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");

		/* Roll back: reclaim the fbdev reference and clear the flag. */
		if (dev_priv->enable_fb)
			vmw_3d_resource_inc(dev_priv);
		dev_priv->suspended = false;
		return -EBUSY;
	}

	return 0;
}
855 | ||
856 | static void vmw_pm_complete(struct device *kdev) | |
857 | { | |
858 | struct pci_dev *pdev = to_pci_dev(kdev); | |
859 | struct drm_device *dev = pci_get_drvdata(pdev); | |
860 | struct vmw_private *dev_priv = vmw_priv(dev); | |
861 | ||
862 | /** | |
863 | * Reclaim 3d reference held by fbdev and potentially | |
864 | * start fifo. | |
865 | */ | |
866 | if (dev_priv->enable_fb) | |
867 | vmw_3d_resource_inc(dev_priv); | |
868 | ||
869 | dev_priv->suspended = false; | |
870 | } | |
871 | ||
/* Power-management callbacks, wired into vmw_pci_driver below. */
static const struct dev_pm_ops vmw_pm_ops = {
	.prepare = vmw_pm_prepare,
	.complete = vmw_pm_complete,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};
878 | ||
fb1d9738 JB |
/* DRM driver descriptor: feature flags, callbacks, ioctls and fops. */
static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.firstopen = vmw_firstopen,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	/* No legacy buffer reclaim needed. */
	.reclaim_buffers_locked = NULL,
	.ioctls = vmw_ioctls,
	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
	.dma_quiescent = NULL, /*vmw_dma_quiescent, */
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,
	/* Device-node file operations; mostly DRM core helpers. */
	.fops = {
		.owner = THIS_MODULE,
		.open = drm_open,
		.release = drm_release,
		.unlocked_ioctl = vmw_unlocked_ioctl,
		.mmap = vmw_mmap,
		.poll = drm_poll,
		.fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
		.compat_ioctl = drm_compat_ioctl,
#endif
		.llseek = noop_llseek,
	},
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};
921 | ||
8410ea3b DA |
/* PCI driver glue; power management is routed through vmw_pm_ops. */
static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};
931 | ||
fb1d9738 JB |
/* PCI probe callback: hand the device to the DRM core with our driver. */
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}
936 | ||
937 | static int __init vmwgfx_init(void) | |
938 | { | |
939 | int ret; | |
8410ea3b | 940 | ret = drm_pci_init(&driver, &vmw_pci_driver); |
fb1d9738 JB |
941 | if (ret) |
942 | DRM_ERROR("Failed initializing DRM.\n"); | |
943 | return ret; | |
944 | } | |
945 | ||
/* Module exit point: unregister the driver from the DRM PCI layer. */
static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}
950 | ||
module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

/* Module metadata; version string is built from the driver version parts. */
MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");