/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
		struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
		struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
		struct drm_vmw_synccpu_arg)
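
/*
 * Illustrative only, not part of the driver: userspace typically issues
 * these commands through libdrm on an open device node, e.g.
 *
 *	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_VMW_GET_PARAM, &arg) == 0)
 *		have_3d = arg.value != 0;
 */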

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
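
/*
 * For example, VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, flags)
 * expands to
 *
 *	[DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE] =
 *		{DRM_IOCTL_VMW_GET_PARAM, flags, vmw_getparam_ioctl}
 *
 * so the vmw_ioctls[] table below is indexed by the driver-private
 * ioctl number.
 */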

/**
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	/*
	 * These ioctls allow direct access to the framebuffers; mark
	 * them master only.
	 */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_MASTER | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_dmabuf_synccpu_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
};

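/* PCI vendor id 0x15ad is VMware; device id 0x0405 is the SVGA II adapter. */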
static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);


static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_3)
		DRM_INFO("  Command Buffers 3.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_dma_buffer *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
			      &vmw_sys_ne_placement, false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
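		/*
		 * Pre-mark the shared result structure as a pending
		 * occlusion query, so that the dummy queries emitted as
		 * query barriers always have a valid result page to
		 * write to.
		 */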
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_dmabuf_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because this function reverses vmw_release_device_early and is also
 * used by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
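		/*
		 * 256 * 4 KiB = 1 MiB of command buffer pool space, with
		 * what appears to be an 8 KiB default command buffer size.
		 */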
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
					       256*4096, 2*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}

static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman))
		dev_priv->cman = NULL;

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * vmw_get_initial_size - Set the initial_[width|height] fields on the
 * given vmw_private.
 *
 * @dev_priv: Pointer to a struct vmw_private.
 *
 * Reads the SVGA_REG_[WIDTH|HEIGHT] registers and clamps the values
 * upward to VMW_MIN_INITIAL_[WIDTH|HEIGHT]. If the values exceed the
 * fb_max_[width|height] fields, they are considered invalid and are
 * replaced with VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
#ifdef CONFIG_X86
	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		dev_priv->map_mode = vmw_dma_map_populate;
		goto out_fixup;
	}
#endif

	if (!(vmw_force_iommu || vmw_force_coherent)) {
		dev_priv->map_mode = vmw_dma_phys;
		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
		return 0;
	}

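	/*
	 * At this point either vmw_force_iommu or vmw_force_coherent is
	 * set: start out keeping DMA mappings, and upgrade to coherent
	 * pages if the DMA ops indicate that CPU syncing would be needed.
	 */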
	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
		dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
	if (dev_priv->map_mode == vmw_dma_map_populate &&
	    vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
	/*
	 * No coherent page pool
	 */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;
#endif

#else /* CONFIG_X86 */
	dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}

/**
 * vmw_dma_masks - Set required page and DMA masks.
 *
 * @dev_priv: Pointer to a struct vmw_private.
 *
 * With 32-bit we can only handle 32-bit PFNs. Optionally apply the same
 * restriction on 64-bit systems when the restrict_dma_mask module option
 * is set.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (intel_iommu_enabled &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
	}
	return 0;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	return 0;
}
#endif

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	atomic_set(&dev_priv->fifo_queue_waiters, 0);

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

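	/*
	 * Negotiate the device version: write the highest SVGA id the
	 * driver supports, then read back which id the device accepted.
	 */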
	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

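		/* mem_size is in KiB, as the _KB register suffix indicates. */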
		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

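		/*
		 * SVGA_REG_DEV_CAP acts as an index/value pair: write the
		 * capability index, then read the corresponding value back.
		 */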
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 (unsigned)(dev_priv->prim_bb_mem / 1024));
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
					       dev_priv->mmio_size);

	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
					 dev_priv->mmio_size);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/*
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears that vesafb is loaded. "
			 "Ignore the error above, if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	iounmap(dev_priv->mmio_virt);
out_err3:
	arch_phys_wc_del(dev_priv->mmio_mtrr);
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);
	return ret;
}

static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	iounmap(dev_priv->mmio_virt);
	arch_phys_wc_del(dev_priv->mmio_mtrr);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);

	return 0;
}

static void vmw_preclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	INIT_LIST_HEAD(&vmw_fp->fence_events);
	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

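/*
 * vmw_master_check - Check master status for an incoming ioctl.
 *
 * Returns NULL if no check was needed, a pointer to the vmw_master whose
 * TTM read lock was taken (the caller must release it), or an ERR_PTR on
 * failure.
 */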
static struct vmw_master *vmw_master_check(struct drm_device *dev,
					   struct drm_file *file_priv,
					   unsigned int flags)
{
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster;

	if (file_priv->minor->type != DRM_MINOR_LEGACY ||
	    !(flags & DRM_AUTH))
		return NULL;

	ret = mutex_lock_interruptible(&dev->master_mutex);
	if (unlikely(ret != 0))
		return ERR_PTR(-ERESTARTSYS);

	if (file_priv->is_master) {
		mutex_unlock(&dev->master_mutex);
		return NULL;
	}

	/*
	 * Check if we were previously master, but now dropped.
	 */
	if (vmw_fp->locked_master) {
		mutex_unlock(&dev->master_mutex);
		DRM_ERROR("Dropped master trying to access ioctl that "
			  "requires authentication.\n");
		return ERR_PTR(-EACCES);
	}
	mutex_unlock(&dev->master_mutex);

	/*
	 * Taking the drm_global_mutex after the TTM lock might deadlock.
	 */
	if (!(flags & DRM_UNLOCKED)) {
		DRM_ERROR("Refusing locked ioctl access.\n");
		return ERR_PTR(-EDEADLK);
	}

	/*
	 * Take the TTM lock. Possibly sleep waiting for the authenticating
	 * master to become master again, or for a SIGTERM if the
	 * authenticating master exits.
	 */
	vmaster = vmw_master(file_priv->master);
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		vmaster = ERR_PTR(ret);

	return vmaster;
}

static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	struct vmw_master *vmaster;
	unsigned int flags;
	long ret;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (unlikely(ioctl->cmd != cmd)) {
			DRM_ERROR("Invalid command format, ioctl %d\n",
				  nr - DRM_COMMAND_BASE);
			return -EINVAL;
		}
		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	vmaster = vmw_master_check(dev, file_priv, flags);
	if (unlikely(IS_ERR(vmaster))) {
		ret = PTR_ERR(vmaster);

		if (ret != -ERESTARTSYS)
			DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
				 nr, ret);
		return ret;
	}

	ret = ioctl_func(filp, cmd, arg);
	if (vmaster)
		ttm_read_unlock(&vmaster->lock);

	return ret;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_lastclose(struct drm_device *dev)
{
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}


static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			return ret;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/**
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	if (unlikely((ret != 0))) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);

	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE);
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
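		/*
		 * Keep the device enabled but the display hidden, rather
		 * than disabling it outright, so that the FIFO can keep
		 * running.
		 */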
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_ENABLE_HIDE);
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	ttm_write_lock(&dev_priv->reservation_sem, false);
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_ENABLE_HIDE);
		spin_unlock(&dev_priv->svga_lock);
		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
	} else
		spin_unlock(&dev_priv->svga_lock);
	ttm_write_unlock(&dev_priv->reservation_sem);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	pci_disable_device(pdev);
	drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		if (dev_priv->enable_fb)
			vmw_fb_off(dev_priv);
		ttm_suspend_lock(&dev_priv->reservation_sem);

		/*
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		vmw_execbuf_release_pinned_bo(dev_priv);
		vmw_resource_evict_all(dev_priv);
		vmw_release_device_early(dev_priv);
		ttm_bo_swapout_all(&dev_priv->bdev);
		vmw_fence_fifo_down(dev_priv->fman);
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		vmw_fence_fifo_up(dev_priv->fman);
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);

	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspended = false;
		return -EBUSY;
	}

	if (dev_priv->enable_fb)
		__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);

	return 0;
}

static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	dev_priv->suspended = false;

	return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.preclose = vmw_preclose,
	.postclose = vmw_postclose,
	.set_busid = drm_pci_set_busid,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;

	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");