/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>
#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600
/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */
#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
		struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
		struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
		struct drm_vmw_synccpu_arg)
/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */
#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
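
/*
 * Informative example (not part of the original source): with the macro
 * above,
 *
 *	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, DRM_AUTH)
 *
 * expands to an array entry at index
 * DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE with the
 * initializer {DRM_VMW_GET_PARAM, DRM_AUTH, vmw_getparam_ioctl,
 * DRM_IOCTL_VMW_GET_PARAM}. The table below is therefore indexed by the
 * driver-private ioctl number, and vmw_generic_ioctl() can verify the
 * stored ->cmd_drv against the caller-supplied cmd.
 */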
static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	/* These ioctls allow direct access to the framebuffers; mark them as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_MASTER | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_dmabuf_synccpu_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
};
static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);
MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
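
/*
 * Usage note (informative, not part of the original source): the
 * parameters above can be given at load time, e.g.
 * "modprobe vmwgfx enable_fbdev=1 force_coherent=1", or changed later
 * through /sys/module/vmwgfx/parameters/ (all are mode 0600, root only).
 * force_dma_api, restrict_iommu, force_coherent and restrict_dma_mask
 * feed into vmw_dma_select_mode() and vmw_dma_masks() below.
 */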
static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  Guest memory regions.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
}
/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;
	int ret;

	/*
	 * Create the bo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	ret = ttm_bo_create(&dev_priv->bdev,
			    PAGE_SIZE,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, NULL,
			    &bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, true, false, NULL);
	BUG_ON(ret != 0);

	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin(bo, false);
	ttm_bo_unreserve(bo);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		ttm_bo_unref(&bo);
	} else
		dev_priv->dummy_query_bo = bo;

	return ret;
}
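
/*
 * vmw_request_device - Bring the device up for command submission.
 *
 * Initializes the command FIFO, enables fence processing, sets up the
 * guest-backed object tables when MOBs are supported, and creates the
 * dummy query bo. On failure, the steps already taken are undone in
 * reverse order through the labels at the end of the function.
 */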
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			goto out_no_mob;
		}
	}
	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->has_mob)
		vmw_otables_takedown(dev_priv);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}
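
/*
 * vmw_release_device - Release the resources acquired in
 * vmw_request_device(), in reverse order of acquisition.
 */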
static void vmw_release_device(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	ttm_bo_unref(&dev_priv->dummy_query_bo);
	if (dev_priv->has_mob)
		vmw_otables_takedown(dev_priv);
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}
/**
 * Increase the 3d resource refcount.
 * If the count was previously zero, initialize the fifo, switching to svga
 * mode. Note that the master holds a ref as well, and may request an
 * explicit switch to svga mode if fb is not running, using @unhide_svga.
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
			bool unhide_svga)
{
	int ret = 0;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(dev_priv->num_3d_resources++ == 0)) {
		ret = vmw_request_device(dev_priv);
		if (unlikely(ret != 0))
			--dev_priv->num_3d_resources;
	} else if (unhide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) &
			  ~SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	mutex_unlock(&dev_priv->release_mutex);
	return ret;
}
/**
 * Decrease the 3d resource refcount.
 * If the count reaches zero, disable the fifo, switching to vga mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
			 bool hide_svga)
{
	int32_t n3d;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(--dev_priv->num_3d_resources == 0))
		vmw_release_device(dev_priv);
	else if (hide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) |
			  SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	n3d = (int32_t) dev_priv->num_3d_resources;
	mutex_unlock(&dev_priv->release_mutex);

	BUG_ON(n3d < 0);
}
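
/*
 * Informative example of the pairing above (not part of the original
 * source): a path that needs the device in svga mode, such as
 * vmw_master_set() below when fbdev is disabled, calls
 * vmw_3d_resource_inc(dev_priv, true) and later balances it with
 * vmw_3d_resource_dec(dev_priv, true). When fbdev is enabled,
 * vmw_driver_load() takes a long-lived reference the same way.
 */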
/**
 * vmw_get_initial_size - Set the initial framebuffer size
 *
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}
/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * device.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
#ifdef CONFIG_X86
	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		dev_priv->map_mode = vmw_dma_map_populate;
		goto out_fixup;
	}
#endif

	if (!(vmw_force_iommu || vmw_force_coherent)) {
		dev_priv->map_mode = vmw_dma_phys;
		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
		return 0;
	}

	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
		dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
	if (dev_priv->map_mode == vmw_dma_map_populate &&
	    vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
	/*
	 * No coherent page pool
	 */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;
#endif

#else /* CONFIG_X86 */
	dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}
/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev: Pointer to struct drm-device
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (intel_iommu_enabled &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
	}
	return 0;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	return 0;
}
#endif
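
/*
 * vmw_driver_load - Main entry point for device initialization.
 *
 * Negotiates the SVGA_ID_2 device version, reads device capabilities
 * and sizes, selects a DMA mapping mode, and then brings up TTM, the
 * VRAM/GMR/MOB memory managers, the MMIO mapping, IRQ handling, the
 * fence manager and KMS. Errors unwind through the labels at the end
 * of the function, in reverse order of initialization.
 */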
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->hw_mutex);
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	atomic_set(&dev_priv->fifo_queue_waiters, 0);

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		mutex_unlock(&dev_priv->hw_mutex);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}
	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
	} else
		dev_priv->prim_bb_mem = dev_priv->vram_size;

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0)) {
		mutex_unlock(&dev_priv->hw_mutex);
		goto out_err0;
	}

	/*
	 * Limit back buffer size to VRAM size. Remove this once
	 * screen targets are implemented.
	 */
	if (dev_priv->prim_bb_mem > dev_priv->vram_size)
		dev_priv->prim_bb_mem = dev_priv->vram_size;

	mutex_unlock(&dev_priv->hw_mutex);

	vmw_print_capabilities(dev_priv->capabilities);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_err1;
	}

	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_err2;
	}

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
					       dev_priv->mmio_size);

	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
					 dev_priv->mmio_size);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/*
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	vmw_kms_save_vga(dev_priv);

	/* Start kms and overlay systems, needs fifo. */
	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	if (dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv, true);
		if (unlikely(ret != 0))
			goto out_no_fifo;
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	vmw_kms_restore_vga(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	iounmap(dev_priv->mmio_virt);
out_err3:
	arch_phys_wc_del(dev_priv->mmio_mtrr);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void) ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
	(void) ttm_bo_device_release(&dev_priv->bdev);
out_err1:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);
	return ret;
}
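
/*
 * vmw_driver_unload - Tear the device down in the reverse order of
 * vmw_driver_load(). Also releases the fbdev 3d reference and frees
 * the command bounce buffer and resource hash table owned by the
 * command submission context.
 */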
static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	if (dev_priv->ctx.cmd_bounce)
		vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_close(dev_priv);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, false);
	}
	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	iounmap(dev_priv->mmio_virt);
	arch_phys_wc_del(dev_priv->mmio_mtrr);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void) ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);

	return 0;
}
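
/*
 * vmw_preclose - Runs before a file handle to the device is closed;
 * drops any fence events still pending for this file so they are not
 * delivered to a dead client.
 */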
static void vmw_preclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}
static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	INIT_LIST_HEAD(&vmw_fp->fence_events);
	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}
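
/*
 * vmw_master_check - Check that an ioctl caller may issue an
 * authenticated ioctl, and take the TTM read lock of its master.
 *
 * Returns NULL if no master lock is needed, a pointer to the master
 * whose ttm read lock was taken (the caller must release it), or an
 * ERR_PTR on failure.
 */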
static struct vmw_master *vmw_master_check(struct drm_device *dev,
					   struct drm_file *file_priv,
					   unsigned int flags)
{
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster;

	if (file_priv->minor->type != DRM_MINOR_LEGACY ||
	    !(flags & DRM_AUTH))
		return NULL;

	ret = mutex_lock_interruptible(&dev->master_mutex);
	if (unlikely(ret != 0))
		return ERR_PTR(-ERESTARTSYS);

	if (file_priv->is_master) {
		mutex_unlock(&dev->master_mutex);
		return NULL;
	}

	/*
	 * Check if we were previously master, but now dropped.
	 */
	if (vmw_fp->locked_master) {
		mutex_unlock(&dev->master_mutex);
		DRM_ERROR("Dropped master trying to access ioctl that "
			  "requires authentication.\n");
		return ERR_PTR(-EACCES);
	}
	mutex_unlock(&dev->master_mutex);

	/*
	 * Taking the drm_global_mutex after the TTM lock might deadlock
	 */
	if (!(flags & DRM_UNLOCKED)) {
		DRM_ERROR("Refusing locked ioctl access.\n");
		return ERR_PTR(-EDEADLK);
	}

	/*
	 * Take the TTM lock. Possibly sleep waiting for the authenticating
	 * master to become master again, or for a SIGTERM if the
	 * authenticating master exits.
	 */
	vmaster = vmw_master(file_priv->master);
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		vmaster = ERR_PTR(ret);

	return vmaster;
}
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	struct vmw_master *vmaster;
	unsigned int flags;
	long ret;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (unlikely(ioctl->cmd_drv != cmd)) {
			DRM_ERROR("Invalid command format, ioctl %d\n",
				  nr - DRM_COMMAND_BASE);
			return -EINVAL;
		}
		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	vmaster = vmw_master_check(dev, file_priv, flags);
	if (unlikely(IS_ERR(vmaster))) {
		DRM_INFO("IOCTL ERROR %d\n", nr);
		return PTR_ERR(vmaster);
	}

	ret = ioctl_func(filp, cmd, arg);
	if (vmaster)
		ttm_read_unlock(&vmaster->lock);

	return ret;
}
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif
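
/*
 * vmw_lastclose - Called when the last userspace handle to the device
 * is closed; disables every CRTC by applying an empty mode set, leaving
 * the display in a sane state for the next client.
 */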
static void vmw_lastclose(struct drm_device *dev)
{
	struct drm_crtc *crtc;
	struct drm_mode_set set;
	int ret;

	set.x = 0;
	set.y = 0;
	set.fb = NULL;
	set.mode = NULL;
	set.connectors = NULL;
	set.num_connectors = 0;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		set.crtc = crtc;
		ret = drm_mode_set_config_internal(&set);
		WARN_ON(ret != 0);
	}
}
static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
	INIT_LIST_HEAD(&vmaster->fb_surf);
	mutex_init(&vmaster->fb_surf_mutex);
}
static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}
static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}
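
/*
 * vmw_master_set and vmw_master_drop below implement the VT-switch
 * handover: on drop, the outgoing master keeps a reference to its
 * drm_master in locked_master and VT-locks it; on set, the new master
 * evicts VRAM held under the previous (fbdev) master and takes over as
 * dev_priv->active_master. The locked_master reference is released
 * again in vmw_master_set() or, if the file closes first, in
 * vmw_postclose().
 */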
static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (!dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv, true);
		if (unlikely(ret != 0))
			return ret;
		vmw_kms_save_vga(dev_priv);
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			goto out_no_active_lock;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to clean VRAM on "
				  "master drop.\n");
		}

		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;

out_no_active_lock:
	if (!dev_priv->enable_fb) {
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
	}
	return ret;
}
static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	vmw_execbuf_release_pinned_bo(dev_priv);

	if (!dev_priv->enable_fb) {
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0))
			DRM_ERROR("Unable to clean VRAM on master drop.\n");
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}
static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ttm_suspend_lock(&dev_priv->reservation_sem);

		/*
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		vmw_execbuf_release_pinned_bo(dev_priv);
		vmw_resource_evict_all(dev_priv);
		ttm_bo_swapout_all(&dev_priv->bdev);

		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
	case PM_POST_RESTORE:
		ttm_suspend_unlock(&dev_priv->reservation_sem);

		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}
/**
 * These might not be needed with the virtual SVGA device.
 */

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->num_3d_resources != 0) {
		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");
		return -EBUSY;
	}

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}
static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}
static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}
static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}
static int vmw_pm_prepare(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	/*
	 * Release 3d reference held by fbdev and potentially
	 * stop fifo.
	 */
	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_3d_resource_dec(dev_priv, true);

	if (dev_priv->num_3d_resources != 0) {
		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");

		if (dev_priv->enable_fb)
			vmw_3d_resource_inc(dev_priv, true);
		dev_priv->suspended = false;
		return -EBUSY;
	}

	return 0;
}
static void vmw_pm_complete(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);
	mutex_unlock(&dev_priv->hw_mutex);

	/*
	 * Reclaim 3d reference held by fbdev and potentially
	 * start fifo.
	 */
	if (dev_priv->enable_fb)
		vmw_3d_resource_inc(dev_priv, false);

	dev_priv->suspended = false;
}
static const struct dev_pm_ops vmw_pm_ops = {
	.prepare = vmw_pm_prepare,
	.complete = vmw_pm_complete,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};
static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.preclose = vmw_preclose,
	.postclose = vmw_postclose,
	.set_busid = drm_pci_set_busid,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};
static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}
static int __init vmwgfx_init(void)
{
	int ret;

	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}
static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}
module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");