/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include <linux/suspend.h>
#include <linux/sync_file.h>

#include <drm/drm_auth.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_rect.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>

#include "ttm_lock.h"
#include "ttm_object.h"

#include "vmwgfx_fence.h"
#include "vmwgfx_reg.h"
#include "vmwgfx_validation.h"

/*
 * FIXME: vmwgfx_drm.h needs to be last due to dependencies.
 * uapi headers should not depend on header files outside uapi/.
 */
#include <drm/vmwgfx_drm.h>


#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DATE "20180704"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 15
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)

#define VMW_PL_GMR (TTM_PL_PRIV + 0)
#define VMW_PL_FLAG_GMR (TTM_PL_FLAG_PRIV << 0)
#define VMW_PL_MOB (TTM_PL_PRIV + 1)
#define VMW_PL_FLAG_MOB (TTM_PL_FLAG_PRIV << 1)

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4

struct vmw_fpriv {
	struct ttm_object_file *tfile;
	bool gb_aware; /* user-space is guest-backed aware */
};
/**
 * struct vmw_buffer_object - TTM buffer object with vmwgfx additions
 * @base: The TTM buffer object
 * @res_list: List of resources using this buffer object as a backing MOB
 * @pin_count: pin depth
 * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
 * @map: Kmap object for semi-persistent mappings
 * @res_prios: Eviction priority counts for attached resources
 */
struct vmw_buffer_object {
	struct ttm_buffer_object base;
	struct list_head res_list;
	s32 pin_count;
	/* Not ref-counted. Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
	/* Protected by reservation */
	struct ttm_bo_kmap_obj map;
	u32 res_prios[TTM_MAX_BO_PRIORITY];
};

/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 * @validate_as_mob: Whether the buffer object needs to be validated as a MOB.
 *
 * This structure also contains driver-private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	bool validate_as_mob;
};

struct vmw_res_func;


/**
 * struct vmw_resource - base class for hardware resources
 *
 * @kref: For refcounting.
 * @dev_priv: Pointer to the device private for this resource. Immutable.
 * @id: Device id. Protected by @dev_priv::resource_lock.
 * @used_prio: Eviction priority currently used for this resource.
 * @backup_size: Backup buffer size. Immutable.
 * @res_dirty: Resource contains data not yet in the backup buffer. Protected
 * by resource reserved.
 * @backup_dirty: Backup buffer contains data not yet in the HW resource.
 * Protected by resource reserved.
 * @backup: The backup buffer if any. Protected by resource reserved.
 * @backup_offset: Offset into the backup buffer if any. Protected by resource
 * reserved. Note that only a few resource types can have a @backup_offset
 * different from zero.
 * @pin_count: The pin count for this resource. A pinned resource has a
 * pin-count greater than zero. It is not on the resource LRU lists and its
 * backup buffer is pinned. Hence it can't be evicted.
 * @func: Method vtable for this resource. Immutable.
 * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
 * @mob_head: List head for the MOB backup list. Protected by @backup reserved.
 * @binding_head: List head for the context binding list. Protected by
 * the @dev_priv::binding_mutex
 * @res_free: The resource destructor.
 * @hw_destroy: Callback to destroy the resource on the device, as part of
 * resource destruction.
 */
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	u32 used_prio;
	unsigned long backup_size;
	bool res_dirty;
	bool backup_dirty;
	struct vmw_buffer_object *backup;
	unsigned long backup_offset;
	unsigned long pin_count;
	const struct vmw_res_func *func;
	struct list_head lru_head;
	struct list_head mob_head;
	struct list_head binding_head;
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};


/*
 * Resources that are managed using ioctls.
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_dx_context,
	vmw_res_cotable,
	vmw_res_view,
	vmw_res_max
};

/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_shader,
	vmw_cmdbuf_res_view
};

struct vmw_cmdbuf_res_manager;

struct vmw_cursor_snooper {
	size_t age;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

struct vmw_surface {
	struct vmw_resource res;
	SVGA3dSurfaceAllFlags flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;
	bool scanout;
	uint32_t array_size;
	/* TODO: so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	SVGA3dTextureFilter autogen_filter;
	uint32_t multisample_count;
	struct list_head view_list;
	SVGA3dMSPattern multisample_pattern;
	SVGA3dMSQualityLevel quality_level;
};

struct vmw_marker_queue {
	struct list_head head;
	u64 lag;
	u64 lag_time;
	spinlock_t lock;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	u32 *dynamic_buffer;
	u32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_marker_queue marker_queue;
	bool dx;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 * @private: Pointer to resource-type-private validation data, if any.
 * @valid_handle: Whether the @handle member is valid.
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	uint32_t handle;
	struct vmw_resource *res;
	void *private;
	unsigned short valid_handle;
	unsigned short valid;
};

/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
 */
enum vmw_dma_map_mode {
	vmw_dma_phys,           /* Use physical page addresses */
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
	vmw_dma_map_max
};

/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @mode: The DMA mapping mode used for this scatter/gather table.
 * @pages: Array of page pointers, if used by @mode.
 * @addrs: Array of DMA addresses, if used by @mode.
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 * @num_pages: Number of pages in total.
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_regions;
	unsigned long num_pages;
};

/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 * @page: Function to return a pointer to the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_dma_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
	struct page *(*page)(struct vmw_piter *);
};

/*
 * enum vmw_display_unit_type - Describes the display unit
 */
enum vmw_display_unit_type {
	vmw_du_invalid = 0,
	vmw_du_legacy,
	vmw_du_screen_object,
	vmw_du_screen_target
};

struct vmw_validation_context;
struct vmw_ctx_validation_info;

/**
 * struct vmw_sw_context - Command submission context
 * @res_ht: Pointer hash table used to find validation duplicates
 * @res_ht_initialized: Whether the @res_ht hash table has been initialized
 * @kernel: Whether the command buffer originates from kernel code rather
 * than from user-space
 * @fp: If @kernel is false, points to the file of the client. Otherwise
 * NULL
 * @cmd_bounce: Command bounce buffer used for command validation before
 * copying to fifo space
 * @cmd_bounce_size: Current command bounce buffer size
 * @cur_query_bo: Current buffer object used as query result buffer
 * @bo_relocations: List of buffer object relocations
 * @res_relocations: List of resource relocations
 * @buf_start: Pointer to start of memory where command validation takes
 * place
 * @res_cache: Cache of recently looked up resources
 * @last_query_ctx: Last context that submitted a query
 * @needs_post_query_barrier: Whether a query barrier is needed after
 * command submission
 * @staged_bindings: Cached per-context binding tracker
 * @staged_bindings_inuse: Whether the cached per-context binding tracker
 * is in use
 * @staged_cmd_res: List of staged command buffer managed resources in this
 * command buffer
 * @ctx_list: List of context resources referenced in this command buffer
 * @dx_ctx_node: Validation metadata of the current DX context
 * @dx_query_mob: The MOB used for DX queries
 * @dx_query_ctx: The DX context used for the last DX query
 * @man: Pointer to the command buffer managed resource manager
 * @ctx: The validation context
 */
struct vmw_sw_context {
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
	bool kernel;
	struct vmw_fpriv *fp;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct vmw_buffer_object *cur_query_bo;
	struct list_head bo_relocations;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_ctx_binding_state *staged_bindings;
	bool staged_bindings_inuse;
	struct list_head staged_cmd_res;
	struct list_head ctx_list;
	struct vmw_ctx_validation_info *dx_ctx_node;
	struct vmw_buffer_object *dx_query_mob;
	struct vmw_resource *dx_query_ctx;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_validation_context *ctx;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};


/*
 * struct vmw_otable - Guest Memory OBject (MOB) table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 * @enabled: Whether the table is currently enabled on the device.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
	bool enabled;
};

struct vmw_otable_batch {
	unsigned int num_otables;
	struct vmw_otable *otables;
	struct vmw_resource *context;
	struct ttm_buffer_object *otable_bo;
};

enum {
	VMW_IRQTHREAD_FENCE,
	VMW_IRQTHREAD_CMDBUF,
	VMW_IRQTHREAD_MAX
};

struct vmw_private {
	struct ttm_bo_device bdev;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t prim_bb_mem;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t texture_max_width;
	uint32_t texture_max_height;
	uint32_t stdu_max_width;
	uint32_t stdu_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	u32 *mmio_virt;
	uint32_t capabilities;
	uint32_t capabilities2;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t max_mob_pages;
	uint32_t max_mob_size;
	uint32_t memory_size;
	bool has_gmr;
	bool has_mob;
	spinlock_t hw_lock;
	spinlock_t cap_lock;
	bool has_dx;
	bool assume_16bpp;
	bool has_sm4_1;

	/*
	 * VGA registers.
	 */

	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_bpp;
	uint32_t vga_bpl;
	uint32_t vga_pitchlock;

	uint32_t num_displays;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	enum vmw_display_unit_type active_display_unit;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_overlay *overlay_priv;
	struct drm_property *hotplug_mode_update_property;
	struct drm_property *implicit_placement_property;
	struct mutex global_kms_state_mutex;
	spinlock_t cursor_lock;
	struct drm_atomic_state *suspend_state;

	/*
	 * Context and surface management.
	 */

	spinlock_t resource_lock;
	struct idr res_idr[vmw_res_max];

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	spinlock_t waiter_lock;
	int fence_queue_waiters; /* Protected by waiter_lock */
	int goal_queue_waiters; /* Protected by waiter_lock */
	int cmdbuf_waiters; /* Protected by waiter_lock */
	int error_waiters; /* Protected by waiter_lock */
	int fifo_queue_waiters; /* Protected by waiter_lock */
	uint32_t last_read_seqno;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask; /* Updates protected by waiter_lock */

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;
	struct mutex binding_mutex;

	/**
	 * Operating mode.
	 */

	bool stealth;
	bool enable_fb;
	spinlock_t svga_lock;

	/**
	 * PM management.
	 */
	struct notifier_block pm_nb;
	bool refuse_hibernation;
	bool suspend_locked;

	struct mutex release_mutex;
	atomic_t num_fifo_resources;

	/*
	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
	 */
	struct ttm_lock reservation_sem;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct vmw_buffer_object *dummy_query_bo;
	struct vmw_buffer_object *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	/*
	 * DMA mapping stuff.
	 */
	enum vmw_dma_map_mode map_mode;

	/*
	 * Guest Backed stuff
	 */
	struct vmw_otable_batch otable_batch;

	struct vmw_cmdbuf_man *cman;
	DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);

	/* Validation memory reservation */
	struct vmw_validation_mem vvm;
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

/*
 * The locking here is fine-grained, so that it is performed once
 * for every read and write operation. This is of course costly, but we
 * don't perform much register access in the timing critical paths anyway.
 * Instead we have the extra benefit of being sure that we don't forget
 * the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	spin_lock(&dev_priv->hw_lock);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock(&dev_priv->hw_lock);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	u32 val;

	spin_lock(&dev_priv->hw_lock);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock(&dev_priv->hw_lock);

	return val;
}
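
/*
 * Example (illustrative sketch only): reading and writing an SVGA device
 * register through the index/value port pair, assuming the SVGA_REG_*
 * definitions from the device headers:
 *
 *	uint32_t id = vmw_read(dev_priv, SVGA_REG_ID);
 *
 *	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE);
 *
 * Both helpers take and release @hw_lock internally, so a single register
 * access needs no extra locking by the caller.
 */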

extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);


/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			const struct vmw_sg_table *vsgt,
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res, bool intr);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
				bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t handle,
				  struct vmw_surface **out_surf,
				  struct vmw_buffer_object **out_buf);
extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
extern struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
				      struct ttm_object_file *tfile,
				      uint32_t handle,
				      const struct vmw_user_resource_conv *
				      converter);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
				   bool dirty_set,
				   bool dirty,
				   bool switch_backup,
				   struct vmw_buffer_object *new_backup,
				   unsigned long new_backup_offset);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);
extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
void vmw_resource_mob_attach(struct vmw_resource *res);
void vmw_resource_mob_detach(struct vmw_resource *res);

/**
 * vmw_resource_mob_attached - Whether a resource currently has a mob attached
 * @res: The resource
 *
 * Return: true if the resource has a mob attached, false otherwise.
 */
static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
{
	return !list_empty(&res->mob_head);
}

/**
 * vmw_user_resource_noref_release - release a user resource pointer looked up
 * without reference
 */
static inline void vmw_user_resource_noref_release(void)
{
	ttm_base_object_noref_release();
}
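
/*
 * Example (illustrative sketch only): the noref lookup pattern. The pointer
 * returned by vmw_user_resource_noref_lookup_handle() is not reference
 * counted, so it may only be used in a context where the resource can't
 * disappear, and it must be released with
 * vmw_user_resource_noref_release() when done:
 *
 *	struct vmw_resource *res;
 *
 *	res = vmw_user_resource_noref_lookup_handle(dev_priv, tfile, handle,
 *						    converter);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	... use res, without sleeping, then ...
 *	vmw_user_resource_noref_release();
 */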

/**
 * Buffer object helper functions - vmwgfx_bo.c
 */
extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
				   struct vmw_buffer_object *bo,
				   struct ttm_placement *placement,
				   bool interruptible);
extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *buf,
			      bool interruptible);
extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
				     struct vmw_buffer_object *buf,
				     bool interruptible);
extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
				       struct vmw_buffer_object *bo,
				       bool interruptible);
extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
			struct vmw_buffer_object *bo,
			bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
				 SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
extern int vmw_bo_init(struct vmw_private *dev_priv,
		       struct vmw_buffer_object *vmw_bo,
		       size_t size, struct ttm_placement *placement,
		       bool interruptible,
		       void (*bo_free)(struct ttm_buffer_object *bo));
extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
				     struct ttm_object_file *tfile);
extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t size,
			     bool shareable,
			     uint32_t *handle,
			     struct vmw_buffer_object **p_dma_buf,
			     struct ttm_base_object **p_base);
extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
				 struct vmw_buffer_object *dma_buf,
				 uint32_t *handle);
extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
			      uint32_t id, struct vmw_buffer_object **out,
			      struct ttm_base_object **base);
extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
				struct vmw_fence_obj *fence);
extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			       struct ttm_mem_reg *mem);
extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
extern struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle);

/**
 * vmw_user_bo_noref_release - release a buffer object pointer looked up
 * without reference
 */
static inline void vmw_user_bo_noref_release(void)
{
	ttm_base_object_noref_release();
}

/**
 * vmw_bo_prio_adjust - Adjust the buffer object eviction priority
 * according to attached resources
 * @vbo: The struct vmw_buffer_object
 */
static inline void vmw_bo_prio_adjust(struct vmw_buffer_object *vbo)
{
	int i = ARRAY_SIZE(vbo->res_prios);

	while (i--) {
		if (vbo->res_prios[i]) {
			vbo->base.priority = i;
			return;
		}
	}

	vbo->base.priority = 3;
}

/**
 * vmw_bo_prio_add - Notify a buffer object of a newly attached resource
 * eviction priority
 * @vbo: The struct vmw_buffer_object
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest resource eviction priority
 * to the backing buffer object (mob).
 */
static inline void vmw_bo_prio_add(struct vmw_buffer_object *vbo, int prio)
{
	if (vbo->res_prios[prio]++ == 0)
		vmw_bo_prio_adjust(vbo);
}

/**
 * vmw_bo_prio_del - Notify a buffer object of a resource with a certain
 * priority being removed
 * @vbo: The struct vmw_buffer_object
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest resource eviction priority
 * to the backing buffer object (mob).
 */
static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio)
{
	if (--vbo->res_prios[prio] == 0)
		vmw_bo_prio_adjust(vbo);
}
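
/*
 * Example (illustrative sketch only): how resource attach/detach code is
 * expected to keep the per-priority counts and the TTM eviction priority of
 * the backing buffer in sync:
 *
 *	// when a resource starts using vbo as its backup MOB
 *	vmw_bo_prio_add(vbo, res->used_prio);
 *	...
 *	// when the resource is detached again
 *	vmw_bo_prio_del(vbo, res->used_prio);
 *
 * The counts in @res_prios let vmw_bo_prio_adjust() pick the highest
 * priority that still has at least one attached resource.
 */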

/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);
extern __poll_t vmw_fops_poll(struct file *filp,
			      struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
			     size_t count, loff_t *offset);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo);
extern void *
vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
			       uint32_t *seqno);
extern void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv,
				      uint32_t reason);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
				     uint32_t cid);
extern int vmw_fifo_flush(struct vmw_private *dev_priv,
			  bool interruptible);

#define VMW_FIFO_RESERVE_DX(__priv, __bytes, __ctx_id) \
({ \
	vmw_fifo_reserve_dx(__priv, __bytes, __ctx_id) ? : ({ \
		DRM_ERROR("FIFO reserve failed at %s for %u bytes\n", \
			  __func__, (unsigned int) __bytes); \
		NULL; \
	}); \
})

#define VMW_FIFO_RESERVE(__priv, __bytes) \
	VMW_FIFO_RESERVE_DX(__priv, __bytes, SVGA3D_INVALID_ID)
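
/*
 * Example (illustrative sketch only): the usual reserve/fill/commit pattern
 * for submitting a command to fifo space. The command payload type and id
 * below are hypothetical placeholders:
 *
 *	struct {
 *		SVGA3dCmdHeader header;
 *		SVGA3dCmdFoo body;	// hypothetical payload type
 *	} *cmd;
 *
 *	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *
 *	cmd->header.id = SVGA_3D_CMD_FOO;	// hypothetical command id
 *	cmd->header.size = sizeof(cmd->body);
 *	... fill in cmd->body ...
 *	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 *
 * VMW_FIFO_RESERVE() logs an error and evaluates to NULL on failure, so
 * callers only need the NULL check.
 */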

/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
					size_t gran);
/**
 * TTM buffer object driver - vmwgfx_ttm_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_placement vmw_nonfixed_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);

/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->page(viter);
}
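
/*
 * Example (illustrative sketch only): iterating over all pages of a bound
 * buffer object. The iterator must be advanced with vmw_piter_next() before
 * the first page is accessed:
 *
 *	const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
 *	struct vmw_piter viter;
 *
 *	vmw_piter_start(&viter, vsgt, 0);
 *	while (vmw_piter_next(&viter)) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&viter);
 *
 *		... program addr into a GMR / MOB page table ...
 *	}
 */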

/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
			       void *kernel_commands,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       uint32_t dx_context_handle,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence,
			       uint32_t flags);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					    struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
					struct vmw_fpriv *vmw_fp,
					int ret,
					struct drm_vmw_fence_rep __user
					*user_fence_rep,
					struct vmw_fence_obj *fence,
					uint32_t fence_handle,
					int32_t out_fence_fd,
					struct sync_file *sync_file);
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);

/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
			  uint32_t seqno, bool interruptible,
			  unsigned long timeout);
extern int vmw_irq_install(struct drm_device *dev, int irq);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			     uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
				   int *waiter_count);
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
				      u32 flag, int *waiter_count);

/**
 * Rudimentary fence-like objects currently used only for throttling -
 * vmwgfx_marker.c
 */

extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
			   uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
			   uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
			struct vmw_marker_queue *queue, uint32_t us);

/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid, int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
int vmw_kms_suspend(struct drm_device *dev);
int vmw_kms_resume(struct drm_device *dev);
void vmw_kms_lost_device(struct drm_device *dev);

int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);

int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);

/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * GMR Id manager
 */

extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;

/**
 * Prime - vmwgfx_prime.c
 */

extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
				  struct drm_file *file_priv,
				  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);

/*
 * Memory OBject (MOB) management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
			const struct vmw_sg_table *vsgt,
			unsigned long num_data_pages, int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
			   struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);

/*
 * Context management - vmwgfx_context.c
 */

extern const struct vmw_user_resource_conv *user_context_converter;

extern int vmw_context_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     int id,
			     struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
						SVGACOTableType cotable_type);
struct vmw_ctx_binding_state;
extern struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
					  bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
				     struct vmw_buffer_object *mob);
extern struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);


/*
 * Surface management - vmwgfx_surface.c
 */

extern const struct vmw_user_resource_conv *user_surface_converter;

extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
				struct vmw_surface *srf);
int vmw_surface_gb_priv_define(struct drm_device *dev,
			       uint32_t user_accounting_size,
			       SVGA3dSurfaceAllFlags svga3d_flags,
			       SVGA3dSurfaceFormat format,
			       bool for_scanout,
			       uint32_t num_mip_levels,
			       uint32_t multisample_count,
			       uint32_t array_size,
			       struct drm_vmw_size size,
			       SVGA3dMSPattern multisample_pattern,
			       SVGA3dMSQualityLevel quality_level,
			       struct vmw_surface **srf_out);
extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev,
					   void *data,
					   struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
					      void *data,
					      struct drm_file *file_priv);

/*
 * Shader management - vmwgfx_shader.c
 */

extern const struct vmw_user_resource_conv *user_shader_converter;

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
				 struct vmw_cmdbuf_res_manager *man,
				 u32 user_key, const void *bytecode,
				 SVGA3dShaderType shader_type,
				 size_t size,
				 struct list_head *list);
extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
			     u32 user_key, SVGA3dShaderType shader_type,
			     struct list_head *list);
extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
			     struct vmw_resource *ctx,
			     u32 user_key,
			     SVGA3dShaderType shader_type,
			     struct list_head *list);
extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
					     struct list_head *list,
					     bool readback);

extern struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
		  u32 user_key, SVGA3dShaderType shader_type);

/*
 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
 */

extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		      enum vmw_cmdbuf_res_type res_type,
		      u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
			      enum vmw_cmdbuf_res_type res_type,
			      u32 user_key,
			      struct vmw_resource *res,
			      struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
				 enum vmw_cmdbuf_res_type res_type,
				 u32 user_key,
				 struct list_head *list,
				 struct vmw_resource **res);

/*
 * COTable management - vmwgfx_cotable.c
 */
extern const SVGACOTableType vmw_cotable_scrub_order[];
extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
					      struct vmw_resource *ctx,
					      u32 type);
extern int vmw_cotable_notify(struct vmw_resource *res, int id);
extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
				     struct list_head *head);

/*
 * Command buffer management - vmwgfx_cmdbuf.c
 */
struct vmw_cmdbuf_man;
struct vmw_cmdbuf_header;

extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
				    size_t size, size_t default_size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
			   unsigned long timeout);
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
				int ctx_id, bool interruptible,
				struct vmw_cmdbuf_header *header);
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
			      struct vmw_cmdbuf_header *header,
			      bool flush);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
			      size_t size, bool interruptible,
			      struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
				bool interruptible);
extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);

/* CPU blit utilities - vmwgfx_blit.c */

/**
 * struct vmw_diff_cpy - CPU blit information structure
 *
 * @rect: The output bounding box rectangle.
 * @line: The current line of the blit.
 * @line_offset: Offset of the current line segment.
 * @cpp: Bytes per pixel (granularity information).
 * @do_cpy: Which memcpy function to use.
 */
struct vmw_diff_cpy {
	struct drm_rect rect;
	size_t line;
	size_t line_offset;
	int cpp;
	void (*do_cpy)(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		       size_t n);
};

#define VMW_CPU_BLIT_INITIALIZER { \
	.do_cpy = vmw_memcpy, \
}

#define VMW_CPU_BLIT_DIFF_INITIALIZER(_cpp) { \
	.line = 0, \
	.line_offset = 0, \
	.rect = { .x1 = INT_MAX/2, \
		  .y1 = INT_MAX/2, \
		  .x2 = INT_MIN/2, \
		  .y2 = INT_MIN/2 \
	}, \
	.cpp = _cpp, \
	.do_cpy = vmw_diff_memcpy, \
}

void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		     size_t n);

void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);

int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
		    u32 dst_offset, u32 dst_stride,
		    struct ttm_buffer_object *src,
		    u32 src_offset, u32 src_stride,
		    u32 w, u32 h,
		    struct vmw_diff_cpy *diff);
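
/*
 * Example (illustrative sketch only): a 32bpp (cpp == 4) diff blit that
 * copies only changed bytes and records the changed region in diff.rect.
 * Note that the width argument is in bytes:
 *
 *	struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(4);
 *	int ret;
 *
 *	ret = vmw_bo_cpu_blit(dst_bo, dst_offset, dst_stride,
 *			      src_bo, src_offset, src_stride,
 *			      width * 4, height, &diff);
 *	if (ret == 0 && drm_rect_visible(&diff.rect)) {
 *		... flush only the dirty rectangle in diff.rect ...
 *	}
 */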

/* Host messaging - vmwgfx_msg.c: */
int vmw_host_get_guestinfo(const char *guest_info_param,
			   char *buffer, size_t *length);
int vmw_host_log(const char *log);

/* VMW logging */

/**
 * VMW_DEBUG_USER - Debug output for user-space debugging.
 *
 * @fmt: printf() like format string.
 *
 * This macro is for logging user-space error and debugging messages for e.g.
 * command buffer execution errors due to malformed commands, invalid context,
 * etc.
 */
#define VMW_DEBUG_USER(fmt, ...) \
	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)

/**
 * VMW_DEBUG_KMS - Debug output for kernel mode-setting
 *
 * This macro is for debugging vmwgfx mode-setting code.
 */
#define VMW_DEBUG_KMS(fmt, ...) \
	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
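
/*
 * Example (illustrative sketch only): typical use of the debug macros when
 * rejecting a malformed user-space submission or reporting a mode-setting
 * problem:
 *
 *	VMW_DEBUG_USER("Invalid command buffer header size %u.\n", size);
 *	VMW_DEBUG_KMS("Failed to update display layout.\n");
 */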

/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
{
	struct vmw_buffer_object *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf != NULL)
		ttm_bo_put(&tmp_buf->base);
}

static inline struct vmw_buffer_object *
vmw_bo_reference(struct vmw_buffer_object *buf)
{
	ttm_bo_get(&buf->base);
	return buf;
}

static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
	return &ttm_mem_glob;
}

static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
	atomic_inc(&dev_priv->num_fifo_resources);
}

static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
	atomic_dec(&dev_priv->num_fifo_resources);
}

/**
 * vmw_mmio_read - Perform a MMIO read from volatile memory
 *
 * @addr: The address to read from
 *
 * This function is intended to be equivalent to ioread32() on
 * memremap'd memory, but without byteswapping.
 */
static inline u32 vmw_mmio_read(u32 *addr)
{
	return READ_ONCE(*addr);
}

/**
 * vmw_mmio_write - Perform a MMIO write to volatile memory
 *
 * @value: The value to write
 * @addr: The address to write to
 *
 * This function is intended to be equivalent to iowrite32 on
 * memremap'd memory, but without byteswapping.
 */
static inline void vmw_mmio_write(u32 value, u32 *addr)
{
	WRITE_ONCE(*addr, value);
}
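
/*
 * Example (illustrative sketch only): accessing FIFO registers through the
 * memremap'd MMIO area, assuming the SVGA_FIFO_* register indices from the
 * device headers:
 *
 *	u32 *fifo_mem = dev_priv->mmio_virt;
 *	u32 min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
 *	u32 max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
 */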
#endif /* _VMWGFX_DRV_H_ */