drm/vmwgfx: Initial DX support
Initial DX support. Co-authored with Sinclair Yeh, Charmaine Lee and Jakob Bornecrantz.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Sinclair Yeh <syeh@vmware.com>
Signed-off-by: Charmaine Lee <charmainel@vmware.com>
parent 8ce75f8ab9
commit d80efd5cb3

22 changed files with 5362 additions and 790 deletions
drivers/gpu/drm/vmwgfx/Makefile
@@ -8,5 +8,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
 	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
 	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
 	    vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
+	    vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
1294	drivers/gpu/drm/vmwgfx/vmwgfx_binding.c (new file)
	File diff suppressed because it is too large.

209	drivers/gpu/drm/vmwgfx/vmwgfx_binding.h (new file)
@@ -0,0 +1,209 @@
+/**************************************************************************
+ *
+ * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef _VMWGFX_BINDING_H_
+#define _VMWGFX_BINDING_H_
+
+#include "device_include/svga3d_reg.h"
+#include <linux/list.h>
+
+#define VMW_MAX_VIEW_BINDINGS 128
+
+struct vmw_private;
+struct vmw_ctx_binding_state;
+
+/*
+ * enum vmw_ctx_binding_type - abstract resource to context binding types
+ */
+enum vmw_ctx_binding_type {
+	vmw_ctx_binding_shader,
+	vmw_ctx_binding_rt,
+	vmw_ctx_binding_tex,
+	vmw_ctx_binding_cb,
+	vmw_ctx_binding_dx_shader,
+	vmw_ctx_binding_dx_rt,
+	vmw_ctx_binding_sr,
+	vmw_ctx_binding_ds,
+	vmw_ctx_binding_so,
+	vmw_ctx_binding_vb,
+	vmw_ctx_binding_ib,
+	vmw_ctx_binding_max
+};
+
+/**
+ * struct vmw_ctx_bindinfo - single binding metadata
+ *
+ * @ctx_list: List head for the context's list of bindings.
+ * @res_list: List head for a resource's list of bindings.
+ * @ctx: Non-refcounted pointer to the context that owns the binding. NULL
+ * indicates no binding present.
+ * @res: Non-refcounted pointer to the resource the binding points to. This
+ * is typically a surface or a view.
+ * @bt: Binding type.
+ * @scrubbed: Whether the binding has been scrubbed from the context.
+ */
+struct vmw_ctx_bindinfo {
+	struct list_head ctx_list;
+	struct list_head res_list;
+	struct vmw_resource *ctx;
+	struct vmw_resource *res;
+	enum vmw_ctx_binding_type bt;
+	bool scrubbed;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_tex - texture stage binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @texture_stage: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_tex {
+	struct vmw_ctx_bindinfo bi;
+	uint32 texture_stage;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_shader - Shader binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_shader {
+	struct vmw_ctx_bindinfo bi;
+	SVGA3dShaderType shader_slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_cb - Constant buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ * @offset: Device data used to reconstruct binding command.
+ * @size: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_cb {
+	struct vmw_ctx_bindinfo bi;
+	SVGA3dShaderType shader_slot;
+	uint32 offset;
+	uint32 size;
+	uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_view - View binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_view {
+	struct vmw_ctx_bindinfo bi;
+	SVGA3dShaderType shader_slot;
+	uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_so - StreamOutput binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @size: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_so {
+	struct vmw_ctx_bindinfo bi;
+	uint32 offset;
+	uint32 size;
+	uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_vb - Vertex buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @stride: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_vb {
+	struct vmw_ctx_bindinfo bi;
+	uint32 offset;
+	uint32 stride;
+	uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_ib - Index buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @format: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_ib {
+	struct vmw_ctx_bindinfo bi;
+	uint32 offset;
+	uint32 format;
+};
+
+/**
+ * struct vmw_dx_shader_bindings - per shader type context binding state
+ *
+ * @shader: The shader binding for this shader type
+ * @const_buffers: Const buffer bindings for this shader type.
+ * @shader_res: Shader resource view bindings for this shader type.
+ * @dirty_sr: Bitmap tracking individual shader resource bindings changes
+ * that have not yet been emitted to the device.
+ * @dirty: Bitmap tracking per-binding type binding changes that have not
+ * yet been emitted to the device.
+ */
+struct vmw_dx_shader_bindings {
+	struct vmw_ctx_bindinfo_shader shader;
+	struct vmw_ctx_bindinfo_cb const_buffers[SVGA3D_DX_MAX_CONSTBUFFERS];
+	struct vmw_ctx_bindinfo_view shader_res[SVGA3D_DX_MAX_SRVIEWS];
+	DECLARE_BITMAP(dirty_sr, SVGA3D_DX_MAX_SRVIEWS);
+	unsigned long dirty;
+};
+
+extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
+			    const struct vmw_ctx_bindinfo *ci,
+			    u32 shader_slot, u32 slot);
+extern void
+vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
+			 struct vmw_ctx_binding_state *from);
+extern void vmw_binding_res_list_kill(struct list_head *head);
+extern void vmw_binding_res_list_scrub(struct list_head *head);
+extern int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
+extern struct vmw_ctx_binding_state *
+vmw_binding_state_alloc(struct vmw_private *dev_priv);
+extern void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs);
+extern struct list_head *
+vmw_binding_state_list(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs);
+
+#endif
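
The header above only declares the tracker API; the 1294-line implementation in vmwgfx_binding.c is suppressed above. As a rough sketch of how driver code might use it, here is a hypothetical caller that stages a DX shader binding (the function name and surrounding details are illustrative only; the patch's real callers live in the execbuf and shader code, which this excerpt does not show):

/*
 * Illustrative sketch, not part of the patch: stage a DX shader
 * binding in a context binding state.  vmw_binding_add() copies
 * *bi into the slot selected by (shader_slot, slot), so the
 * bindinfo can live on the stack.
 */
static void example_bind_dx_shader(struct vmw_ctx_binding_state *cbs,
				   struct vmw_resource *ctx,
				   struct vmw_resource *shader,
				   u32 shader_slot)
{
	struct vmw_ctx_bindinfo_shader binding;

	binding.bi.ctx = ctx;		/* non-refcounted, see above */
	binding.bi.res = shader;	/* non-refcounted, see above */
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = shader_slot;

	vmw_binding_add(cbs, &binding.bi, shader_slot, 0);
}

Once command submission succeeds, vmw_binding_state_commit() would transfer such staged bindings into the context's persistent binding state.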
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -916,9 +916,8 @@ static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
 
 	cur = man->cur;
 	if (cur && (size + man->cur_pos > cur->size ||
-	    (ctx_id != SVGA3D_INVALID_ID &&
-	     (cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
-	     ctx_id != cur->cb_header->dxContext)))
+		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
+		     ctx_id != cur->cb_header->dxContext)))
 		__vmw_cmdbuf_cur_flush(man);
 
 	if (!man->cur) {
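
The rewritten test above drops the explicit ctx_id != SVGA3D_INVALID_ID check: a buffer that was never given a DX context has SVGA_CB_FLAG_DX_CONTEXT clear, so checking the flag alone suffices before comparing dxContext ids. Restated as a standalone predicate under that assumption (the helper name is made up for illustration; SVGACBHeader is the device-format header the driver already uses):

/*
 * Illustrative restatement of the new flush condition: must the
 * current command buffer be flushed before reserving 'size' bytes
 * for a stream that targets DX context 'ctx_id'?
 */
static bool example_cur_needs_flush(const SVGACBHeader *cb_header,
				    size_t cur_pos, size_t cur_size,
				    size_t size, u32 ctx_id)
{
	if (size + cur_pos > cur_size)
		return true;	/* Not enough space left in the buffer. */

	/* Buffer already targets a different DX context. */
	return (cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
		ctx_id != cb_header->dxContext;
}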
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -26,15 +26,10 @@
  **************************************************************************/
 
 #include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
 
 #define VMW_CMDBUF_RES_MAN_HT_ORDER 12
 
-enum vmw_cmdbuf_res_state {
-	VMW_CMDBUF_RES_COMMITED,
-	VMW_CMDBUF_RES_ADD,
-	VMW_CMDBUF_RES_DEL
-};
-
 /**
  * struct vmw_cmdbuf_res - Command buffer managed resource entry.
  *
@@ -132,9 +127,12 @@ void vmw_cmdbuf_res_commit(struct list_head *list)
 
 	list_for_each_entry_safe(entry, next, list, head) {
 		list_del(&entry->head);
+		if (entry->res->func->commit_notify)
+			entry->res->func->commit_notify(entry->res,
+							entry->state);
 		switch (entry->state) {
 		case VMW_CMDBUF_RES_ADD:
-			entry->state = VMW_CMDBUF_RES_COMMITED;
+			entry->state = VMW_CMDBUF_RES_COMMITTED;
 			list_add_tail(&entry->head, &entry->man->list);
 			break;
 		case VMW_CMDBUF_RES_DEL:
@@ -175,7 +173,7 @@ void vmw_cmdbuf_res_revert(struct list_head *list)
 						 &entry->hash);
 			list_del(&entry->head);
 			list_add_tail(&entry->head, &entry->man->list);
-			entry->state = VMW_CMDBUF_RES_COMMITED;
+			entry->state = VMW_CMDBUF_RES_COMMITTED;
 			break;
 		default:
 			BUG();
@@ -231,6 +229,9 @@ out_invalid_key:
  * @res_type: The resource type.
  * @user_key: The user-space id of the resource.
  * @list: The staging list.
+ * @res_p: If the resource is in an already committed state, points to the
+ * struct vmw_resource on successful return. The pointer will be
+ * non ref-counted.
  *
  * This function looks up the struct vmw_cmdbuf_res entry from the manager
  * hash table and, if it exists, removes it. Depending on its current staging
@@ -240,7 +241,8 @@ out_invalid_key:
 int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
 			  enum vmw_cmdbuf_res_type res_type,
 			  u32 user_key,
-			  struct list_head *list)
+			  struct list_head *list,
+			  struct vmw_resource **res_p)
 {
 	struct vmw_cmdbuf_res *entry;
 	struct drm_hash_item *hash;
@@ -256,12 +258,14 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
 	switch (entry->state) {
 	case VMW_CMDBUF_RES_ADD:
 		vmw_cmdbuf_res_free(man, entry);
+		*res_p = NULL;
 		break;
-	case VMW_CMDBUF_RES_COMMITED:
+	case VMW_CMDBUF_RES_COMMITTED:
 		(void) drm_ht_remove_item(&man->resources, &entry->hash);
 		list_del(&entry->head);
 		entry->state = VMW_CMDBUF_RES_DEL;
 		list_add_tail(&entry->head, list);
+		*res_p = entry->res;
 		break;
 	default:
 		BUG();
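
With the new res_p out-argument, a caller removing a committed entry gets back a non-refcounted pointer to the underlying resource, valid until the staging list is committed or reverted; removing a merely staged (ADD-state) entry yields NULL. A hedged sketch of the calling convention (the function name is illustrative; the patch's real callers are in the shader and view code, not shown in this hunk):

/* Sketch only: remove a managed resource and inspect what came back. */
static int example_remove(struct vmw_cmdbuf_res_manager *man,
			  enum vmw_cmdbuf_res_type res_type,
			  u32 user_key, struct list_head *staging)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_cmdbuf_res_remove(man, res_type, user_key, staging, &res);
	if (ret)
		return ret;

	/*
	 * res == NULL: the entry was still VMW_CMDBUF_RES_ADD and was
	 * freed outright.  Otherwise res points (non-refcounted) to the
	 * previously committed resource, e.g. so a destroy command can
	 * be queued for it before vmw_cmdbuf_res_commit() runs.
	 */
	return 0;
}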
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -27,19 +27,18 @@
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
 #include "ttm/ttm_placement.h"
 
 struct vmw_user_context {
 	struct ttm_base_object base;
 	struct vmw_resource res;
-	struct vmw_ctx_binding_state cbs;
+	struct vmw_ctx_binding_state *cbs;
 	struct vmw_cmdbuf_res_manager *man;
+	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
+	spinlock_t cotable_lock;
 };
 
-
-
-typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
-
 static void vmw_user_context_free(struct vmw_resource *res);
 static struct vmw_resource *
 vmw_user_context_base_to_res(struct ttm_base_object *base);
@@ -51,12 +50,14 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 				 bool readback,
 				 struct ttm_validate_buffer *val_buf);
 static int vmw_gb_context_destroy(struct vmw_resource *res);
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
-					   bool rebind);
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
-static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
-static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
+static int vmw_dx_context_create(struct vmw_resource *res);
+static int vmw_dx_context_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_destroy(struct vmw_resource *res);
+
 static uint64_t vmw_user_context_size;
 
 static const struct vmw_user_resource_conv user_context_conv = {
@@ -93,15 +94,36 @@ static const struct vmw_res_func vmw_gb_context_func = {
 	.unbind = vmw_gb_context_unbind
 };
 
-static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
-	[vmw_ctx_binding_shader] = vmw_context_scrub_shader,
-	[vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
-	[vmw_ctx_binding_tex] = vmw_context_scrub_texture };
+static const struct vmw_res_func vmw_dx_context_func = {
+	.res_type = vmw_res_dx_context,
+	.needs_backup = true,
+	.may_evict = true,
+	.type_name = "dx contexts",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_dx_context_create,
+	.destroy = vmw_dx_context_destroy,
+	.bind = vmw_dx_context_bind,
+	.unbind = vmw_dx_context_unbind
+};
 
 /**
  * Context management:
  */
 
+static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
+{
+	struct vmw_resource *res;
+	int i;
+
+	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+		spin_lock(&uctx->cotable_lock);
+		res = uctx->cotables[i];
+		uctx->cotables[i] = NULL;
+		spin_unlock(&uctx->cotable_lock);
+		vmw_resource_unreference(&res);
+	}
+}
+
 static void vmw_hw_context_destroy(struct vmw_resource *res)
 {
 	struct vmw_user_context *uctx =
@@ -113,17 +135,19 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 	} *cmd;
 
 
-	if (res->func->destroy == vmw_gb_context_destroy) {
+	if (res->func->destroy == vmw_gb_context_destroy ||
+	    res->func->destroy == vmw_dx_context_destroy) {
 		mutex_lock(&dev_priv->cmdbuf_mutex);
 		vmw_cmdbuf_res_man_destroy(uctx->man);
 		mutex_lock(&dev_priv->binding_mutex);
-		(void) vmw_context_binding_state_kill(&uctx->cbs);
-		(void) vmw_gb_context_destroy(res);
+		vmw_binding_state_kill(uctx->cbs);
+		(void) res->func->destroy(res);
 		mutex_unlock(&dev_priv->binding_mutex);
 		if (dev_priv->pinned_bo != NULL &&
 		    !dev_priv->query_cid_valid)
 			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
 		mutex_unlock(&dev_priv->cmdbuf_mutex);
+		vmw_context_cotables_unref(uctx);
 		return;
 	}
 
@@ -144,16 +168,20 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 }
 
 static int vmw_gb_context_init(struct vmw_private *dev_priv,
+			       bool dx,
 			       struct vmw_resource *res,
-			       void (*res_free) (struct vmw_resource *res))
+			       void (*res_free)(struct vmw_resource *res))
 {
-	int ret;
+	int ret, i;
 	struct vmw_user_context *uctx =
 		container_of(res, struct vmw_user_context, res);
 
+	res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
+			    SVGA3D_CONTEXT_DATA_SIZE);
 	ret = vmw_resource_init(dev_priv, res, true,
-				res_free, &vmw_gb_context_func);
-	res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
+				res_free,
+				dx ? &vmw_dx_context_func :
+				&vmw_gb_context_func);
 	if (unlikely(ret != 0))
 		goto out_err;
 
@@ -166,12 +194,32 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
 		}
 	}
 
-	memset(&uctx->cbs, 0, sizeof(uctx->cbs));
-	INIT_LIST_HEAD(&uctx->cbs.list);
+	uctx->cbs = vmw_binding_state_alloc(dev_priv);
+	if (IS_ERR(uctx->cbs)) {
+		ret = PTR_ERR(uctx->cbs);
+		goto out_err;
+	}
+
+	spin_lock_init(&uctx->cotable_lock);
+
+	if (dx) {
+		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
+							      &uctx->res, i);
+			if (unlikely(uctx->cotables[i] == NULL)) {
+				ret = -ENOMEM;
+				goto out_cotables;
+			}
+		}
+	}
+
+
 
 	vmw_resource_activate(res, vmw_hw_context_destroy);
 	return 0;
 
+out_cotables:
+	vmw_context_cotables_unref(uctx);
 out_err:
 	if (res_free)
 		res_free(res);
@@ -182,7 +230,8 @@ out_err:
 
 static int vmw_context_init(struct vmw_private *dev_priv,
 			    struct vmw_resource *res,
-			    void (*res_free) (struct vmw_resource *res))
+			    void (*res_free)(struct vmw_resource *res),
+			    bool dx)
 {
 	int ret;
 
@@ -192,7 +241,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
 	} *cmd;
 
 	if (dev_priv->has_mob)
-		return vmw_gb_context_init(dev_priv, res, res_free);
+		return vmw_gb_context_init(dev_priv, dx, res, res_free);
 
 	ret = vmw_resource_init(dev_priv, res, false,
 				res_free, &vmw_legacy_context_func);
@@ -232,19 +281,10 @@ out_early:
 	return ret;
 }
 
-struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
-{
-	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
-	int ret;
-
-	if (unlikely(res == NULL))
-		return NULL;
-
-	ret = vmw_context_init(dev_priv, res, NULL);
-
-	return (ret == 0) ? res : NULL;
-}
 
+/*
+ * GB context.
+ */
 
 static int vmw_gb_context_create(struct vmw_resource *res)
 {
@@ -309,7 +349,6 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
 			  "binding.\n");
 		return -ENOMEM;
 	}
-
 	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = res->id;
@@ -346,7 +385,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_state_scrub(&uctx->cbs);
+	vmw_binding_state_scrub(uctx->cbs);
 
 	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
 
@@ -419,6 +458,221 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
 	return 0;
 }
 
+/*
+ * DX context.
+ */
+
+static int vmw_dx_context_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDefineContext body;
+	} *cmd;
+
+	if (likely(res->id != -1))
+		return 0;
+
+	ret = vmw_resource_alloc_id(res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate a context id.\n");
+		goto out_no_id;
+	}
+
+	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
+		ret = -EBUSY;
+		goto out_no_fifo;
+	}
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "creation.\n");
+		ret = -ENOMEM;
+		goto out_no_fifo;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_fifo_resource_inc(dev_priv);
+
+	return 0;
+
+out_no_fifo:
+	vmw_resource_release_id(res);
+out_no_id:
+	return ret;
+}
+
+static int vmw_dx_context_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindContext body;
+	} *cmd;
+	struct ttm_buffer_object *bo = val_buf->bo;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "binding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	cmd->body.mobid = bo->mem.start;
+	cmd->body.validContents = res->backup_dirty;
+	res->backup_dirty = false;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+
+	return 0;
+}
+
+/**
+ * vmw_dx_context_scrub_cotables - Scrub all bindings and
+ * cotables from a context
+ *
+ * @ctx: Pointer to the context resource
+ * @readback: Whether to save the cotable contents on scrubbing.
+ *
+ * COtables must be unbound before their context, but unbinding requires
+ * the backup buffer being reserved, whereas scrubbing does not.
+ * This function scrubs all cotables of a context, potentially reading back
+ * the contents into their backup buffers. However, scrubbing cotables
+ * also makes the device context invalid, so scrub all bindings first so
+ * that doesn't have to be done later with an invalid context.
+ */
+void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
+				   bool readback)
+{
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
+	int i;
+
+	vmw_binding_state_scrub(uctx->cbs);
+	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+		struct vmw_resource *res;
+
+		/* Avoid racing with ongoing cotable destruction. */
+		spin_lock(&uctx->cotable_lock);
+		res = uctx->cotables[vmw_cotable_scrub_order[i]];
+		if (res)
+			res = vmw_resource_reference_unless_doomed(res);
+		spin_unlock(&uctx->cotable_lock);
+		if (!res)
+			continue;
+
+		WARN_ON(vmw_cotable_scrub(res, readback));
+		vmw_resource_unreference(&res);
+	}
+}
+
+static int vmw_dx_context_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+	struct vmw_fence_obj *fence;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXReadbackContext body;
+	} *cmd1;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindContext body;
+	} *cmd2;
+	uint32_t submit_size;
+	uint8_t *cmd;
+
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	mutex_lock(&dev_priv->binding_mutex);
+	vmw_dx_context_scrub_cotables(res, readback);
+
+	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
+
+	cmd = vmw_fifo_reserve(dev_priv, submit_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "unbinding.\n");
+		mutex_unlock(&dev_priv->binding_mutex);
+		return -ENOMEM;
+	}
+
+	cmd2 = (void *) cmd;
+	if (readback) {
+		cmd1 = (void *) cmd;
+		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
+		cmd1->header.size = sizeof(cmd1->body);
+		cmd1->body.cid = res->id;
+		cmd2 = (void *) (&cmd1[1]);
+	}
+	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+	cmd2->header.size = sizeof(cmd2->body);
+	cmd2->body.cid = res->id;
+	cmd2->body.mobid = SVGA3D_INVALID_ID;
+
+	vmw_fifo_commit(dev_priv, submit_size);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	/*
+	 * Create a fence object and fence the backup buffer.
+	 */
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+
+	vmw_fence_single_bo(bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+static int vmw_dx_context_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDestroyContext body;
+	} *cmd;
+
+	if (likely(res->id == -1))
+		return 0;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "destruction.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	if (dev_priv->query_cid == res->id)
+		dev_priv->query_cid_valid = false;
+	vmw_resource_release_id(res);
+	vmw_fifo_resource_dec(dev_priv);
+
+	return 0;
+}
+
 /**
  * User-space context management:
  */
@@ -435,6 +689,8 @@ static void vmw_user_context_free(struct vmw_resource *res)
 	    container_of(res, struct vmw_user_context, res);
 	struct vmw_private *dev_priv = res->dev_priv;
 
+	if (ctx->cbs)
+		vmw_binding_state_free(ctx->cbs);
 	ttm_base_object_kfree(ctx, base);
 	ttm_mem_global_free(vmw_mem_glob(dev_priv),
 			    vmw_user_context_size);
@@ -465,8 +721,8 @@ int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
 	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
 }
 
-int vmw_context_define_ioctl(struct drm_device *dev, void *data,
-			     struct drm_file *file_priv)
+static int vmw_context_define(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv, bool dx)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct vmw_user_context *ctx;
@@ -476,6 +732,10 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	int ret;
 
+	if (!dev_priv->has_dx && dx) {
+		DRM_ERROR("DX contexts not supported by device.\n");
+		return -EINVAL;
+	}
 
 	/*
 	 * Approximate idr memory usage with 128 bytes. It will be limited
@@ -516,7 +776,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 	 * From here on, the destructor takes over resource freeing.
 	 */
 
-	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
 	if (unlikely(ret != 0))
 		goto out_unlock;
 
@@ -535,371 +795,29 @@ out_err:
 out_unlock:
 	ttm_read_unlock(&dev_priv->reservation_sem);
 	return ret;
-
 }
 
-/**
- * vmw_context_scrub_shader - scrub a shader binding from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- */
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
+int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
 {
-	struct vmw_private *dev_priv = bi->ctx->dev_priv;
-	struct {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdSetShader body;
-	} *cmd;
-
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for shader "
-			  "unbinding.\n");
-		return -ENOMEM;
-	}
-
-	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
-	cmd->header.size = sizeof(cmd->body);
-	cmd->body.cid = bi->ctx->id;
-	cmd->body.type = bi->i1.shader_type;
-	cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
-	return 0;
+	return vmw_context_define(dev, data, file_priv, false);
 }
 
-/**
- * vmw_context_scrub_render_target - scrub a render target binding
- * from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- */
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
-					   bool rebind)
+int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv)
 {
-	struct vmw_private *dev_priv = bi->ctx->dev_priv;
-	struct {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdSetRenderTarget body;
-	} *cmd;
+	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
+	struct drm_vmw_context_arg *rep = &arg->rep;
 
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for render target "
-			  "unbinding.\n");
-		return -ENOMEM;
-	}
-
-	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
-	cmd->header.size = sizeof(cmd->body);
-	cmd->body.cid = bi->ctx->id;
-	cmd->body.type = bi->i1.rt_type;
-	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-	cmd->body.target.face = 0;
-	cmd->body.target.mipmap = 0;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
-	return 0;
-}
-
-/**
- * vmw_context_scrub_texture - scrub a texture binding from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- *
- * TODO: Possibly complement this function with a function that takes
- * a list of texture bindings and combines them to a single command.
- */
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
-				     bool rebind)
-{
-	struct vmw_private *dev_priv = bi->ctx->dev_priv;
-	struct {
-		SVGA3dCmdHeader header;
-		struct {
-			SVGA3dCmdSetTextureState c;
-			SVGA3dTextureState s1;
-		} body;
-	} *cmd;
-
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for texture "
-			  "unbinding.\n");
-		return -ENOMEM;
-	}
-
-	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
-	cmd->header.size = sizeof(cmd->body);
-	cmd->body.c.cid = bi->ctx->id;
-	cmd->body.s1.stage = bi->i1.texture_stage;
-	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
-	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
-	return 0;
-}
-
-/**
- * vmw_context_binding_drop: Stop tracking a context binding
- *
- * @cb: Pointer to binding tracker storage.
- *
- * Stops tracking a context binding, and re-initializes its storage.
- * Typically used when the context binding is replaced with a binding to
- * another (or the same, for that matter) resource.
- */
-static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
-{
-	list_del(&cb->ctx_list);
-	if (!list_empty(&cb->res_list))
-		list_del(&cb->res_list);
-	cb->bi.ctx = NULL;
-}
-
-/**
- * vmw_context_binding_add: Start tracking a context binding
- *
- * @cbs: Pointer to the context binding state tracker.
- * @bi: Information about the binding to track.
- *
- * Performs basic checks on the binding to make sure arguments are within
- * bounds and then starts tracking the binding in the context binding
- * state structure @cbs.
- */
-int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
-			    const struct vmw_ctx_bindinfo *bi)
-{
-	struct vmw_ctx_binding *loc;
-
-	switch (bi->bt) {
-	case vmw_ctx_binding_rt:
-		if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
-			DRM_ERROR("Illegal render target type %u.\n",
-				  (unsigned) bi->i1.rt_type);
-			return -EINVAL;
-		}
-		loc = &cbs->render_targets[bi->i1.rt_type];
-		break;
-	case vmw_ctx_binding_tex:
-		if (unlikely((unsigned)bi->i1.texture_stage >=
-			     SVGA3D_NUM_TEXTURE_UNITS)) {
-			DRM_ERROR("Illegal texture/sampler unit %u.\n",
-				  (unsigned) bi->i1.texture_stage);
-			return -EINVAL;
-		}
-		loc = &cbs->texture_units[bi->i1.texture_stage];
-		break;
-	case vmw_ctx_binding_shader:
-		if (unlikely((unsigned)bi->i1.shader_type >=
-			     SVGA3D_SHADERTYPE_PREDX_MAX)) {
-			DRM_ERROR("Illegal shader type %u.\n",
-				  (unsigned) bi->i1.shader_type);
-			return -EINVAL;
-		}
-		loc = &cbs->shaders[bi->i1.shader_type];
-		break;
 	default:
-		BUG();
-	}
-
-	if (loc->bi.ctx != NULL)
-		vmw_context_binding_drop(loc);
-
-	loc->bi = *bi;
-	loc->bi.scrubbed = false;
-	list_add_tail(&loc->ctx_list, &cbs->list);
-	INIT_LIST_HEAD(&loc->res_list);
-
-	return 0;
-}
-
-/**
- * vmw_context_binding_transfer: Transfer a context binding tracking entry.
- *
- * @cbs: Pointer to the persistent context binding state tracker.
- * @bi: Information about the binding to track.
- *
- */
-static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
-					 const struct vmw_ctx_bindinfo *bi)
-{
-	struct vmw_ctx_binding *loc;
-
-	switch (bi->bt) {
-	case vmw_ctx_binding_rt:
-		loc = &cbs->render_targets[bi->i1.rt_type];
 		break;
-	case vmw_ctx_binding_tex:
-		loc = &cbs->texture_units[bi->i1.texture_stage];
-		break;
-	case vmw_ctx_binding_shader:
-		loc = &cbs->shaders[bi->i1.shader_type];
-		break;
-	default:
-		BUG();
 	}
-
-	if (loc->bi.ctx != NULL)
-		vmw_context_binding_drop(loc);
-
-	if (bi->res != NULL) {
-		loc->bi = *bi;
-		list_add_tail(&loc->ctx_list, &cbs->list);
-		list_add_tail(&loc->res_list, &bi->res->binding_head);
-	}
-}
-
-/**
- * vmw_context_binding_kill - Kill a binding on the device
- * and stop tracking it.
- *
- * @cb: Pointer to binding tracker storage.
- *
- * Emits FIFO commands to scrub a binding represented by @cb.
- * Then stops tracking the binding and re-initializes its storage.
- */
-static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
-{
-	if (!cb->bi.scrubbed) {
-		(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
-		cb->bi.scrubbed = true;
-	}
-	vmw_context_binding_drop(cb);
-}
-
-/**
- * vmw_context_binding_state_kill - Kill all bindings associated with a
- * struct vmw_ctx_binding state structure, and re-initialize the structure.
- *
- * @cbs: Pointer to the context binding state tracker.
- *
- * Emits commands to scrub all bindings associated with the
- * context binding state tracker. Then re-initializes the whole structure.
- */
-static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
-{
-	struct vmw_ctx_binding *entry, *next;
-
-	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
-		vmw_context_binding_kill(entry);
-}
-
-/**
- * vmw_context_binding_state_scrub - Scrub all bindings associated with a
- * struct vmw_ctx_binding state structure.
- *
- * @cbs: Pointer to the context binding state tracker.
- *
- * Emits commands to scrub all bindings associated with the
- * context binding state tracker.
- */
-static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
-{
-	struct vmw_ctx_binding *entry;
-
-	list_for_each_entry(entry, &cbs->list, ctx_list) {
-		if (!entry->bi.scrubbed) {
-			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
-			entry->bi.scrubbed = true;
-		}
-	}
-}
-
-/**
- * vmw_context_binding_res_list_kill - Kill all bindings on a
- * resource binding list
- *
- * @head: list head of resource binding list
- *
- * Kills all bindings associated with a specific resource. Typically
- * called before the resource is destroyed.
- */
-void vmw_context_binding_res_list_kill(struct list_head *head)
-{
-	struct vmw_ctx_binding *entry, *next;
-
-	list_for_each_entry_safe(entry, next, head, res_list)
-		vmw_context_binding_kill(entry);
-}
-
-/**
- * vmw_context_binding_res_list_scrub - Scrub all bindings on a
- * resource binding list
- *
- * @head: list head of resource binding list
- *
- * Scrub all bindings associated with a specific resource. Typically
- * called before the resource is evicted.
- */
-void vmw_context_binding_res_list_scrub(struct list_head *head)
-{
-	struct vmw_ctx_binding *entry;
-
-	list_for_each_entry(entry, head, res_list) {
-		if (!entry->bi.scrubbed) {
-			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
-			entry->bi.scrubbed = true;
-		}
-	}
-}
-
-/**
- * vmw_context_binding_state_transfer - Commit staged binding info
- *
- * @ctx: Pointer to context to commit the staged binding info to.
- * @from: Staged binding info built during execbuf.
- *
- * Transfers binding info from a temporary structure to the persistent
- * structure in the context. This can be done once commands have been
- * submitted.
- */
-void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
-					struct vmw_ctx_binding_state *from)
-{
-	struct vmw_user_context *uctx =
-		container_of(ctx, struct vmw_user_context, res);
-	struct vmw_ctx_binding *entry, *next;
-
-	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
-		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
-}
-
-/**
- * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
- *
- * @ctx: The context resource
- *
- * Walks through the context binding list and rebinds all scrubbed
- * resources.
- */
-int vmw_context_rebind_all(struct vmw_resource *ctx)
-{
-	struct vmw_ctx_binding *entry;
-	struct vmw_user_context *uctx =
-		container_of(ctx, struct vmw_user_context, res);
-	struct vmw_ctx_binding_state *cbs = &uctx->cbs;
-	int ret;
-
-	list_for_each_entry(entry, &cbs->list, ctx_list) {
-		if (likely(!entry->bi.scrubbed))
-			continue;
-
-		if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
-			    SVGA3D_INVALID_ID))
-			continue;
-
-		ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
-		if (unlikely(ret != 0))
-			return ret;
-
-		entry->bi.scrubbed = false;
-	}
-
-	return 0;
+	return -EINVAL;
 }
 
 /**
@@ -912,10 +830,39 @@ int vmw_context_rebind_all(struct vmw_resource *ctx)
  */
 struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
 {
-	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
+
+	return vmw_binding_state_list(uctx->cbs);
 }
 
 struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
 {
 	return container_of(ctx, struct vmw_user_context, res)->man;
 }
+
+struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
+					 SVGACOTableType cotable_type)
+{
+	if (cotable_type >= SVGA_COTABLE_DX10_MAX)
+		return ERR_PTR(-EINVAL);
+
+	return vmw_resource_reference
+		(container_of(ctx, struct vmw_user_context, res)->
+		 cotables[cotable_type]);
+}
+
+/**
+ * vmw_context_binding_state -
+ * Return a pointer to a context binding state structure
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current state of bindings of the given context. Note that
+ * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct vmw_ctx_binding_state *
+vmw_context_binding_state(struct vmw_resource *ctx)
+{
+	return container_of(ctx, struct vmw_user_context, res)->cbs;
+}
							
								
								
									
662  drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c  Normal file
							|  | @ -0,0 +1,662 @@ | ||||||
|  | /**************************************************************************
 | ||||||
|  |  * | ||||||
|  |  * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA | ||||||
|  |  * All Rights Reserved. | ||||||
|  |  * | ||||||
|  |  * Permission is hereby granted, free of charge, to any person obtaining a | ||||||
|  |  * copy of this software and associated documentation files (the | ||||||
|  |  * "Software"), to deal in the Software without restriction, including | ||||||
|  |  * without limitation the rights to use, copy, modify, merge, publish, | ||||||
|  |  * distribute, sub license, and/or sell copies of the Software, and to | ||||||
|  |  * permit persons to whom the Software is furnished to do so, subject to | ||||||
|  |  * the following conditions: | ||||||
|  |  * | ||||||
|  |  * The above copyright notice and this permission notice (including the | ||||||
|  |  * next paragraph) shall be included in all copies or substantial portions | ||||||
|  |  * of the Software. | ||||||
|  |  * | ||||||
|  |  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||||
|  |  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||||
|  |  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||||||
|  |  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||||||
|  |  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||||||
|  |  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||||||
|  |  * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||||||
|  |  * | ||||||
|  |  **************************************************************************/ | ||||||
|  | /*
 | ||||||
|  |  * Treat context OTables as resources to make use of the resource | ||||||
|  |  * backing MOB eviction mechanism, which is used to read back the COTable | ||||||
|  |  * whenever the backing MOB is evicted. | ||||||
|  |  */ | ||||||
|  | 
 | ||||||
|  | #include "vmwgfx_drv.h" | ||||||
|  | #include "vmwgfx_resource_priv.h" | ||||||
|  | #include <ttm/ttm_placement.h> | ||||||
|  | #include "vmwgfx_so.h" | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * struct vmw_cotable - Context Object Table resource | ||||||
|  |  * | ||||||
|  |  * @res: struct vmw_resource we are deriving from. | ||||||
|  |  * @ctx: non-refcounted pointer to the owning context. | ||||||
|  |  * @size_read_back: Size of data read back during eviction. | ||||||
|  |  * @seen_entries: Seen entries in command stream for this cotable. | ||||||
|  |  * @type: The cotable type. | ||||||
|  |  * @scrubbed: Whether the cotable has been scrubbed. | ||||||
|  |  * @resource_list: List of resources in the cotable. | ||||||
|  |  */ | ||||||
|  | struct vmw_cotable { | ||||||
|  | 	struct vmw_resource res; | ||||||
|  | 	struct vmw_resource *ctx; | ||||||
|  | 	size_t size_read_back; | ||||||
|  | 	int seen_entries; | ||||||
|  | 	u32 type; | ||||||
|  | 	bool scrubbed; | ||||||
|  | 	struct list_head resource_list; | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * struct vmw_cotable_info - Static info about cotable types | ||||||
|  |  * | ||||||
|  |  * @min_initial_entries: Minimum number of initial entries at cotable | ||||||
|  |  * allocation for this cotable type. | ||||||
|  |  * @size: Size of each entry. | ||||||
|  |  * @unbind_func: Called with the cotable's active resource list when the | ||||||
|  |  * cotable is scrubbed; NULL if no per-resource action is needed. | ||||||
|  |  */ | ||||||
|  | struct vmw_cotable_info { | ||||||
|  | 	u32 min_initial_entries; | ||||||
|  | 	u32 size; | ||||||
|  | 	void (*unbind_func)(struct vmw_private *, struct list_head *, | ||||||
|  | 			    bool); | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | static const struct vmw_cotable_info co_info[] = { | ||||||
|  | 	{1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy}, | ||||||
|  | 	{1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy}, | ||||||
|  | 	{1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy}, | ||||||
|  | 	{1, sizeof(SVGACOTableDXElementLayoutEntry), NULL}, | ||||||
|  | 	{1, sizeof(SVGACOTableDXBlendStateEntry), NULL}, | ||||||
|  | 	{1, sizeof(SVGACOTableDXDepthStencilEntry), NULL}, | ||||||
|  | 	{1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL}, | ||||||
|  | 	{1, sizeof(SVGACOTableDXSamplerEntry), NULL}, | ||||||
|  | 	{1, sizeof(SVGACOTableDXStreamOutputEntry), NULL}, | ||||||
|  | 	{1, sizeof(SVGACOTableDXQueryEntry), NULL}, | ||||||
|  | 	{1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub} | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | /*
 | ||||||
|  |  * Cotables with bindings that we remove must be scrubbed first; | ||||||
|  |  * otherwise the device will swap in an invalid context when the | ||||||
|  |  * bindings are removed before the cotable is scrubbed. | ||||||
|  |  */ | ||||||
|  | const SVGACOTableType vmw_cotable_scrub_order[] = { | ||||||
|  | 	SVGA_COTABLE_RTVIEW, | ||||||
|  | 	SVGA_COTABLE_DSVIEW, | ||||||
|  | 	SVGA_COTABLE_SRVIEW, | ||||||
|  | 	SVGA_COTABLE_DXSHADER, | ||||||
|  | 	SVGA_COTABLE_ELEMENTLAYOUT, | ||||||
|  | 	SVGA_COTABLE_BLENDSTATE, | ||||||
|  | 	SVGA_COTABLE_DEPTHSTENCIL, | ||||||
|  | 	SVGA_COTABLE_RASTERIZERSTATE, | ||||||
|  | 	SVGA_COTABLE_SAMPLER, | ||||||
|  | 	SVGA_COTABLE_STREAMOUTPUT, | ||||||
|  | 	SVGA_COTABLE_DXQUERY, | ||||||
|  | }; | ||||||
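To make the ordering constraint concrete, here is a minimal sketch (a hypothetical helper, not part of this patch) of how a caller is expected to consume the array. It assumes the helper sits in this translation unit, so ARRAY_SIZE() sees the array definition, and that the caller keeps one cotable resource pointer per table type:

	int vmw_cotable_scrub(struct vmw_resource *res, bool readback);

	static void example_scrub_in_order(struct vmw_resource *cotables[],
					   bool readback)
	{
		u32 i;

		/* Scrub the view cotables before the shader/state cotables
		 * their bindings reference, per vmw_cotable_scrub_order. */
		for (i = 0; i < ARRAY_SIZE(vmw_cotable_scrub_order); ++i) {
			const SVGACOTableType type = vmw_cotable_scrub_order[i];

			if (cotables[type])
				WARN_ON(vmw_cotable_scrub(cotables[type],
							  readback));
		}
	}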
|  | 
 | ||||||
|  | static int vmw_cotable_bind(struct vmw_resource *res, | ||||||
|  | 			    struct ttm_validate_buffer *val_buf); | ||||||
|  | static int vmw_cotable_unbind(struct vmw_resource *res, | ||||||
|  | 			      bool readback, | ||||||
|  | 			      struct ttm_validate_buffer *val_buf); | ||||||
|  | static int vmw_cotable_create(struct vmw_resource *res); | ||||||
|  | static int vmw_cotable_destroy(struct vmw_resource *res); | ||||||
|  | 
 | ||||||
|  | static const struct vmw_res_func vmw_cotable_func = { | ||||||
|  | 	.res_type = vmw_res_cotable, | ||||||
|  | 	.needs_backup = true, | ||||||
|  | 	.may_evict = true, | ||||||
|  | 	.type_name = "context guest backed object tables", | ||||||
|  | 	.backup_placement = &vmw_mob_placement, | ||||||
|  | 	.create = vmw_cotable_create, | ||||||
|  | 	.destroy = vmw_cotable_destroy, | ||||||
|  | 	.bind = vmw_cotable_bind, | ||||||
|  | 	.unbind = vmw_cotable_unbind, | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_cotable - Convert a struct vmw_resource pointer to a struct | ||||||
|  |  * vmw_cotable pointer | ||||||
|  |  * | ||||||
|  |  * @res: Pointer to the resource. | ||||||
|  |  */ | ||||||
|  | static struct vmw_cotable *vmw_cotable(struct vmw_resource *res) | ||||||
|  | { | ||||||
|  | 	return container_of(res, struct vmw_cotable, res); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_cotable_destroy - Cotable resource destroy callback | ||||||
|  |  * | ||||||
|  |  * @res: Pointer to the cotable resource. | ||||||
|  |  * | ||||||
|  |  * There is no device cotable destroy command, so this function only | ||||||
|  |  * makes sure that the resource id is set to invalid. | ||||||
|  |  */ | ||||||
|  | static int vmw_cotable_destroy(struct vmw_resource *res) | ||||||
|  | { | ||||||
|  | 	res->id = -1; | ||||||
|  | 	return 0; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_cotable_unscrub - Undo a cotable scrub operation | ||||||
|  |  * | ||||||
|  |  * @res: Pointer to the cotable resource | ||||||
|  |  * | ||||||
|  |  * This function issues commands to (re)bind the cotable to | ||||||
|  |  * its backing mob, which needs to be validated and reserved at this point. | ||||||
|  |  * This is identical to bind(), except that the function interface differs. | ||||||
|  |  */ | ||||||
|  | static int vmw_cotable_unscrub(struct vmw_resource *res) | ||||||
|  | { | ||||||
|  | 	struct vmw_cotable *vcotbl = vmw_cotable(res); | ||||||
|  | 	struct vmw_private *dev_priv = res->dev_priv; | ||||||
|  | 	struct ttm_buffer_object *bo = &res->backup->base; | ||||||
|  | 	struct { | ||||||
|  | 		SVGA3dCmdHeader header; | ||||||
|  | 		SVGA3dCmdDXSetCOTable body; | ||||||
|  | 	} *cmd; | ||||||
|  | 
 | ||||||
|  | 	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB); | ||||||
|  | 	lockdep_assert_held(&bo->resv->lock.base); | ||||||
|  | 
 | ||||||
|  | 	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), SVGA3D_INVALID_ID); | ||||||
|  | 	if (!cmd) { | ||||||
|  | 		DRM_ERROR("Failed reserving FIFO space for cotable " | ||||||
|  | 			  "binding.\n"); | ||||||
|  | 		return -ENOMEM; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID); | ||||||
|  | 	cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE; | ||||||
|  | 	cmd->header.size = sizeof(cmd->body); | ||||||
|  | 	cmd->body.cid = vcotbl->ctx->id; | ||||||
|  | 	cmd->body.type = vcotbl->type; | ||||||
|  | 	cmd->body.mobid = bo->mem.start; | ||||||
|  | 	cmd->body.validSizeInBytes = vcotbl->size_read_back; | ||||||
|  | 
 | ||||||
|  | 	vmw_fifo_commit_flush(dev_priv, sizeof(*cmd)); | ||||||
|  | 	vcotbl->scrubbed = false; | ||||||
|  | 
 | ||||||
|  | 	return 0; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_cotable_bind - Cotable resource bind callback | ||||||
|  |  * | ||||||
|  |  * @res: Pointer to the cotable resource | ||||||
|  |  * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller | ||||||
|  |  * for convenience / fencing. | ||||||
|  |  * | ||||||
|  |  * This function issues commands to (re)bind the cotable to | ||||||
|  |  * its backing mob, which needs to be validated and reserved at this point. | ||||||
|  |  */ | ||||||
|  | static int vmw_cotable_bind(struct vmw_resource *res, | ||||||
|  | 			    struct ttm_validate_buffer *val_buf) | ||||||
|  | { | ||||||
|  | 	/*
 | ||||||
|  | 	 * The create() callback may have changed @res->backup without | ||||||
|  | 	 * the caller noticing, and with val_buf->bo still pointing to | ||||||
|  | 	 * the old backup buffer. Although hackish, and not used currently, | ||||||
|  | 	 * take the opportunity to correct the value here so that it's not | ||||||
|  | 	 * misused in the future. | ||||||
|  | 	 */ | ||||||
|  | 	val_buf->bo = &res->backup->base; | ||||||
|  | 
 | ||||||
|  | 	return vmw_cotable_unscrub(res); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_cotable_scrub - Scrub the cotable from the device. | ||||||
|  |  * | ||||||
|  |  * @res: Pointer to the cotable resource. | ||||||
|  |  * @readback: Whether to initiate a readback of the cotable data to the backup | ||||||
|  |  * buffer. | ||||||
|  |  * | ||||||
|  |  * In some situations (context swapouts) it might be desirable to make the | ||||||
|  |  * device forget about the cotable without performing a full unbind. A full | ||||||
|  |  * unbind requires reserved backup buffers and it might not be possible to | ||||||
|  |  * reserve them due to locking order violation issues. The vmw_cotable_scrub | ||||||
|  |  * function implements a partial unbind() without that requirement but with the | ||||||
|  |  * following restrictions. | ||||||
|  |  * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must | ||||||
|  |  *    be called. | ||||||
|  |  * 2) Before the cotable backing buffer is used by the CPU, or during the | ||||||
|  |  *    resource destruction, vmw_cotable_unbind() must be called. | ||||||
|  |  */ | ||||||
|  | int vmw_cotable_scrub(struct vmw_resource *res, bool readback) | ||||||
|  | { | ||||||
|  | 	struct vmw_cotable *vcotbl = vmw_cotable(res); | ||||||
|  | 	struct vmw_private *dev_priv = res->dev_priv; | ||||||
|  | 	size_t submit_size; | ||||||
|  | 
 | ||||||
|  | 	struct { | ||||||
|  | 		SVGA3dCmdHeader header; | ||||||
|  | 		SVGA3dCmdDXReadbackCOTable body; | ||||||
|  | 	} *cmd0; | ||||||
|  | 	struct { | ||||||
|  | 		SVGA3dCmdHeader header; | ||||||
|  | 		SVGA3dCmdDXSetCOTable body; | ||||||
|  | 	} *cmd1; | ||||||
|  | 
 | ||||||
|  | 	if (vcotbl->scrubbed) | ||||||
|  | 		return 0; | ||||||
|  | 
 | ||||||
|  | 	if (co_info[vcotbl->type].unbind_func) | ||||||
|  | 		co_info[vcotbl->type].unbind_func(dev_priv, | ||||||
|  | 						  &vcotbl->resource_list, | ||||||
|  | 						  readback); | ||||||
|  | 	submit_size = sizeof(*cmd1); | ||||||
|  | 	if (readback) | ||||||
|  | 		submit_size += sizeof(*cmd0); | ||||||
|  | 
 | ||||||
|  | 	cmd1 = vmw_fifo_reserve_dx(dev_priv, submit_size, SVGA3D_INVALID_ID); | ||||||
|  | 	if (!cmd1) { | ||||||
|  | 		DRM_ERROR("Failed reserving FIFO space for cotable " | ||||||
|  | 			  "unbinding.\n"); | ||||||
|  | 		return -ENOMEM; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	vcotbl->size_read_back = 0; | ||||||
|  | 	if (readback) { | ||||||
|  | 		cmd0 = (void *) cmd1; | ||||||
|  | 		cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE; | ||||||
|  | 		cmd0->header.size = sizeof(cmd0->body); | ||||||
|  | 		cmd0->body.cid = vcotbl->ctx->id; | ||||||
|  | 		cmd0->body.type = vcotbl->type; | ||||||
|  | 		cmd1 = (void *) &cmd0[1]; | ||||||
|  | 		vcotbl->size_read_back = res->backup_size; | ||||||
|  | 	} | ||||||
|  | 	cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE; | ||||||
|  | 	cmd1->header.size = sizeof(cmd1->body); | ||||||
|  | 	cmd1->body.cid = vcotbl->ctx->id; | ||||||
|  | 	cmd1->body.type = vcotbl->type; | ||||||
|  | 	cmd1->body.mobid = SVGA3D_INVALID_ID; | ||||||
|  | 	cmd1->body.validSizeInBytes = 0; | ||||||
|  | 	vmw_fifo_commit_flush(dev_priv, submit_size); | ||||||
|  | 	vcotbl->scrubbed = true; | ||||||
|  | 
 | ||||||
|  | 	/* Trigger a create() on next validate. */ | ||||||
|  | 	res->id = -1; | ||||||
|  | 
 | ||||||
|  | 	return 0; | ||||||
|  | } | ||||||
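A hypothetical swap-out cycle illustrating the restrictions listed above vmw_cotable_scrub() (a sketch only; the helper name and flow are assumptions, not code from this patch):

	static int example_cotable_swap_cycle(struct vmw_resource *res)
	{
		int ret;

		/* Partial unbind: the device forgets the cotable, its
		 * contents are read back to the backing MOB, and no
		 * reserved backup buffer is required. */
		ret = vmw_cotable_scrub(res, true);
		if (ret)
			return ret;

		/* ... the context is swapped out here ... */

		/* Restriction 1): rebind before the GPU uses it again. */
		return vmw_cotable_unscrub(res);
	}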
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_cotable_unbind - Cotable resource unbind callback | ||||||
|  |  * | ||||||
|  |  * @res: Pointer to the cotable resource. | ||||||
|  |  * @readback: Whether to read back cotable data to the backup buffer. | ||||||
|  |  * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller | ||||||
|  |  * for convenience / fencing. | ||||||
|  |  * | ||||||
|  |  * Unbinds the cotable from the device and fences the backup buffer. | ||||||
|  |  */ | ||||||
|  | static int vmw_cotable_unbind(struct vmw_resource *res, | ||||||
|  | 			      bool readback, | ||||||
|  | 			      struct ttm_validate_buffer *val_buf) | ||||||
|  | { | ||||||
|  | 	struct vmw_cotable *vcotbl = vmw_cotable(res); | ||||||
|  | 	struct vmw_private *dev_priv = res->dev_priv; | ||||||
|  | 	struct ttm_buffer_object *bo = val_buf->bo; | ||||||
|  | 	struct vmw_fence_obj *fence; | ||||||
|  | 
 | ||||||
|  | 	if (list_empty(&res->mob_head)) | ||||||
|  | 		return 0; | ||||||
|  | 
 | ||||||
|  | 	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB); | ||||||
|  | 	lockdep_assert_held(&bo->resv->lock.base); | ||||||
|  | 
 | ||||||
|  | 	mutex_lock(&dev_priv->binding_mutex); | ||||||
|  | 	if (!vcotbl->scrubbed) | ||||||
|  | 		vmw_dx_context_scrub_cotables(vcotbl->ctx, readback); | ||||||
|  | 	mutex_unlock(&dev_priv->binding_mutex); | ||||||
|  | 	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); | ||||||
|  | 	vmw_fence_single_bo(bo, fence); | ||||||
|  | 	if (likely(fence != NULL)) | ||||||
|  | 		vmw_fence_obj_unreference(&fence); | ||||||
|  | 
 | ||||||
|  | 	return 0; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_cotable_readback - Read back a cotable without unbinding. | ||||||
|  |  * | ||||||
|  |  * @res: The cotable resource. | ||||||
|  |  * | ||||||
|  |  * Reads back a cotable to its backing mob without scrubbing the MOB from | ||||||
|  |  * the cotable. The MOB is fenced for subsequent CPU access. | ||||||
|  |  */ | ||||||
|  | static int vmw_cotable_readback(struct vmw_resource *res) | ||||||
|  | { | ||||||
|  | 	struct vmw_cotable *vcotbl = vmw_cotable(res); | ||||||
|  | 	struct vmw_private *dev_priv = res->dev_priv; | ||||||
|  | 
 | ||||||
|  | 	struct { | ||||||
|  | 		SVGA3dCmdHeader header; | ||||||
|  | 		SVGA3dCmdDXReadbackCOTable body; | ||||||
|  | 	} *cmd; | ||||||
|  | 	struct vmw_fence_obj *fence; | ||||||
|  | 
 | ||||||
|  | 	if (!vcotbl->scrubbed) { | ||||||
|  | 		cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), | ||||||
|  | 					  SVGA3D_INVALID_ID); | ||||||
|  | 		if (!cmd) { | ||||||
|  | 			DRM_ERROR("Failed reserving FIFO space for cotable " | ||||||
|  | 				  "readback.\n"); | ||||||
|  | 			return -ENOMEM; | ||||||
|  | 		} | ||||||
|  | 		cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE; | ||||||
|  | 		cmd->header.size = sizeof(cmd->body); | ||||||
|  | 		cmd->body.cid = vcotbl->ctx->id; | ||||||
|  | 		cmd->body.type = vcotbl->type; | ||||||
|  | 		vcotbl->size_read_back = res->backup_size; | ||||||
|  | 		vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); | ||||||
|  | 	vmw_fence_single_bo(&res->backup->base, fence); | ||||||
|  | 	vmw_fence_obj_unreference(&fence); | ||||||
|  | 
 | ||||||
|  | 	return 0; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_cotable_resize - Resize a cotable. | ||||||
|  |  * | ||||||
|  |  * @res: The cotable resource. | ||||||
|  |  * @new_size: The new size. | ||||||
|  |  * | ||||||
|  |  * Resizes a cotable and binds the new backup buffer. | ||||||
|  |  * On failure the cotable is left intact. | ||||||
|  |  * Important! This function may not fail once the MOB switch has been | ||||||
|  |  * committed to hardware. That would put the device context in an | ||||||
|  |  * invalid state which we can't currently recover from. | ||||||
|  |  */ | ||||||
|  | static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) | ||||||
|  | { | ||||||
|  | 	struct vmw_private *dev_priv = res->dev_priv; | ||||||
|  | 	struct vmw_cotable *vcotbl = vmw_cotable(res); | ||||||
|  | 	struct vmw_dma_buffer *buf, *old_buf = res->backup; | ||||||
|  | 	struct ttm_buffer_object *bo, *old_bo = &res->backup->base; | ||||||
|  | 	size_t old_size = res->backup_size; | ||||||
|  | 	size_t old_size_read_back = vcotbl->size_read_back; | ||||||
|  | 	size_t cur_size_read_back; | ||||||
|  | 	struct ttm_bo_kmap_obj old_map, new_map; | ||||||
|  | 	int ret; | ||||||
|  | 	size_t i; | ||||||
|  | 
 | ||||||
|  | 	ret = vmw_cotable_readback(res); | ||||||
|  | 	if (ret) | ||||||
|  | 		return ret; | ||||||
|  | 
 | ||||||
|  | 	cur_size_read_back = vcotbl->size_read_back; | ||||||
|  | 	vcotbl->size_read_back = old_size_read_back; | ||||||
|  | 
 | ||||||
|  | 	/*
 | ||||||
|  | 	 * While the device is processing, allocate and reserve a buffer object | ||||||
|  | 	 * for the new COTable. Initially pin the buffer object to make sure | ||||||
|  | 	 * we can use tryreserve without failure. | ||||||
|  | 	 */ | ||||||
|  | 	buf = kzalloc(sizeof(*buf), GFP_KERNEL); | ||||||
|  | 	if (!buf) | ||||||
|  | 		return -ENOMEM; | ||||||
|  | 
 | ||||||
|  | 	ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement, | ||||||
|  | 			      true, vmw_dmabuf_bo_free); | ||||||
|  | 	if (ret) { | ||||||
|  | 		DRM_ERROR("Failed initializing new cotable MOB.\n"); | ||||||
|  | 		return ret; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	bo = &buf->base; | ||||||
|  | 	WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, false, NULL)); | ||||||
|  | 
 | ||||||
|  | 	ret = ttm_bo_wait(old_bo, false, false, false); | ||||||
|  | 	if (unlikely(ret != 0)) { | ||||||
|  | 		DRM_ERROR("Failed waiting for cotable unbind.\n"); | ||||||
|  | 		goto out_wait; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	/*
 | ||||||
|  | 	 * Do a page-by-page copy of the COTable. This eliminates slow vmap()s. | ||||||
|  | 	 * This should really be a TTM utility. | ||||||
|  | 	 */ | ||||||
|  | 	for (i = 0; i < old_bo->num_pages; ++i) { | ||||||
|  | 		bool dummy; | ||||||
|  | 
 | ||||||
|  | 		ret = ttm_bo_kmap(old_bo, i, 1, &old_map); | ||||||
|  | 		if (unlikely(ret != 0)) { | ||||||
|  | 			DRM_ERROR("Failed mapping old COTable on resize.\n"); | ||||||
|  | 			goto out_wait; | ||||||
|  | 		} | ||||||
|  | 		ret = ttm_bo_kmap(bo, i, 1, &new_map); | ||||||
|  | 		if (unlikely(ret != 0)) { | ||||||
|  | 			DRM_ERROR("Failed mapping new COTable on resize.\n"); | ||||||
|  | 			goto out_map_new; | ||||||
|  | 		} | ||||||
|  | 		memcpy(ttm_kmap_obj_virtual(&new_map, &dummy), | ||||||
|  | 		       ttm_kmap_obj_virtual(&old_map, &dummy), | ||||||
|  | 		       PAGE_SIZE); | ||||||
|  | 		ttm_bo_kunmap(&new_map); | ||||||
|  | 		ttm_bo_kunmap(&old_map); | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	/* Unpin new buffer, and switch backup buffers. */ | ||||||
|  | 	ret = ttm_bo_validate(bo, &vmw_mob_placement, false, false); | ||||||
|  | 	if (unlikely(ret != 0)) { | ||||||
|  | 		DRM_ERROR("Failed validating new COTable backup buffer.\n"); | ||||||
|  | 		goto out_wait; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	res->backup = buf; | ||||||
|  | 	res->backup_size = new_size; | ||||||
|  | 	vcotbl->size_read_back = cur_size_read_back; | ||||||
|  | 
 | ||||||
|  | 	/*
 | ||||||
|  | 	 * Now tell the device to switch. If this fails, then we need to | ||||||
|  | 	 * revert the full resize. | ||||||
|  | 	 */ | ||||||
|  | 	ret = vmw_cotable_unscrub(res); | ||||||
|  | 	if (ret) { | ||||||
|  | 		DRM_ERROR("Failed switching COTable backup buffer.\n"); | ||||||
|  | 		res->backup = old_buf; | ||||||
|  | 		res->backup_size = old_size; | ||||||
|  | 		vcotbl->size_read_back = old_size_read_back; | ||||||
|  | 		goto out_wait; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	/* Let go of the old mob. */ | ||||||
|  | 	list_del(&res->mob_head); | ||||||
|  | 	list_add_tail(&res->mob_head, &buf->res_list); | ||||||
|  | 	vmw_dmabuf_unreference(&old_buf); | ||||||
|  | 	res->id = vcotbl->type; | ||||||
|  | 
 | ||||||
|  | 	return 0; | ||||||
|  | 
 | ||||||
|  | out_map_new: | ||||||
|  | 	ttm_bo_kunmap(&old_map); | ||||||
|  | out_wait: | ||||||
|  | 	ttm_bo_unreserve(bo); | ||||||
|  | 	vmw_dmabuf_unreference(&buf); | ||||||
|  | 
 | ||||||
|  | 	return ret; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_cotable_create - Cotable resource create callback | ||||||
|  |  * | ||||||
|  |  * @res: Pointer to a cotable resource. | ||||||
|  |  * | ||||||
|  |  * There is no separate create command for cotables, so this callback, which | ||||||
|  |  * is called before bind() in the validation sequence is instead used for two | ||||||
|  |  * things. | ||||||
|  |  * 1) Unscrub the cotable if it is scrubbed and still attached to a backup | ||||||
|  |  *    buffer, that is, if @res->mob_head is non-empty. | ||||||
|  |  * 2) Resize the cotable if needed. | ||||||
|  |  */ | ||||||
|  | static int vmw_cotable_create(struct vmw_resource *res) | ||||||
|  | { | ||||||
|  | 	struct vmw_cotable *vcotbl = vmw_cotable(res); | ||||||
|  | 	size_t new_size = res->backup_size; | ||||||
|  | 	size_t needed_size; | ||||||
|  | 	int ret; | ||||||
|  | 
 | ||||||
|  | 	/* Check whether we need to resize the cotable */ | ||||||
|  | 	needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size; | ||||||
|  | 	while (needed_size > new_size) | ||||||
|  | 		new_size *= 2; | ||||||
|  | 
 | ||||||
|  | 	if (likely(new_size <= res->backup_size)) { | ||||||
|  | 		if (vcotbl->scrubbed && !list_empty(&res->mob_head)) { | ||||||
|  | 			ret = vmw_cotable_unscrub(res); | ||||||
|  | 			if (ret) | ||||||
|  | 				return ret; | ||||||
|  | 		} | ||||||
|  | 		res->id = vcotbl->type; | ||||||
|  | 		return 0; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return vmw_cotable_resize(res, new_size); | ||||||
|  | } | ||||||
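As an illustrative sizing example (the 64-byte entry size is assumed here, not taken from the device headers): with seen_entries == 100, needed_size is (100 + 1) * 64 = 6464 bytes, so a cotable that started with a one-page (4096-byte) backup doubles once to 8192 bytes, and vmw_cotable_create() hands that new size to vmw_cotable_resize().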
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_hw_cotable_destroy - Cotable hw_destroy callback | ||||||
|  |  * | ||||||
|  |  * @res: Pointer to a cotable resource. | ||||||
|  |  * | ||||||
|  |  * The final (part of resource destruction) destroy callback. | ||||||
|  |  */ | ||||||
|  | static void vmw_hw_cotable_destroy(struct vmw_resource *res) | ||||||
|  | { | ||||||
|  | 	(void) vmw_cotable_destroy(res); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | static size_t cotable_acc_size; | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_cotable_free - Cotable resource destructor | ||||||
|  |  * | ||||||
|  |  * @res: Pointer to a cotable resource. | ||||||
|  |  */ | ||||||
|  | static void vmw_cotable_free(struct vmw_resource *res) | ||||||
|  | { | ||||||
|  | 	struct vmw_private *dev_priv = res->dev_priv; | ||||||
|  | 
 | ||||||
|  | 	kfree(res); | ||||||
|  | 	ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_cotable_alloc - Create a cotable resource | ||||||
|  |  * | ||||||
|  |  * @dev_priv: Pointer to a device private struct. | ||||||
|  |  * @ctx: Pointer to the context resource. The cotable resource will not | ||||||
|  |  * take a refcount on the context. | ||||||
|  |  * @type: The cotable type. | ||||||
|  |  */ | ||||||
|  | struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, | ||||||
|  | 				       struct vmw_resource *ctx, | ||||||
|  | 				       u32 type) | ||||||
|  | { | ||||||
|  | 	struct vmw_cotable *vcotbl; | ||||||
|  | 	int ret; | ||||||
|  | 	u32 num_entries; | ||||||
|  | 
 | ||||||
|  | 	if (unlikely(cotable_acc_size == 0)) | ||||||
|  | 		cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable)); | ||||||
|  | 
 | ||||||
|  | 	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), | ||||||
|  | 				   cotable_acc_size, false, true); | ||||||
|  | 	if (unlikely(ret)) | ||||||
|  | 		return ERR_PTR(ret); | ||||||
|  | 
 | ||||||
|  | 	vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL); | ||||||
|  | 	if (unlikely(vcotbl == NULL)) { | ||||||
|  | 		ret = -ENOMEM; | ||||||
|  | 		goto out_no_alloc; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	ret = vmw_resource_init(dev_priv, &vcotbl->res, true, | ||||||
|  | 				vmw_cotable_free, &vmw_cotable_func); | ||||||
|  | 	if (unlikely(ret != 0)) | ||||||
|  | 		goto out_no_init; | ||||||
|  | 
 | ||||||
|  | 	INIT_LIST_HEAD(&vcotbl->resource_list); | ||||||
|  | 	vcotbl->res.id = type; | ||||||
|  | 	vcotbl->res.backup_size = PAGE_SIZE; | ||||||
|  | 	num_entries = PAGE_SIZE / co_info[type].size; | ||||||
|  | 	if (num_entries < co_info[type].min_initial_entries) { | ||||||
|  | 		vcotbl->res.backup_size = co_info[type].min_initial_entries * | ||||||
|  | 			co_info[type].size; | ||||||
|  | 		vcotbl->res.backup_size = | ||||||
|  | 			(vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	vcotbl->scrubbed = true; | ||||||
|  | 	vcotbl->seen_entries = -1; | ||||||
|  | 	vcotbl->type = type; | ||||||
|  | 	vcotbl->ctx = ctx; | ||||||
|  | 
 | ||||||
|  | 	vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy); | ||||||
|  | 
 | ||||||
|  | 	return &vcotbl->res; | ||||||
|  | 
 | ||||||
|  | out_no_init: | ||||||
|  | 	kfree(vcotbl); | ||||||
|  | out_no_alloc: | ||||||
|  | 	ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size); | ||||||
|  | 	return ERR_PTR(ret); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_cotable_notify - Notify the cotable about an item creation | ||||||
|  |  * | ||||||
|  |  * @res: Pointer to a cotable resource. | ||||||
|  |  * @id: Item id. | ||||||
|  |  */ | ||||||
|  | int vmw_cotable_notify(struct vmw_resource *res, int id) | ||||||
|  | { | ||||||
|  | 	struct vmw_cotable *vcotbl = vmw_cotable(res); | ||||||
|  | 
 | ||||||
|  | 	if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) { | ||||||
|  | 		DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n", | ||||||
|  | 			  (unsigned) vcotbl->type, id); | ||||||
|  | 		return -EINVAL; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if (vcotbl->seen_entries < id) { | ||||||
|  | 		/* Trigger a call to create() on next validate */ | ||||||
|  | 		res->id = -1; | ||||||
|  | 		vcotbl->seen_entries = id; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return 0; | ||||||
|  | } | ||||||
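The intended calling pattern, sketched as a hypothetical helper (the real call sites live in the command-stream validation code elsewhere in this series): a define-style command notifies the owning cotable of the new id before the command reaches the device, so the table can grow first.

	static int example_notify_view_define(struct vmw_resource *ctx, int id)
	{
		struct vmw_resource *cotable =
			vmw_context_cotable(ctx, SVGA_COTABLE_RTVIEW);
		int ret;

		if (IS_ERR(cotable))
			return PTR_ERR(cotable);

		/* May set res->id = -1, deferring growth to the next
		 * validation, which triggers vmw_cotable_create(). */
		ret = vmw_cotable_notify(cotable, id);

		vmw_resource_unreference(&cotable);
		return ret;
	}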
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_cotable_add_resource - Add a resource to the cotable's list of | ||||||
|  |  * active resources. | ||||||
|  |  * | ||||||
|  |  * @res: Pointer to the struct vmw_resource representing the cotable. | ||||||
|  |  * @head: Pointer to the struct list_head member of the resource, dedicated | ||||||
|  |  * to the cotable active resource list. | ||||||
|  |  */ | ||||||
|  | void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head) | ||||||
|  | { | ||||||
|  | 	struct vmw_cotable *vcotbl = | ||||||
|  | 		container_of(res, struct vmw_cotable, res); | ||||||
|  | 
 | ||||||
|  | 	list_add_tail(head, &vcotbl->resource_list); | ||||||
|  | } | ||||||
|  | @ -28,6 +28,7 @@ | ||||||
| 
 | 
 | ||||||
| #include <drm/drmP.h> | #include <drm/drmP.h> | ||||||
| #include "vmwgfx_drv.h" | #include "vmwgfx_drv.h" | ||||||
|  | #include "vmwgfx_binding.h" | ||||||
| #include <drm/ttm/ttm_placement.h> | #include <drm/ttm/ttm_placement.h> | ||||||
| #include <drm/ttm/ttm_bo_driver.h> | #include <drm/ttm/ttm_bo_driver.h> | ||||||
| #include <drm/ttm/ttm_object.h> | #include <drm/ttm/ttm_object.h> | ||||||
|  | @ -127,6 +128,9 @@ | ||||||
| #define DRM_IOCTL_VMW_SYNCCPU					\ | #define DRM_IOCTL_VMW_SYNCCPU					\ | ||||||
| 	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\ | 	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\ | ||||||
| 		 struct drm_vmw_synccpu_arg) | 		 struct drm_vmw_synccpu_arg) | ||||||
|  | #define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			\ | ||||||
|  | 	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,	\ | ||||||
|  | 		struct drm_vmw_context_arg) | ||||||
| 
 | 
 | ||||||
| /**
 | /**
 | ||||||
|  * The core DRM version of this macro doesn't account for |  * The core DRM version of this macro doesn't account for | ||||||
|  | @ -168,8 +172,8 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { | ||||||
| 		      DRM_UNLOCKED | DRM_RENDER_ALLOW), | 		      DRM_UNLOCKED | DRM_RENDER_ALLOW), | ||||||
| 	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, | 	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, | ||||||
| 		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), | 		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), | ||||||
| 	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, | 	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH | DRM_UNLOCKED | | ||||||
| 		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), | 		      DRM_RENDER_ALLOW), | ||||||
| 	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, | 	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, | ||||||
| 		      DRM_UNLOCKED | DRM_RENDER_ALLOW), | 		      DRM_UNLOCKED | DRM_RENDER_ALLOW), | ||||||
| 	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, | 	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, | ||||||
|  | @ -206,6 +210,9 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { | ||||||
| 	VMW_IOCTL_DEF(VMW_SYNCCPU, | 	VMW_IOCTL_DEF(VMW_SYNCCPU, | ||||||
| 		      vmw_user_dmabuf_synccpu_ioctl, | 		      vmw_user_dmabuf_synccpu_ioctl, | ||||||
| 		      DRM_UNLOCKED | DRM_RENDER_ALLOW), | 		      DRM_UNLOCKED | DRM_RENDER_ALLOW), | ||||||
|  | 	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT, | ||||||
|  | 		      vmw_extended_context_define_ioctl, | ||||||
|  | 		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| static struct pci_device_id vmw_pci_id_list[] = { | static struct pci_device_id vmw_pci_id_list[] = { | ||||||
|  | @ -390,8 +397,10 @@ static int vmw_request_device(struct vmw_private *dev_priv) | ||||||
| 	} | 	} | ||||||
| 	vmw_fence_fifo_up(dev_priv->fman); | 	vmw_fence_fifo_up(dev_priv->fman); | ||||||
| 	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv); | 	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv); | ||||||
| 	if (IS_ERR(dev_priv->cman)) | 	if (IS_ERR(dev_priv->cman)) { | ||||||
| 		dev_priv->cman = NULL; | 		dev_priv->cman = NULL; | ||||||
|  | 		dev_priv->has_dx = false; | ||||||
|  | 	} | ||||||
| 
 | 
 | ||||||
| 	ret = vmw_request_device_late(dev_priv); | 	ret = vmw_request_device_late(dev_priv); | ||||||
| 	if (ret) | 	if (ret) | ||||||
|  | @ -848,6 +857,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | 	if (dev_priv->has_mob) { | ||||||
|  | 		spin_lock(&dev_priv->cap_lock); | ||||||
|  | 		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX); | ||||||
|  | 		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP); | ||||||
|  | 		spin_unlock(&dev_priv->cap_lock); | ||||||
|  | 	} | ||||||
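A hedged note on the capability probe above: SVGA_REG_DEV_CAP behaves as an index/data register pair, so the driver first writes the capability index (SVGA3D_DEVCAP_DX) and then reads the same register back to obtain the capability value; cap_lock serializes the write/read pair against concurrent capability queries.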
|  | 
 | ||||||
|  | 
 | ||||||
| 	ret = vmw_kms_init(dev_priv); | 	ret = vmw_kms_init(dev_priv); | ||||||
| 	if (unlikely(ret != 0)) | 	if (unlikely(ret != 0)) | ||||||
| 		goto out_no_kms; | 		goto out_no_kms; | ||||||
|  | @ -857,6 +874,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | ||||||
| 	if (ret) | 	if (ret) | ||||||
| 		goto out_no_fifo; | 		goto out_no_fifo; | ||||||
| 
 | 
 | ||||||
|  | 	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no."); | ||||||
|  | 
 | ||||||
| 	if (dev_priv->enable_fb) { | 	if (dev_priv->enable_fb) { | ||||||
| 		vmw_fifo_resource_inc(dev_priv); | 		vmw_fifo_resource_inc(dev_priv); | ||||||
| 		vmw_svga_enable(dev_priv); | 		vmw_svga_enable(dev_priv); | ||||||
|  | @ -900,6 +919,8 @@ out_err0: | ||||||
| 	for (i = vmw_res_context; i < vmw_res_max; ++i) | 	for (i = vmw_res_context; i < vmw_res_max; ++i) | ||||||
| 		idr_destroy(&dev_priv->res_idr[i]); | 		idr_destroy(&dev_priv->res_idr[i]); | ||||||
| 
 | 
 | ||||||
|  | 	if (dev_priv->ctx.staged_bindings) | ||||||
|  | 		vmw_binding_state_free(dev_priv->ctx.staged_bindings); | ||||||
| 	kfree(dev_priv); | 	kfree(dev_priv); | ||||||
| 	return ret; | 	return ret; | ||||||
| } | } | ||||||
|  | @ -945,6 +966,8 @@ static int vmw_driver_unload(struct drm_device *dev) | ||||||
| 	iounmap(dev_priv->mmio_virt); | 	iounmap(dev_priv->mmio_virt); | ||||||
| 	arch_phys_wc_del(dev_priv->mmio_mtrr); | 	arch_phys_wc_del(dev_priv->mmio_mtrr); | ||||||
| 	(void)ttm_bo_device_release(&dev_priv->bdev); | 	(void)ttm_bo_device_release(&dev_priv->bdev); | ||||||
|  | 	if (dev_priv->ctx.staged_bindings) | ||||||
|  | 		vmw_binding_state_free(dev_priv->ctx.staged_bindings); | ||||||
| 	vmw_ttm_global_release(dev_priv); | 	vmw_ttm_global_release(dev_priv); | ||||||
| 
 | 
 | ||||||
| 	for (i = vmw_res_context; i < vmw_res_max; ++i) | 	for (i = vmw_res_context; i < vmw_res_max; ++i) | ||||||
|  | @ -1082,11 +1105,21 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, | ||||||
| 		const struct drm_ioctl_desc *ioctl = | 		const struct drm_ioctl_desc *ioctl = | ||||||
| 			&vmw_ioctls[nr - DRM_COMMAND_BASE]; | 			&vmw_ioctls[nr - DRM_COMMAND_BASE]; | ||||||
| 
 | 
 | ||||||
| 		if (unlikely(ioctl->cmd != cmd)) { | 		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) { | ||||||
| 			DRM_ERROR("Invalid command format, ioctl %d\n", | 			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv); | ||||||
| 				  nr - DRM_COMMAND_BASE); | 			if (unlikely(ret != 0)) | ||||||
| 			return -EINVAL; | 				return ret; | ||||||
|  | 
 | ||||||
|  | 			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN)) | ||||||
|  | 				goto out_io_encoding; | ||||||
|  | 
 | ||||||
|  | 			return (long) vmw_execbuf_ioctl(dev, arg, file_priv, | ||||||
|  | 							_IOC_SIZE(cmd)); | ||||||
| 		} | 		} | ||||||
|  | 
 | ||||||
|  | 		if (unlikely(ioctl->cmd != cmd)) | ||||||
|  | 			goto out_io_encoding; | ||||||
|  | 
 | ||||||
| 		flags = ioctl->flags; | 		flags = ioctl->flags; | ||||||
| 	} else if (!drm_ioctl_flags(nr, &flags)) | 	} else if (!drm_ioctl_flags(nr, &flags)) | ||||||
| 		return -EINVAL; | 		return -EINVAL; | ||||||
|  | @ -1106,6 +1139,12 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, | ||||||
| 		ttm_read_unlock(&vmaster->lock); | 		ttm_read_unlock(&vmaster->lock); | ||||||
| 
 | 
 | ||||||
| 	return ret; | 	return ret; | ||||||
|  | 
 | ||||||
|  | out_io_encoding: | ||||||
|  | 	DRM_ERROR("Invalid command format, ioctl %d\n", | ||||||
|  | 		  nr - DRM_COMMAND_BASE); | ||||||
|  | 
 | ||||||
|  | 	return -EINVAL; | ||||||
| } | } | ||||||
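The practical effect of the EXECBUF special case above, as far as this diff shows: the exact ioctl-encoding check is skipped for DRM_VMW_EXECBUF, only the IOC_IN direction bit is enforced, and the caller-supplied size is forwarded as _IOC_SIZE(cmd) to vmw_execbuf_ioctl(). Presumably this lets userspace built against differently sized versions of the execbuf argument struct keep working as fields (such as the DX context handle) are appended.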
| 
 | 
 | ||||||
| static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, | static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, | ||||||
|  | @ -1156,7 +1195,6 @@ static void vmw_master_destroy(struct drm_device *dev, | ||||||
| 	kfree(vmaster); | 	kfree(vmaster); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| 
 |  | ||||||
| static int vmw_master_set(struct drm_device *dev, | static int vmw_master_set(struct drm_device *dev, | ||||||
| 			  struct drm_file *file_priv, | 			  struct drm_file *file_priv, | ||||||
| 			  bool from_open) | 			  bool from_open) | ||||||
|  |  | ||||||
|  | @ -59,6 +59,8 @@ | ||||||
| #define VMWGFX_NUM_GB_SHADER 20000 | #define VMWGFX_NUM_GB_SHADER 20000 | ||||||
| #define VMWGFX_NUM_GB_SURFACE 32768 | #define VMWGFX_NUM_GB_SURFACE 32768 | ||||||
| #define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS | #define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS | ||||||
|  | #define VMWGFX_NUM_DXCONTEXT 256 | ||||||
|  | #define VMWGFX_NUM_DXQUERY 512 | ||||||
| #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\ | #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\ | ||||||
| 			VMWGFX_NUM_GB_SHADER +\ | 			VMWGFX_NUM_GB_SHADER +\ | ||||||
| 			VMWGFX_NUM_GB_SURFACE +\ | 			VMWGFX_NUM_GB_SURFACE +\ | ||||||
|  | @ -132,6 +134,9 @@ enum vmw_res_type { | ||||||
| 	vmw_res_surface, | 	vmw_res_surface, | ||||||
| 	vmw_res_stream, | 	vmw_res_stream, | ||||||
| 	vmw_res_shader, | 	vmw_res_shader, | ||||||
|  | 	vmw_res_dx_context, | ||||||
|  | 	vmw_res_cotable, | ||||||
|  | 	vmw_res_view, | ||||||
| 	vmw_res_max | 	vmw_res_max | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
|  | @ -139,7 +144,8 @@ enum vmw_res_type { | ||||||
|  * Resources that are managed using command streams. |  * Resources that are managed using command streams. | ||||||
|  */ |  */ | ||||||
| enum vmw_cmdbuf_res_type { | enum vmw_cmdbuf_res_type { | ||||||
| 	vmw_cmdbuf_res_compat_shader | 	vmw_cmdbuf_res_shader, | ||||||
|  | 	vmw_cmdbuf_res_view | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| struct vmw_cmdbuf_res_manager; | struct vmw_cmdbuf_res_manager; | ||||||
|  | @ -162,11 +168,13 @@ struct vmw_surface { | ||||||
| 	struct drm_vmw_size *sizes; | 	struct drm_vmw_size *sizes; | ||||||
| 	uint32_t num_sizes; | 	uint32_t num_sizes; | ||||||
| 	bool scanout; | 	bool scanout; | ||||||
|  | 	uint32_t array_size; | ||||||
| 	/* TODO so far just an extra pointer */ | 	/* TODO so far just an extra pointer */ | ||||||
| 	struct vmw_cursor_snooper snooper; | 	struct vmw_cursor_snooper snooper; | ||||||
| 	struct vmw_surface_offset *offsets; | 	struct vmw_surface_offset *offsets; | ||||||
| 	SVGA3dTextureFilter autogen_filter; | 	SVGA3dTextureFilter autogen_filter; | ||||||
| 	uint32_t multisample_count; | 	uint32_t multisample_count; | ||||||
|  | 	struct list_head view_list; | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| struct vmw_marker_queue { | struct vmw_marker_queue { | ||||||
|  | @ -186,6 +194,7 @@ struct vmw_fifo_state { | ||||||
| 	struct mutex fifo_mutex; | 	struct mutex fifo_mutex; | ||||||
| 	struct rw_semaphore rwsem; | 	struct rw_semaphore rwsem; | ||||||
| 	struct vmw_marker_queue marker_queue; | 	struct vmw_marker_queue marker_queue; | ||||||
|  | 	bool dx; | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| struct vmw_relocation { | struct vmw_relocation { | ||||||
|  | @ -265,73 +274,6 @@ struct vmw_piter { | ||||||
| 	struct page *(*page)(struct vmw_piter *); | 	struct page *(*page)(struct vmw_piter *); | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| /*
 |  | ||||||
|  * enum vmw_ctx_binding_type - abstract resource to context binding types |  | ||||||
|  */ |  | ||||||
| enum vmw_ctx_binding_type { |  | ||||||
| 	vmw_ctx_binding_shader, |  | ||||||
| 	vmw_ctx_binding_rt, |  | ||||||
| 	vmw_ctx_binding_tex, |  | ||||||
| 	vmw_ctx_binding_max |  | ||||||
| }; |  | ||||||
| 
 |  | ||||||
| /**
 |  | ||||||
|  * struct vmw_ctx_bindinfo - structure representing a single context binding |  | ||||||
|  * |  | ||||||
|  * @ctx: Pointer to the context structure. NULL means the binding is not |  | ||||||
|  * active. |  | ||||||
|  * @res: Non ref-counted pointer to the bound resource. |  | ||||||
|  * @bt: The binding type. |  | ||||||
|  * @i1: Union of information needed to unbind. |  | ||||||
|  */ |  | ||||||
| struct vmw_ctx_bindinfo { |  | ||||||
| 	struct vmw_resource *ctx; |  | ||||||
| 	struct vmw_resource *res; |  | ||||||
| 	enum vmw_ctx_binding_type bt; |  | ||||||
| 	bool scrubbed; |  | ||||||
| 	union { |  | ||||||
| 		SVGA3dShaderType shader_type; |  | ||||||
| 		SVGA3dRenderTargetType rt_type; |  | ||||||
| 		uint32 texture_stage; |  | ||||||
| 	} i1; |  | ||||||
| }; |  | ||||||
| 
 |  | ||||||
| /**
 |  | ||||||
|  * struct vmw_ctx_binding - structure representing a single context binding |  | ||||||
|  *                        - suitable for tracking in a context |  | ||||||
|  * |  | ||||||
|  * @ctx_list: List head for context. |  | ||||||
|  * @res_list: List head for bound resource. |  | ||||||
|  * @bi: Binding info |  | ||||||
|  */ |  | ||||||
| struct vmw_ctx_binding { |  | ||||||
| 	struct list_head ctx_list; |  | ||||||
| 	struct list_head res_list; |  | ||||||
| 	struct vmw_ctx_bindinfo bi; |  | ||||||
| }; |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| /**
 |  | ||||||
|  * struct vmw_ctx_binding_state - context binding state |  | ||||||
|  * |  | ||||||
|  * @list: linked list of individual bindings. |  | ||||||
|  * @render_targets: Render target bindings. |  | ||||||
|  * @texture_units: Texture units/samplers bindings. |  | ||||||
|  * @shaders: Shader bindings. |  | ||||||
|  * |  | ||||||
|  * Note that this structure also provides storage space for the individual |  | ||||||
|  * struct vmw_ctx_binding objects, so that no dynamic allocation is needed |  | ||||||
|  * for individual bindings. |  | ||||||
|  * |  | ||||||
|  */ |  | ||||||
| struct vmw_ctx_binding_state { |  | ||||||
| 	struct list_head list; |  | ||||||
| 	struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX]; |  | ||||||
| 	struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS]; |  | ||||||
| 	struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_PREDX_MAX]; |  | ||||||
| }; |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| /*
 | /*
 | ||||||
|  * enum vmw_display_unit_type - Describes the display unit |  * enum vmw_display_unit_type - Describes the display unit | ||||||
|  */ |  */ | ||||||
|  | @ -356,6 +298,7 @@ struct vmw_sw_context{ | ||||||
| 	uint32_t *cmd_bounce; | 	uint32_t *cmd_bounce; | ||||||
| 	uint32_t cmd_bounce_size; | 	uint32_t cmd_bounce_size; | ||||||
| 	struct list_head resource_list; | 	struct list_head resource_list; | ||||||
|  | 	struct list_head ctx_resource_list; /* For contexts and cotables */ | ||||||
| 	struct vmw_dma_buffer *cur_query_bo; | 	struct vmw_dma_buffer *cur_query_bo; | ||||||
| 	struct list_head res_relocations; | 	struct list_head res_relocations; | ||||||
| 	uint32_t *buf_start; | 	uint32_t *buf_start; | ||||||
|  | @ -363,8 +306,13 @@ struct vmw_sw_context{ | ||||||
| 	struct vmw_resource *last_query_ctx; | 	struct vmw_resource *last_query_ctx; | ||||||
| 	bool needs_post_query_barrier; | 	bool needs_post_query_barrier; | ||||||
| 	struct vmw_resource *error_resource; | 	struct vmw_resource *error_resource; | ||||||
| 	struct vmw_ctx_binding_state staged_bindings; | 	struct vmw_ctx_binding_state *staged_bindings; | ||||||
|  | 	bool staged_bindings_inuse; | ||||||
| 	struct list_head staged_cmd_res; | 	struct list_head staged_cmd_res; | ||||||
|  | 	struct vmw_resource_val_node *dx_ctx_node; | ||||||
|  | 	struct vmw_dma_buffer *dx_query_mob; | ||||||
|  | 	struct vmw_resource *dx_query_ctx; | ||||||
|  | 	struct vmw_cmdbuf_res_manager *man; | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| struct vmw_legacy_display; | struct vmw_legacy_display; | ||||||
|  | @ -382,6 +330,26 @@ struct vmw_vga_topology_state { | ||||||
| 	uint32_t pos_y; | 	uint32_t pos_y; | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
|  | 
 | ||||||
|  | /*
 | ||||||
|  |  * struct vmw_otable - Guest Memory OBject table metadata | ||||||
|  |  * | ||||||
|  |  * @size:           Size of the table (page-aligned). | ||||||
|  |  * @page_table:     Pointer to a struct vmw_mob holding the page table. | ||||||
|  |  * @enabled:        Whether the otable is enabled on the device. | ||||||
|  |  */ | ||||||
|  | struct vmw_otable { | ||||||
|  | 	unsigned long size; | ||||||
|  | 	struct vmw_mob *page_table; | ||||||
|  | 	bool enabled; | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | struct vmw_otable_batch { | ||||||
|  | 	unsigned num_otables; | ||||||
|  | 	struct vmw_otable *otables; | ||||||
|  | 	struct vmw_resource *context; | ||||||
|  | 	struct ttm_buffer_object *otable_bo; | ||||||
|  | }; | ||||||
|  | 
 | ||||||
| struct vmw_private { | struct vmw_private { | ||||||
| 	struct ttm_bo_device bdev; | 	struct ttm_bo_device bdev; | ||||||
| 	struct ttm_bo_global_ref bo_global_ref; | 	struct ttm_bo_global_ref bo_global_ref; | ||||||
|  | @ -417,6 +385,7 @@ struct vmw_private { | ||||||
| 	bool has_mob; | 	bool has_mob; | ||||||
| 	spinlock_t hw_lock; | 	spinlock_t hw_lock; | ||||||
| 	spinlock_t cap_lock; | 	spinlock_t cap_lock; | ||||||
|  | 	bool has_dx; | ||||||
| 
 | 
 | ||||||
| 	/*
 | 	/*
 | ||||||
| 	 * VGA registers. | 	 * VGA registers. | ||||||
|  | @ -552,8 +521,7 @@ struct vmw_private { | ||||||
| 	/*
 | 	/*
 | ||||||
| 	 * Guest Backed stuff | 	 * Guest Backed stuff | ||||||
| 	 */ | 	 */ | ||||||
| 	struct ttm_buffer_object *otable_bo; | 	struct vmw_otable_batch otable_batch; | ||||||
| 	struct vmw_otable *otables; |  | ||||||
| 
 | 
 | ||||||
| 	struct vmw_cmdbuf_man *cman; | 	struct vmw_cmdbuf_man *cman; | ||||||
| }; | }; | ||||||
|  | @ -685,6 +653,7 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv, | ||||||
| 				  uint32_t *inout_id, | 				  uint32_t *inout_id, | ||||||
| 				  struct vmw_resource **out); | 				  struct vmw_resource **out); | ||||||
| extern void vmw_resource_unreserve(struct vmw_resource *res, | extern void vmw_resource_unreserve(struct vmw_resource *res, | ||||||
|  | 				   bool switch_backup, | ||||||
| 				   struct vmw_dma_buffer *new_backup, | 				   struct vmw_dma_buffer *new_backup, | ||||||
| 				   unsigned long new_backup_offset); | 				   unsigned long new_backup_offset); | ||||||
| extern void vmw_resource_move_notify(struct ttm_buffer_object *bo, | extern void vmw_resource_move_notify(struct ttm_buffer_object *bo, | ||||||
|  | @ -742,7 +711,10 @@ extern int vmw_fifo_init(struct vmw_private *dev_priv, | ||||||
| extern void vmw_fifo_release(struct vmw_private *dev_priv, | extern void vmw_fifo_release(struct vmw_private *dev_priv, | ||||||
| 			     struct vmw_fifo_state *fifo); | 			     struct vmw_fifo_state *fifo); | ||||||
| extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes); | extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes); | ||||||
|  | extern void * | ||||||
|  | vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id); | ||||||
| extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes); | extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes); | ||||||
|  | extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes); | ||||||
| extern int vmw_fifo_send_fence(struct vmw_private *dev_priv, | extern int vmw_fifo_send_fence(struct vmw_private *dev_priv, | ||||||
| 			       uint32_t *seqno); | 			       uint32_t *seqno); | ||||||
| extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason); | extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason); | ||||||
|  | @ -828,14 +800,15 @@ static inline struct page *vmw_piter_page(struct vmw_piter *viter) | ||||||
|  * Command submission - vmwgfx_execbuf.c |  * Command submission - vmwgfx_execbuf.c | ||||||
|  */ |  */ | ||||||
| 
 | 
 | ||||||
| extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data, | ||||||
| 			     struct drm_file *file_priv); | 			     struct drm_file *file_priv, size_t size); | ||||||
| extern int vmw_execbuf_process(struct drm_file *file_priv, | extern int vmw_execbuf_process(struct drm_file *file_priv, | ||||||
| 			       struct vmw_private *dev_priv, | 			       struct vmw_private *dev_priv, | ||||||
| 			       void __user *user_commands, | 			       void __user *user_commands, | ||||||
| 			       void *kernel_commands, | 			       void *kernel_commands, | ||||||
| 			       uint32_t command_size, | 			       uint32_t command_size, | ||||||
| 			       uint64_t throttle_us, | 			       uint64_t throttle_us, | ||||||
|  | 			       uint32_t dx_context_handle, | ||||||
| 			       struct drm_vmw_fence_rep __user | 			       struct drm_vmw_fence_rep __user | ||||||
| 			       *user_fence_rep, | 			       *user_fence_rep, | ||||||
| 			       struct vmw_fence_obj **out_fence); | 			       struct vmw_fence_obj **out_fence); | ||||||
|  | @ -960,6 +933,7 @@ int vmw_dumb_destroy(struct drm_file *file_priv, | ||||||
| 		     uint32_t handle); | 		     uint32_t handle); | ||||||
| extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible); | extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible); | ||||||
| extern void vmw_resource_unpin(struct vmw_resource *res); | extern void vmw_resource_unpin(struct vmw_resource *res); | ||||||
|  | extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res); | ||||||
| 
 | 
 | ||||||
| /**
 | /**
 | ||||||
|  * Overlay control - vmwgfx_overlay.c |  * Overlay control - vmwgfx_overlay.c | ||||||
|  | @ -1016,27 +990,28 @@ extern void vmw_otables_takedown(struct vmw_private *dev_priv); | ||||||
| 
 | 
 | ||||||
| extern const struct vmw_user_resource_conv *user_context_converter; | extern const struct vmw_user_resource_conv *user_context_converter; | ||||||
| 
 | 
 | ||||||
| extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); |  | ||||||
| 
 |  | ||||||
| extern int vmw_context_check(struct vmw_private *dev_priv, | extern int vmw_context_check(struct vmw_private *dev_priv, | ||||||
| 			     struct ttm_object_file *tfile, | 			     struct ttm_object_file *tfile, | ||||||
| 			     int id, | 			     int id, | ||||||
| 			     struct vmw_resource **p_res); | 			     struct vmw_resource **p_res); | ||||||
| extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, | extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, | ||||||
| 				    struct drm_file *file_priv); | 				    struct drm_file *file_priv); | ||||||
|  | extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data, | ||||||
|  | 					     struct drm_file *file_priv); | ||||||
| extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, | extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, | ||||||
| 				     struct drm_file *file_priv); | 				     struct drm_file *file_priv); | ||||||
| extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs, |  | ||||||
| 				   const struct vmw_ctx_bindinfo *ci); |  | ||||||
| extern void |  | ||||||
| vmw_context_binding_state_transfer(struct vmw_resource *res, |  | ||||||
| 				   struct vmw_ctx_binding_state *cbs); |  | ||||||
| extern void vmw_context_binding_res_list_kill(struct list_head *head); |  | ||||||
| extern void vmw_context_binding_res_list_scrub(struct list_head *head); |  | ||||||
| extern int vmw_context_rebind_all(struct vmw_resource *ctx); |  | ||||||
| extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx); | extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx); | ||||||
| extern struct vmw_cmdbuf_res_manager * | extern struct vmw_cmdbuf_res_manager * | ||||||
| vmw_context_res_man(struct vmw_resource *ctx); | vmw_context_res_man(struct vmw_resource *ctx); | ||||||
|  | extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx, | ||||||
|  | 						SVGACOTableType cotable_type); | ||||||
|  | struct vmw_ctx_binding_state; | ||||||
|  | extern struct vmw_ctx_binding_state * | ||||||
|  | vmw_context_binding_state(struct vmw_resource *ctx); | ||||||
|  | extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx, | ||||||
|  | 					  bool readback); | ||||||
|  | 
 | ||||||
| /*
 | /*
 | ||||||
|  * Surface management - vmwgfx_surface.c |  * Surface management - vmwgfx_surface.c | ||||||
|  */ |  */ | ||||||
|  | @ -1066,6 +1041,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, | ||||||
| 			       bool for_scanout, | 			       bool for_scanout, | ||||||
| 			       uint32_t num_mip_levels, | 			       uint32_t num_mip_levels, | ||||||
| 			       uint32_t multisample_count, | 			       uint32_t multisample_count, | ||||||
|  | 			       uint32_t array_size, | ||||||
| 			       struct drm_vmw_size size, | 			       struct drm_vmw_size size, | ||||||
| 			       struct vmw_surface **srf_out); | 			       struct vmw_surface **srf_out); | ||||||
| 
 | 
 | ||||||
|  | @ -1085,12 +1061,21 @@ extern int vmw_compat_shader_add(struct vmw_private *dev_priv, | ||||||
| 				 SVGA3dShaderType shader_type, | 				 SVGA3dShaderType shader_type, | ||||||
| 				 size_t size, | 				 size_t size, | ||||||
| 				 struct list_head *list); | 				 struct list_head *list); | ||||||
| extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man, | extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man, | ||||||
| 				    u32 user_key, SVGA3dShaderType shader_type, | 			     u32 user_key, SVGA3dShaderType shader_type, | ||||||
| 				    struct list_head *list); | 			     struct list_head *list); | ||||||
|  | extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man, | ||||||
|  | 			     struct vmw_resource *ctx, | ||||||
|  | 			     u32 user_key, | ||||||
|  | 			     SVGA3dShaderType shader_type, | ||||||
|  | 			     struct list_head *list); | ||||||
|  | extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv, | ||||||
|  | 					     struct list_head *list, | ||||||
|  | 					     bool readback); | ||||||
|  | 
 | ||||||
| extern struct vmw_resource * | extern struct vmw_resource * | ||||||
| vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man, | vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man, | ||||||
| 			 u32 user_key, SVGA3dShaderType shader_type); | 		  u32 user_key, SVGA3dShaderType shader_type); | ||||||
| 
 | 
 | ||||||
| /*
 | /*
 | ||||||
|  * Command buffer managed resources - vmwgfx_cmdbuf_res.c |  * Command buffer managed resources - vmwgfx_cmdbuf_res.c | ||||||
|  | @ -1114,8 +1099,20 @@ extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man, | ||||||
| extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, | extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, | ||||||
| 				 enum vmw_cmdbuf_res_type res_type, | 				 enum vmw_cmdbuf_res_type res_type, | ||||||
| 				 u32 user_key, | 				 u32 user_key, | ||||||
| 				 struct list_head *list); | 				 struct list_head *list, | ||||||
|  | 				 struct vmw_resource **res); | ||||||
| 
 | 
 | ||||||
|  | /*
 | ||||||
|  |  * COTable management - vmwgfx_cotable.c | ||||||
|  |  */ | ||||||
|  | extern const SVGACOTableType vmw_cotable_scrub_order[]; | ||||||
|  | extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, | ||||||
|  | 					      struct vmw_resource *ctx, | ||||||
|  | 					      u32 type); | ||||||
|  | extern int vmw_cotable_notify(struct vmw_resource *res, int id); | ||||||
|  | extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback); | ||||||
|  | extern void vmw_cotable_add_resource(struct vmw_resource *ctx, | ||||||
|  | 				     struct list_head *head); | ||||||
| 
 | 
 | ||||||
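A minimal sketch of the allocation pattern the cotable declarations above anticipate: a DX context creating one cotable per SVGA cotable type at context creation time. The uctx->cotables array, the SVGA_COTABLE_DX10_MAX bound and the error path are assumptions for illustration, not code shown in this diff:

	/* Sketch only: per-type cotable allocation at DX context creation. */
	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
		uctx->cotables[i] = vmw_cotable_alloc(dev_priv, &uctx->res, i);
		if (IS_ERR(uctx->cotables[i])) {
			ret = PTR_ERR(uctx->cotables[i]);
			goto out_cotables;
		}
	}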
| /*
 | /*
 | ||||||
|  * Command buffer management vmwgfx_cmdbuf.c |  * Command buffer management vmwgfx_cmdbuf.c | ||||||
|  |  | ||||||
										
											
File diff suppressed because it is too large
											
										
									
								
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
|  | @ -29,6 +29,11 @@ | ||||||
| #include <drm/drmP.h> | #include <drm/drmP.h> | ||||||
| #include <drm/ttm/ttm_placement.h> | #include <drm/ttm/ttm_placement.h> | ||||||
| 
 | 
 | ||||||
|  | struct vmw_temp_set_context { | ||||||
|  | 	SVGA3dCmdHeader header; | ||||||
|  | 	SVGA3dCmdDXTempSetContext body; | ||||||
|  | }; | ||||||
|  | 
 | ||||||
| bool vmw_fifo_have_3d(struct vmw_private *dev_priv) | bool vmw_fifo_have_3d(struct vmw_private *dev_priv) | ||||||
| { | { | ||||||
| 	u32 __iomem *fifo_mem = dev_priv->mmio_virt; | 	u32 __iomem *fifo_mem = dev_priv->mmio_virt; | ||||||
|  | @ -99,6 +104,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | ||||||
| 	uint32_t max; | 	uint32_t max; | ||||||
| 	uint32_t min; | 	uint32_t min; | ||||||
| 
 | 
 | ||||||
|  | 	fifo->dx = false; | ||||||
| 	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE; | 	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE; | ||||||
| 	fifo->static_buffer = vmalloc(fifo->static_buffer_size); | 	fifo->static_buffer = vmalloc(fifo->static_buffer_size); | ||||||
| 	if (unlikely(fifo->static_buffer == NULL)) | 	if (unlikely(fifo->static_buffer == NULL)) | ||||||
|  | @ -396,15 +402,20 @@ out_err: | ||||||
| 	return NULL; | 	return NULL; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) | void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, | ||||||
|  | 			  int ctx_id) | ||||||
| { | { | ||||||
| 	void *ret; | 	void *ret; | ||||||
| 
 | 
 | ||||||
| 	if (dev_priv->cman) | 	if (dev_priv->cman) | ||||||
| 		ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes, | 		ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes, | ||||||
| 					 SVGA3D_INVALID_ID, false, NULL); | 					 ctx_id, false, NULL); | ||||||
| 	else | 	else if (ctx_id == SVGA3D_INVALID_ID) | ||||||
| 		ret = vmw_local_fifo_reserve(dev_priv, bytes); | 		ret = vmw_local_fifo_reserve(dev_priv, bytes); | ||||||
|  | 	else { | ||||||
|  | 		WARN_ON("Command buffer has not been allocated.\n"); | ||||||
|  | 		ret = NULL; | ||||||
|  | 	} | ||||||
| 	if (IS_ERR_OR_NULL(ret)) { | 	if (IS_ERR_OR_NULL(ret)) { | ||||||
| 		DRM_ERROR("Fifo reserve failure of %u bytes.\n", | 		DRM_ERROR("Fifo reserve failure of %u bytes.\n", | ||||||
| 			  (unsigned) bytes); | 			  (unsigned) bytes); | ||||||
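A minimal caller sketch for the DX-aware reserve above, borrowing the header/body command layout that vmw_dx_shader_unscrub() uses later in this diff; dev_priv and ctx are illustrative locals. Note the old vmw_fifo_reserve() survives at the end of this file as a wrapper that passes SVGA3D_INVALID_ID:

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindShader body;
	} *cmd;

	/* Reserve command space inside a specific DX context. */
	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	/* ... fill cmd->header and cmd->body ... */

	vmw_fifo_commit(dev_priv, sizeof(*cmd));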
|  | @ -466,6 +477,10 @@ static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) | ||||||
| 	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN); | 	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN); | ||||||
| 	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; | 	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; | ||||||
| 
 | 
 | ||||||
|  | 	if (fifo_state->dx) | ||||||
|  | 		bytes += sizeof(struct vmw_temp_set_context); | ||||||
|  | 
 | ||||||
|  | 	fifo_state->dx = false; | ||||||
| 	BUG_ON((bytes & 3) != 0); | 	BUG_ON((bytes & 3) != 0); | ||||||
| 	BUG_ON(bytes > fifo_state->reserved_size); | 	BUG_ON(bytes > fifo_state->reserved_size); | ||||||
| 
 | 
 | ||||||
|  | @ -518,7 +533,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) | ||||||
|  * @dev_priv: Pointer to device private structure. |  * @dev_priv: Pointer to device private structure. | ||||||
|  * @bytes: Number of bytes to commit. |  * @bytes: Number of bytes to commit. | ||||||
|  */ |  */ | ||||||
| static void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes) | void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes) | ||||||
| { | { | ||||||
| 	if (dev_priv->cman) | 	if (dev_priv->cman) | ||||||
| 		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true); | 		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true); | ||||||
|  | @ -706,3 +721,8 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, | ||||||
| 
 | 
 | ||||||
| 	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid); | 	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid); | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) | ||||||
|  | { | ||||||
|  | 	return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID); | ||||||
|  | } | ||||||
|  |  | ||||||
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
|  | @ -110,6 +110,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | ||||||
| 		param->value = | 		param->value = | ||||||
| 			(dev_priv->active_display_unit == vmw_du_screen_target); | 			(dev_priv->active_display_unit == vmw_du_screen_target); | ||||||
| 		break; | 		break; | ||||||
|  | 	case DRM_VMW_PARAM_DX: | ||||||
|  | 		param->value = dev_priv->has_dx; | ||||||
|  | 		break; | ||||||
| 	default: | 	default: | ||||||
| 		DRM_ERROR("Illegal vmwgfx get param request: %d\n", | 		DRM_ERROR("Illegal vmwgfx get param request: %d\n", | ||||||
| 			  param->param); | 			  param->param); | ||||||
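From user space the new parameter is probed like any other vmwgfx param; a sketch assuming libdrm and the usual drm_vmw_getparam_arg layout:

	#include <stdbool.h>
	#include <xf86drm.h>
	#include "vmwgfx_drm.h"

	static bool vmw_has_dx(int fd)
	{
		struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_DX };

		/* Kernels without this patch reject the param with -EINVAL. */
		if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg)))
			return false;
		return arg.value != 0;
	}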
|  | @ -193,8 +196,8 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, | ||||||
| 		uint32_t *bounce32 = (uint32_t *) bounce; | 		uint32_t *bounce32 = (uint32_t *) bounce; | ||||||
| 
 | 
 | ||||||
| 		num = size / sizeof(uint32_t); | 		num = size / sizeof(uint32_t); | ||||||
| 		if (num > SVGA3D_DEVCAP_MAX) | 		if (num > SVGA3D_DEVCAP_DX) | ||||||
| 			num = SVGA3D_DEVCAP_MAX; | 			num = SVGA3D_DEVCAP_DX; | ||||||
| 
 | 
 | ||||||
| 		spin_lock(&dev_priv->cap_lock); | 		spin_lock(&dev_priv->cap_lock); | ||||||
| 		for (i = 0; i < num; ++i) { | 		for (i = 0; i < num; ++i) { | ||||||
|  |  | ||||||
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
|  | @ -528,7 +528,11 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, | ||||||
| 		return -EINVAL; | 		return -EINVAL; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if (unlikely(format != surface->format)) { | 	/*
 | ||||||
|  | 	 * For DX, surface format validation is done when surface->scanout | ||||||
|  | 	 * is set. | ||||||
|  | 	 */ | ||||||
|  | 	if (!dev_priv->has_dx && format != surface->format) { | ||||||
| 		DRM_ERROR("Invalid surface format for requested mode.\n"); | 		DRM_ERROR("Invalid surface format for requested mode.\n"); | ||||||
| 		return -EINVAL; | 		return -EINVAL; | ||||||
| 	} | 	} | ||||||
|  | @ -754,6 +758,7 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev, | ||||||
| 			true, /* can be a scanout buffer */ | 			true, /* can be a scanout buffer */ | ||||||
| 			1, /* num of mip levels */ | 			1, /* num of mip levels */ | ||||||
| 			0, | 			0, | ||||||
|  | 			0, | ||||||
| 			content_base_size, | 			content_base_size, | ||||||
| 			srf_out); | 			srf_out); | ||||||
| 	if (ret) { | 	if (ret) { | ||||||
|  | @ -769,7 +774,7 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev, | ||||||
| 	vmw_dmabuf_unreference(&res->backup); | 	vmw_dmabuf_unreference(&res->backup); | ||||||
| 	res->backup = vmw_dmabuf_reference(dmabuf_mob); | 	res->backup = vmw_dmabuf_reference(dmabuf_mob); | ||||||
| 	res->backup_offset = 0; | 	res->backup_offset = 0; | ||||||
| 	vmw_resource_unreserve(res, NULL, 0); | 	vmw_resource_unreserve(res, false, NULL, 0); | ||||||
| 	mutex_unlock(&res->dev_priv->cmdbuf_mutex); | 	mutex_unlock(&res->dev_priv->cmdbuf_mutex); | ||||||
| 
 | 
 | ||||||
| 	return 0; | 	return 0; | ||||||
|  | @ -1869,7 +1874,7 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, | ||||||
| void vmw_kms_helper_resource_revert(struct vmw_resource *res) | void vmw_kms_helper_resource_revert(struct vmw_resource *res) | ||||||
| { | { | ||||||
| 	vmw_kms_helper_buffer_revert(res->backup); | 	vmw_kms_helper_buffer_revert(res->backup); | ||||||
| 	vmw_resource_unreserve(res, NULL, 0); | 	vmw_resource_unreserve(res, false, NULL, 0); | ||||||
| 	mutex_unlock(&res->dev_priv->cmdbuf_mutex); | 	mutex_unlock(&res->dev_priv->cmdbuf_mutex); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -1916,7 +1921,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, | ||||||
| out_revert: | out_revert: | ||||||
| 	vmw_kms_helper_buffer_revert(res->backup); | 	vmw_kms_helper_buffer_revert(res->backup); | ||||||
| out_unreserve: | out_unreserve: | ||||||
| 	vmw_resource_unreserve(res, NULL, 0); | 	vmw_resource_unreserve(res, false, NULL, 0); | ||||||
| out_unlock: | out_unlock: | ||||||
| 	mutex_unlock(&res->dev_priv->cmdbuf_mutex); | 	mutex_unlock(&res->dev_priv->cmdbuf_mutex); | ||||||
| 	return ret; | 	return ret; | ||||||
|  | @ -1937,7 +1942,7 @@ void vmw_kms_helper_resource_finish(struct vmw_resource *res, | ||||||
| 		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup, | 		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup, | ||||||
| 					     out_fence, NULL); | 					     out_fence, NULL); | ||||||
| 
 | 
 | ||||||
| 	vmw_resource_unreserve(res, NULL, 0); | 	vmw_resource_unreserve(res, false, NULL, 0); | ||||||
| 	mutex_unlock(&res->dev_priv->cmdbuf_mutex); | 	mutex_unlock(&res->dev_priv->cmdbuf_mutex); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
|  | @ -67,9 +67,23 @@ | ||||||
|  * @size:           Size of the table (page-aligned). |  * @size:           Size of the table (page-aligned). | ||||||
|  * @page_table:     Pointer to a struct vmw_mob holding the page table. |  * @page_table:     Pointer to a struct vmw_mob holding the page table. | ||||||
|  */ |  */ | ||||||
| struct vmw_otable { | static const struct vmw_otable pre_dx_tables[] = { | ||||||
| 	unsigned long size; | 	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true}, | ||||||
| 	struct vmw_mob *page_table; | 	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true}, | ||||||
|  | 	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true}, | ||||||
|  | 	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true}, | ||||||
|  | 	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE, | ||||||
|  | 	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE} | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | static const struct vmw_otable dx_tables[] = { | ||||||
|  | 	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true}, | ||||||
|  | 	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true}, | ||||||
|  | 	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true}, | ||||||
|  | 	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true}, | ||||||
|  | 	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE, | ||||||
|  | 	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}, | ||||||
|  | 	{VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true}, | ||||||
| }; | }; | ||||||
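The initializers above imply that struct vmw_otable grew a third member beyond the @size and @page_table documented in the comment left behind, and that batches of tables are tracked by a small descriptor. A sketch of the layouts this file now assumes, presumably defined in a header whose diff is not shown here:

	/* Assumed from the {size, NULL, enabled} initializers above. */
	struct vmw_otable {
		unsigned long size;
		struct vmw_mob *page_table;
		bool enabled;
	};

	/* Assumed from the batch->otables/num_otables/otable_bo usage below. */
	struct vmw_otable_batch {
		unsigned num_otables;
		struct vmw_otable *otables;
		struct ttm_buffer_object *otable_bo;
	};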
| 
 | 
 | ||||||
| static int vmw_mob_pt_populate(struct vmw_private *dev_priv, | static int vmw_mob_pt_populate(struct vmw_private *dev_priv, | ||||||
|  | @ -92,6 +106,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob, | ||||||
|  */ |  */ | ||||||
| static int vmw_setup_otable_base(struct vmw_private *dev_priv, | static int vmw_setup_otable_base(struct vmw_private *dev_priv, | ||||||
| 				 SVGAOTableType type, | 				 SVGAOTableType type, | ||||||
|  | 				 struct ttm_buffer_object *otable_bo, | ||||||
| 				 unsigned long offset, | 				 unsigned long offset, | ||||||
| 				 struct vmw_otable *otable) | 				 struct vmw_otable *otable) | ||||||
| { | { | ||||||
|  | @ -106,7 +121,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv, | ||||||
| 
 | 
 | ||||||
| 	BUG_ON(otable->page_table != NULL); | 	BUG_ON(otable->page_table != NULL); | ||||||
| 
 | 
 | ||||||
| 	vsgt = vmw_bo_sg_table(dev_priv->otable_bo); | 	vsgt = vmw_bo_sg_table(otable_bo); | ||||||
| 	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT); | 	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT); | ||||||
| 	WARN_ON(!vmw_piter_next(&iter)); | 	WARN_ON(!vmw_piter_next(&iter)); | ||||||
| 
 | 
 | ||||||
|  | @ -193,7 +208,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv, | ||||||
| 			  "takedown.\n"); | 			  "takedown.\n"); | ||||||
| 		return; | 		return; | ||||||
| 	} | 	} | ||||||
|   | 
 | ||||||
| 	memset(cmd, 0, sizeof(*cmd)); | 	memset(cmd, 0, sizeof(*cmd)); | ||||||
| 	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE; | 	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE; | ||||||
| 	cmd->header.size = sizeof(cmd->body); | 	cmd->header.size = sizeof(cmd->body); | ||||||
|  | @ -218,47 +233,21 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv, | ||||||
| 	otable->page_table = NULL; | 	otable->page_table = NULL; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /*
 | 
 | ||||||
|  * vmw_otables_setup - Set up guest backed memory object tables | static int vmw_otable_batch_setup(struct vmw_private *dev_priv, | ||||||
|  * | 				  struct vmw_otable_batch *batch) | ||||||
|  * @dev_priv:       Pointer to a device private structure |  | ||||||
|  * |  | ||||||
|  * Takes care of the device guest backed surface |  | ||||||
|  * initialization, by setting up the guest backed memory object tables. |  | ||||||
|  * Returns 0 on success and various error codes on failure. A succesful return |  | ||||||
|  * means the object tables can be taken down using the vmw_otables_takedown |  | ||||||
|  * function. |  | ||||||
|  */ |  | ||||||
| int vmw_otables_setup(struct vmw_private *dev_priv) |  | ||||||
| { | { | ||||||
| 	unsigned long offset; | 	unsigned long offset; | ||||||
| 	unsigned long bo_size; | 	unsigned long bo_size; | ||||||
| 	struct vmw_otable *otables; | 	struct vmw_otable *otables = batch->otables; | ||||||
| 	SVGAOTableType i; | 	SVGAOTableType i; | ||||||
| 	int ret; | 	int ret; | ||||||
| 
 | 
 | ||||||
| 	otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables), |  | ||||||
| 			  GFP_KERNEL); |  | ||||||
| 	if (unlikely(otables == NULL)) { |  | ||||||
| 		DRM_ERROR("Failed to allocate space for otable " |  | ||||||
| 			  "metadata.\n"); |  | ||||||
| 		return -ENOMEM; |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	otables[SVGA_OTABLE_MOB].size = |  | ||||||
| 		VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE; |  | ||||||
| 	otables[SVGA_OTABLE_SURFACE].size = |  | ||||||
| 		VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE; |  | ||||||
| 	otables[SVGA_OTABLE_CONTEXT].size = |  | ||||||
| 		VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE; |  | ||||||
| 	otables[SVGA_OTABLE_SHADER].size = |  | ||||||
| 		VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE; |  | ||||||
| 	otables[SVGA_OTABLE_SCREENTARGET].size = |  | ||||||
| 		VMWGFX_NUM_GB_SCREEN_TARGET * |  | ||||||
| 		SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE; |  | ||||||
| 
 |  | ||||||
| 	bo_size = 0; | 	bo_size = 0; | ||||||
| 	for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) { | 	for (i = 0; i < batch->num_otables; ++i) { | ||||||
|  | 		if (!otables[i].enabled) | ||||||
|  | 			continue; | ||||||
|  | 
 | ||||||
| 		otables[i].size = | 		otables[i].size = | ||||||
| 			(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK; | 			(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK; | ||||||
| 		bo_size += otables[i].size; | 		bo_size += otables[i].size; | ||||||
|  | @ -268,46 +257,114 @@ int vmw_otables_setup(struct vmw_private *dev_priv) | ||||||
| 			    ttm_bo_type_device, | 			    ttm_bo_type_device, | ||||||
| 			    &vmw_sys_ne_placement, | 			    &vmw_sys_ne_placement, | ||||||
| 			    0, false, NULL, | 			    0, false, NULL, | ||||||
| 			    &dev_priv->otable_bo); | 			    &batch->otable_bo); | ||||||
| 
 | 
 | ||||||
| 	if (unlikely(ret != 0)) | 	if (unlikely(ret != 0)) | ||||||
| 		goto out_no_bo; | 		goto out_no_bo; | ||||||
| 
 | 
 | ||||||
| 	ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL); | 	ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL); | ||||||
| 	BUG_ON(ret != 0); | 	BUG_ON(ret != 0); | ||||||
| 	ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm); | 	ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm); | ||||||
| 	if (unlikely(ret != 0)) | 	if (unlikely(ret != 0)) | ||||||
| 		goto out_unreserve; | 		goto out_unreserve; | ||||||
| 	ret = vmw_bo_map_dma(dev_priv->otable_bo); | 	ret = vmw_bo_map_dma(batch->otable_bo); | ||||||
| 	if (unlikely(ret != 0)) | 	if (unlikely(ret != 0)) | ||||||
| 		goto out_unreserve; | 		goto out_unreserve; | ||||||
| 
 | 
 | ||||||
| 	ttm_bo_unreserve(dev_priv->otable_bo); | 	ttm_bo_unreserve(batch->otable_bo); | ||||||
| 
 | 
 | ||||||
| 	offset = 0; | 	offset = 0; | ||||||
| 	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) { | 	for (i = 0; i < batch->num_otables; ++i) { | ||||||
| 		ret = vmw_setup_otable_base(dev_priv, i, offset, | 		if (!batch->otables[i].enabled) | ||||||
|  | 			continue; | ||||||
|  | 
 | ||||||
|  | 		ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo, | ||||||
|  | 					    offset, | ||||||
| 					    &otables[i]); | 					    &otables[i]); | ||||||
| 		if (unlikely(ret != 0)) | 		if (unlikely(ret != 0)) | ||||||
| 			goto out_no_setup; | 			goto out_no_setup; | ||||||
| 		offset += otables[i].size; | 		offset += otables[i].size; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	dev_priv->otables = otables; |  | ||||||
| 	return 0; | 	return 0; | ||||||
| 
 | 
 | ||||||
| out_unreserve: | out_unreserve: | ||||||
| 	ttm_bo_unreserve(dev_priv->otable_bo); | 	ttm_bo_unreserve(batch->otable_bo); | ||||||
| out_no_setup: | out_no_setup: | ||||||
| 	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) | 	for (i = 0; i < batch->num_otables; ++i) { | ||||||
| 		vmw_takedown_otable_base(dev_priv, i, &otables[i]); | 		if (batch->otables[i].enabled) | ||||||
|  | 			vmw_takedown_otable_base(dev_priv, i, | ||||||
|  | 						 &batch->otables[i]); | ||||||
|  | 	} | ||||||
| 
 | 
 | ||||||
| 	ttm_bo_unref(&dev_priv->otable_bo); | 	ttm_bo_unref(&batch->otable_bo); | ||||||
| out_no_bo: | out_no_bo: | ||||||
| 	kfree(otables); |  | ||||||
| 	return ret; | 	return ret; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | /*
 | ||||||
|  |  * vmw_otables_setup - Set up guest backed memory object tables | ||||||
|  |  * | ||||||
|  |  * @dev_priv:       Pointer to a device private structure | ||||||
|  |  * | ||||||
|  |  * Takes care of the device guest backed surface | ||||||
|  |  * initialization, by setting up the guest backed memory object tables. | ||||||
|  |  * Returns 0 on success and various error codes on failure. A successful return | ||||||
|  |  * means the object tables can be taken down using the vmw_otables_takedown | ||||||
|  |  * function. | ||||||
|  |  */ | ||||||
|  | int vmw_otables_setup(struct vmw_private *dev_priv) | ||||||
|  | { | ||||||
|  | 	struct vmw_otable **otables = &dev_priv->otable_batch.otables; | ||||||
|  | 	int ret; | ||||||
|  | 
 | ||||||
|  | 	if (dev_priv->has_dx) { | ||||||
|  | 		*otables = kmalloc(sizeof(dx_tables), GFP_KERNEL); | ||||||
|  | 		if (*otables == NULL) | ||||||
|  | 			return -ENOMEM; | ||||||
|  | 
 | ||||||
|  | 		memcpy(*otables, dx_tables, sizeof(dx_tables)); | ||||||
|  | 		dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables); | ||||||
|  | 	} else { | ||||||
|  | 		*otables = kmalloc(sizeof(pre_dx_tables), GFP_KERNEL); | ||||||
|  | 		if (*otables == NULL) | ||||||
|  | 			return -ENOMEM; | ||||||
|  | 
 | ||||||
|  | 		memcpy(*otables, pre_dx_tables, sizeof(pre_dx_tables)); | ||||||
|  | 		dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables); | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch); | ||||||
|  | 	if (unlikely(ret != 0)) | ||||||
|  | 		goto out_setup; | ||||||
|  | 
 | ||||||
|  | 	return 0; | ||||||
|  | 
 | ||||||
|  | out_setup: | ||||||
|  | 	kfree(*otables); | ||||||
|  | 	return ret; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | static void vmw_otable_batch_takedown(struct vmw_private *dev_priv, | ||||||
|  | 			       struct vmw_otable_batch *batch) | ||||||
|  | { | ||||||
|  | 	SVGAOTableType i; | ||||||
|  | 	struct ttm_buffer_object *bo = batch->otable_bo; | ||||||
|  | 	int ret; | ||||||
|  | 
 | ||||||
|  | 	for (i = 0; i < batch->num_otables; ++i) | ||||||
|  | 		if (batch->otables[i].enabled) | ||||||
|  | 			vmw_takedown_otable_base(dev_priv, i, | ||||||
|  | 						 &batch->otables[i]); | ||||||
|  | 
 | ||||||
|  | 	ret = ttm_bo_reserve(bo, false, true, false, NULL); | ||||||
|  | 	BUG_ON(ret != 0); | ||||||
|  | 
 | ||||||
|  | 	vmw_fence_single_bo(bo, NULL); | ||||||
|  | 	ttm_bo_unreserve(bo); | ||||||
|  | 
 | ||||||
|  | 	ttm_bo_unref(&batch->otable_bo); | ||||||
|  | } | ||||||
| 
 | 
 | ||||||
| /*
 | /*
 | ||||||
|  * vmw_otables_takedown - Take down guest backed memory object tables |  * vmw_otables_takedown - Take down guest backed memory object tables | ||||||
|  | @ -318,26 +375,10 @@ out_no_bo: | ||||||
|  */ |  */ | ||||||
| void vmw_otables_takedown(struct vmw_private *dev_priv) | void vmw_otables_takedown(struct vmw_private *dev_priv) | ||||||
| { | { | ||||||
| 	SVGAOTableType i; | 	vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch); | ||||||
| 	struct ttm_buffer_object *bo = dev_priv->otable_bo; | 	kfree(dev_priv->otable_batch.otables); | ||||||
| 	int ret; |  | ||||||
| 
 |  | ||||||
| 	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) |  | ||||||
| 		vmw_takedown_otable_base(dev_priv, i, |  | ||||||
| 					 &dev_priv->otables[i]); |  | ||||||
| 
 |  | ||||||
| 	ret = ttm_bo_reserve(bo, false, true, false, NULL); |  | ||||||
| 	BUG_ON(ret != 0); |  | ||||||
| 
 |  | ||||||
| 	vmw_fence_single_bo(bo, NULL); |  | ||||||
| 	ttm_bo_unreserve(bo); |  | ||||||
| 
 |  | ||||||
| 	ttm_bo_unref(&dev_priv->otable_bo); |  | ||||||
| 	kfree(dev_priv->otables); |  | ||||||
| 	dev_priv->otables = NULL; |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| 
 |  | ||||||
| /*
 | /*
 | ||||||
|  * vmw_mob_calculate_pt_pages - Calculate the number of page table pages |  * vmw_mob_calculate_pt_pages - Calculate the number of page table pages | ||||||
|  * needed for a guest backed memory object. |  * needed for a guest backed memory object. | ||||||
|  | @ -410,7 +451,7 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv, | ||||||
| 		goto out_unreserve; | 		goto out_unreserve; | ||||||
| 
 | 
 | ||||||
| 	ttm_bo_unreserve(mob->pt_bo); | 	ttm_bo_unreserve(mob->pt_bo); | ||||||
| 	 | 
 | ||||||
| 	return 0; | 	return 0; | ||||||
| 
 | 
 | ||||||
| out_unreserve: | out_unreserve: | ||||||
|  |  | ||||||
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
|  | @ -31,6 +31,7 @@ | ||||||
| #include <drm/ttm/ttm_placement.h> | #include <drm/ttm/ttm_placement.h> | ||||||
| #include <drm/drmP.h> | #include <drm/drmP.h> | ||||||
| #include "vmwgfx_resource_priv.h" | #include "vmwgfx_resource_priv.h" | ||||||
|  | #include "vmwgfx_binding.h" | ||||||
| 
 | 
 | ||||||
| #define VMW_RES_EVICT_ERR_COUNT 10 | #define VMW_RES_EVICT_ERR_COUNT 10 | ||||||
| 
 | 
 | ||||||
|  | @ -144,10 +145,10 @@ static void vmw_resource_release(struct kref *kref) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if (likely(res->hw_destroy != NULL)) { | 	if (likely(res->hw_destroy != NULL)) { | ||||||
| 		res->hw_destroy(res); |  | ||||||
| 		mutex_lock(&dev_priv->binding_mutex); | 		mutex_lock(&dev_priv->binding_mutex); | ||||||
| 		vmw_context_binding_res_list_kill(&res->binding_head); | 		vmw_binding_res_list_kill(&res->binding_head); | ||||||
| 		mutex_unlock(&dev_priv->binding_mutex); | 		mutex_unlock(&dev_priv->binding_mutex); | ||||||
|  | 		res->hw_destroy(res); | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	id = res->id; | 	id = res->id; | ||||||
|  | @ -1149,14 +1150,16 @@ out_bind_failed: | ||||||
|  * command submission. |  * command submission. | ||||||
|  * |  * | ||||||
|  * @res:               Pointer to the struct vmw_resource to unreserve. |  * @res:               Pointer to the struct vmw_resource to unreserve. | ||||||
|  |  * @switch_backup:     Backup buffer has been switched. | ||||||
|  * @new_backup:        Pointer to new backup buffer if command submission |  * @new_backup:        Pointer to new backup buffer if command submission | ||||||
|  *                     switched. |  *                     switched. May be NULL. | ||||||
|  * @new_backup_offset: New backup offset if @new_backup is !NULL. |  * @new_backup_offset: New backup offset if @switch_backup is true. | ||||||
|  * |  * | ||||||
|  * Currently unreserving a resource means putting it back on the device's |  * Currently unreserving a resource means putting it back on the device's | ||||||
|  * resource lru list, so that it can be evicted if necessary. |  * resource lru list, so that it can be evicted if necessary. | ||||||
|  */ |  */ | ||||||
| void vmw_resource_unreserve(struct vmw_resource *res, | void vmw_resource_unreserve(struct vmw_resource *res, | ||||||
|  | 			    bool switch_backup, | ||||||
| 			    struct vmw_dma_buffer *new_backup, | 			    struct vmw_dma_buffer *new_backup, | ||||||
| 			    unsigned long new_backup_offset) | 			    unsigned long new_backup_offset) | ||||||
| { | { | ||||||
|  | @ -1165,19 +1168,22 @@ void vmw_resource_unreserve(struct vmw_resource *res, | ||||||
| 	if (!list_empty(&res->lru_head)) | 	if (!list_empty(&res->lru_head)) | ||||||
| 		return; | 		return; | ||||||
| 
 | 
 | ||||||
| 	if (new_backup && new_backup != res->backup) { | 	if (switch_backup && new_backup != res->backup) { | ||||||
| 
 |  | ||||||
| 		if (res->backup) { | 		if (res->backup) { | ||||||
| 			lockdep_assert_held(&res->backup->base.resv->lock.base); | 			lockdep_assert_held(&res->backup->base.resv->lock.base); | ||||||
| 			list_del_init(&res->mob_head); | 			list_del_init(&res->mob_head); | ||||||
| 			vmw_dmabuf_unreference(&res->backup); | 			vmw_dmabuf_unreference(&res->backup); | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		res->backup = vmw_dmabuf_reference(new_backup); | 		if (new_backup) { | ||||||
| 		lockdep_assert_held(&new_backup->base.resv->lock.base); | 			res->backup = vmw_dmabuf_reference(new_backup); | ||||||
| 		list_add_tail(&res->mob_head, &new_backup->res_list); | 			lockdep_assert_held(&new_backup->base.resv->lock.base); | ||||||
|  | 			list_add_tail(&res->mob_head, &new_backup->res_list); | ||||||
|  | 		} else { | ||||||
|  | 			res->backup = NULL; | ||||||
|  | 		} | ||||||
| 	} | 	} | ||||||
| 	if (new_backup) | 	if (switch_backup) | ||||||
| 		res->backup_offset = new_backup_offset; | 		res->backup_offset = new_backup_offset; | ||||||
| 
 | 
 | ||||||
| 	if (!res->func->may_evict || res->id == -1 || res->pin_count) | 	if (!res->func->may_evict || res->id == -1 || res->pin_count) | ||||||
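The reworked unreserve separates "a backup switch happened" from "there is a new backup buffer"; a caller sketch of the three cases the new signature distinguishes (new_backup and new_backup_offset are illustrative locals):

	/* Keep the existing backup arrangement (the common case in this diff): */
	vmw_resource_unreserve(res, false, NULL, 0);

	/* Switch to a new backup buffer at a given offset: */
	vmw_resource_unreserve(res, true, new_backup, new_backup_offset);

	/* Drop the backup entirely, which new_backup == NULL now expresses: */
	vmw_resource_unreserve(res, true, NULL, 0);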
|  | @ -1269,8 +1275,12 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible, | ||||||
| 	if (res->func->needs_backup && res->backup == NULL && | 	if (res->func->needs_backup && res->backup == NULL && | ||||||
| 	    !no_backup) { | 	    !no_backup) { | ||||||
| 		ret = vmw_resource_buf_alloc(res, interruptible); | 		ret = vmw_resource_buf_alloc(res, interruptible); | ||||||
| 		if (unlikely(ret != 0)) | 		if (unlikely(ret != 0)) { | ||||||
|  | 			DRM_ERROR("Failed to allocate a backup buffer " | ||||||
|  | 				  "of size %lu bytes.\n", | ||||||
|  | 				  (unsigned long) res->backup_size); | ||||||
| 			return ret; | 			return ret; | ||||||
|  | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	return 0; | 	return 0; | ||||||
|  | @ -1354,7 +1364,7 @@ int vmw_resource_validate(struct vmw_resource *res) | ||||||
| 	struct ttm_validate_buffer val_buf; | 	struct ttm_validate_buffer val_buf; | ||||||
| 	unsigned err_count = 0; | 	unsigned err_count = 0; | ||||||
| 
 | 
 | ||||||
| 	if (likely(!res->func->may_evict)) | 	if (!res->func->create) | ||||||
| 		return 0; | 		return 0; | ||||||
| 
 | 
 | ||||||
| 	val_buf.bo = NULL; | 	val_buf.bo = NULL; | ||||||
|  | @ -1624,7 +1634,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible) | ||||||
| 	res->pin_count++; | 	res->pin_count++; | ||||||
| 
 | 
 | ||||||
| out_no_validate: | out_no_validate: | ||||||
| 	vmw_resource_unreserve(res, NULL, 0UL); | 	vmw_resource_unreserve(res, false, NULL, 0UL); | ||||||
| out_no_reserve: | out_no_reserve: | ||||||
| 	mutex_unlock(&dev_priv->cmdbuf_mutex); | 	mutex_unlock(&dev_priv->cmdbuf_mutex); | ||||||
| 	ttm_write_unlock(&dev_priv->reservation_sem); | 	ttm_write_unlock(&dev_priv->reservation_sem); | ||||||
|  | @ -1660,8 +1670,18 @@ void vmw_resource_unpin(struct vmw_resource *res) | ||||||
| 		ttm_bo_unreserve(&vbo->base); | 		ttm_bo_unreserve(&vbo->base); | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	vmw_resource_unreserve(res, NULL, 0UL); | 	vmw_resource_unreserve(res, false, NULL, 0UL); | ||||||
| 
 | 
 | ||||||
| 	mutex_unlock(&dev_priv->cmdbuf_mutex); | 	mutex_unlock(&dev_priv->cmdbuf_mutex); | ||||||
| 	ttm_read_unlock(&dev_priv->reservation_sem); | 	ttm_read_unlock(&dev_priv->reservation_sem); | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_res_type - Return the resource type | ||||||
|  |  * | ||||||
|  |  * @res: Pointer to the resource | ||||||
|  |  */ | ||||||
|  | enum vmw_res_type vmw_res_type(const struct vmw_resource *res) | ||||||
|  | { | ||||||
|  | 	return res->func->res_type; | ||||||
|  | } | ||||||
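The accessor keeps struct vmw_res_func private to the resource code while letting other files branch on resource type; an illustrative use:

	/* e.g. in binding or validation code outside vmwgfx_resource.c: */
	if (vmw_res_type(res) == vmw_res_shader)
		/* apply shader-specific handling */;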
|  |  | ||||||
drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
|  | @ -30,6 +30,12 @@ | ||||||
| 
 | 
 | ||||||
| #include "vmwgfx_drv.h" | #include "vmwgfx_drv.h" | ||||||
| 
 | 
 | ||||||
|  | enum vmw_cmdbuf_res_state { | ||||||
|  | 	VMW_CMDBUF_RES_COMMITTED, | ||||||
|  | 	VMW_CMDBUF_RES_ADD, | ||||||
|  | 	VMW_CMDBUF_RES_DEL | ||||||
|  | }; | ||||||
|  | 
 | ||||||
| /**
 | /**
 | ||||||
|  * struct vmw_user_resource_conv - Identify a derived user-exported resource |  * struct vmw_user_resource_conv - Identify a derived user-exported resource | ||||||
|  * type and provide a function to convert its ttm_base_object pointer to |  * type and provide a function to convert its ttm_base_object pointer to | ||||||
|  | @ -55,8 +61,10 @@ struct vmw_user_resource_conv { | ||||||
|  * @bind:              Bind a hardware resource to persistent buffer storage. |  * @bind:              Bind a hardware resource to persistent buffer storage. | ||||||
|  * @unbind:            Unbind a hardware resource from persistent |  * @unbind:            Unbind a hardware resource from persistent | ||||||
|  *                     buffer storage. |  *                     buffer storage. | ||||||
|  |  * @commit_notify:     If the resource is a command buffer managed resource, | ||||||
|  |  *                     callback to notify that a define or remove command | ||||||
|  |  *                     has been committed to the device. | ||||||
|  */ |  */ | ||||||
| 
 |  | ||||||
| struct vmw_res_func { | struct vmw_res_func { | ||||||
| 	enum vmw_res_type res_type; | 	enum vmw_res_type res_type; | ||||||
| 	bool needs_backup; | 	bool needs_backup; | ||||||
|  | @ -71,6 +79,8 @@ struct vmw_res_func { | ||||||
| 	int (*unbind) (struct vmw_resource *res, | 	int (*unbind) (struct vmw_resource *res, | ||||||
| 		       bool readback, | 		       bool readback, | ||||||
| 		       struct ttm_validate_buffer *val_buf); | 		       struct ttm_validate_buffer *val_buf); | ||||||
|  | 	void (*commit_notify)(struct vmw_resource *res, | ||||||
|  | 			      enum vmw_cmdbuf_res_state state); | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| int vmw_resource_alloc_id(struct vmw_resource *res); | int vmw_resource_alloc_id(struct vmw_resource *res); | ||||||
|  |  | ||||||
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
|  | @ -27,12 +27,15 @@ | ||||||
| 
 | 
 | ||||||
| #include "vmwgfx_drv.h" | #include "vmwgfx_drv.h" | ||||||
| #include "vmwgfx_resource_priv.h" | #include "vmwgfx_resource_priv.h" | ||||||
|  | #include "vmwgfx_binding.h" | ||||||
| #include "ttm/ttm_placement.h" | #include "ttm/ttm_placement.h" | ||||||
| 
 | 
 | ||||||
| struct vmw_shader { | struct vmw_shader { | ||||||
| 	struct vmw_resource res; | 	struct vmw_resource res; | ||||||
| 	SVGA3dShaderType type; | 	SVGA3dShaderType type; | ||||||
| 	uint32_t size; | 	uint32_t size; | ||||||
|  | 	uint8_t num_input_sig; | ||||||
|  | 	uint8_t num_output_sig; | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| struct vmw_user_shader { | struct vmw_user_shader { | ||||||
|  | @ -40,8 +43,18 @@ struct vmw_user_shader { | ||||||
| 	struct vmw_shader shader; | 	struct vmw_shader shader; | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
|  | struct vmw_dx_shader { | ||||||
|  | 	struct vmw_resource res; | ||||||
|  | 	struct vmw_resource *ctx; | ||||||
|  | 	struct vmw_resource *cotable; | ||||||
|  | 	u32 id; | ||||||
|  | 	bool committed; | ||||||
|  | 	struct list_head cotable_head; | ||||||
|  | }; | ||||||
|  | 
 | ||||||
| static uint64_t vmw_user_shader_size; | static uint64_t vmw_user_shader_size; | ||||||
| static uint64_t vmw_shader_size; | static uint64_t vmw_shader_size; | ||||||
|  | static size_t vmw_shader_dx_size; | ||||||
| 
 | 
 | ||||||
| static void vmw_user_shader_free(struct vmw_resource *res); | static void vmw_user_shader_free(struct vmw_resource *res); | ||||||
| static struct vmw_resource * | static struct vmw_resource * | ||||||
|  | @ -55,6 +68,18 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res, | ||||||
| 				 struct ttm_validate_buffer *val_buf); | 				 struct ttm_validate_buffer *val_buf); | ||||||
| static int vmw_gb_shader_destroy(struct vmw_resource *res); | static int vmw_gb_shader_destroy(struct vmw_resource *res); | ||||||
| 
 | 
 | ||||||
|  | static int vmw_dx_shader_create(struct vmw_resource *res); | ||||||
|  | static int vmw_dx_shader_bind(struct vmw_resource *res, | ||||||
|  | 			       struct ttm_validate_buffer *val_buf); | ||||||
|  | static int vmw_dx_shader_unbind(struct vmw_resource *res, | ||||||
|  | 				 bool readback, | ||||||
|  | 				 struct ttm_validate_buffer *val_buf); | ||||||
|  | static void vmw_dx_shader_commit_notify(struct vmw_resource *res, | ||||||
|  | 					enum vmw_cmdbuf_res_state state); | ||||||
|  | static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type); | ||||||
|  | static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type); | ||||||
|  | static uint64_t vmw_user_shader_size; | ||||||
|  | 
 | ||||||
| static const struct vmw_user_resource_conv user_shader_conv = { | static const struct vmw_user_resource_conv user_shader_conv = { | ||||||
| 	.object_type = VMW_RES_SHADER, | 	.object_type = VMW_RES_SHADER, | ||||||
| 	.base_obj_to_res = vmw_user_shader_base_to_res, | 	.base_obj_to_res = vmw_user_shader_base_to_res, | ||||||
|  | @ -77,6 +102,24 @@ static const struct vmw_res_func vmw_gb_shader_func = { | ||||||
| 	.unbind = vmw_gb_shader_unbind | 	.unbind = vmw_gb_shader_unbind | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
|  | static const struct vmw_res_func vmw_dx_shader_func = { | ||||||
|  | 	.res_type = vmw_res_shader, | ||||||
|  | 	.needs_backup = true, | ||||||
|  | 	.may_evict = false, | ||||||
|  | 	.type_name = "dx shaders", | ||||||
|  | 	.backup_placement = &vmw_mob_placement, | ||||||
|  | 	.create = vmw_dx_shader_create, | ||||||
|  | 	/*
 | ||||||
|  | 	 * The destroy callback is only called with a committed resource on | ||||||
|  | 	 * context destroy, in which case we destroy the cotable anyway, | ||||||
|  | 	 * so there's no need to destroy DX shaders separately. | ||||||
|  | 	 */ | ||||||
|  | 	.destroy = NULL, | ||||||
|  | 	.bind = vmw_dx_shader_bind, | ||||||
|  | 	.unbind = vmw_dx_shader_unbind, | ||||||
|  | 	.commit_notify = vmw_dx_shader_commit_notify, | ||||||
|  | }; | ||||||
|  | 
 | ||||||
| /**
 | /**
 | ||||||
|  * Shader management: |  * Shader management: | ||||||
|  */ |  */ | ||||||
|  | @ -87,25 +130,42 @@ vmw_res_to_shader(struct vmw_resource *res) | ||||||
| 	return container_of(res, struct vmw_shader, res); | 	return container_of(res, struct vmw_shader, res); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_res_to_dx_shader - typecast a struct vmw_resource to a | ||||||
|  |  * struct vmw_dx_shader | ||||||
|  |  * | ||||||
|  |  * @res: Pointer to the struct vmw_resource. | ||||||
|  |  */ | ||||||
|  | static inline struct vmw_dx_shader * | ||||||
|  | vmw_res_to_dx_shader(struct vmw_resource *res) | ||||||
|  | { | ||||||
|  | 	return container_of(res, struct vmw_dx_shader, res); | ||||||
|  | } | ||||||
|  | 
 | ||||||
| static void vmw_hw_shader_destroy(struct vmw_resource *res) | static void vmw_hw_shader_destroy(struct vmw_resource *res) | ||||||
| { | { | ||||||
| 	(void) vmw_gb_shader_destroy(res); | 	if (likely(res->func->destroy)) | ||||||
|  | 		(void) res->func->destroy(res); | ||||||
|  | 	else | ||||||
|  | 		res->id = -1; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | 
 | ||||||
| static int vmw_gb_shader_init(struct vmw_private *dev_priv, | static int vmw_gb_shader_init(struct vmw_private *dev_priv, | ||||||
| 			      struct vmw_resource *res, | 			      struct vmw_resource *res, | ||||||
| 			      uint32_t size, | 			      uint32_t size, | ||||||
| 			      uint64_t offset, | 			      uint64_t offset, | ||||||
| 			      SVGA3dShaderType type, | 			      SVGA3dShaderType type, | ||||||
|  | 			      uint8_t num_input_sig, | ||||||
|  | 			      uint8_t num_output_sig, | ||||||
| 			      struct vmw_dma_buffer *byte_code, | 			      struct vmw_dma_buffer *byte_code, | ||||||
| 			      void (*res_free) (struct vmw_resource *res)) | 			      void (*res_free) (struct vmw_resource *res)) | ||||||
| { | { | ||||||
| 	struct vmw_shader *shader = vmw_res_to_shader(res); | 	struct vmw_shader *shader = vmw_res_to_shader(res); | ||||||
| 	int ret; | 	int ret; | ||||||
| 
 | 
 | ||||||
| 	ret = vmw_resource_init(dev_priv, res, true, | 	ret = vmw_resource_init(dev_priv, res, true, res_free, | ||||||
| 				res_free, &vmw_gb_shader_func); | 				&vmw_gb_shader_func); | ||||||
| 
 |  | ||||||
| 
 | 
 | ||||||
| 	if (unlikely(ret != 0)) { | 	if (unlikely(ret != 0)) { | ||||||
| 		if (res_free) | 		if (res_free) | ||||||
|  | @ -122,11 +182,17 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv, | ||||||
| 	} | 	} | ||||||
| 	shader->size = size; | 	shader->size = size; | ||||||
| 	shader->type = type; | 	shader->type = type; | ||||||
|  | 	shader->num_input_sig = num_input_sig; | ||||||
|  | 	shader->num_output_sig = num_output_sig; | ||||||
| 
 | 
 | ||||||
| 	vmw_resource_activate(res, vmw_hw_shader_destroy); | 	vmw_resource_activate(res, vmw_hw_shader_destroy); | ||||||
| 	return 0; | 	return 0; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | /*
 | ||||||
|  |  * GB shader code: | ||||||
|  |  */ | ||||||
|  | 
 | ||||||
| static int vmw_gb_shader_create(struct vmw_resource *res) | static int vmw_gb_shader_create(struct vmw_resource *res) | ||||||
| { | { | ||||||
| 	struct vmw_private *dev_priv = res->dev_priv; | 	struct vmw_private *dev_priv = res->dev_priv; | ||||||
|  | @ -259,7 +325,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res) | ||||||
| 		return 0; | 		return 0; | ||||||
| 
 | 
 | ||||||
| 	mutex_lock(&dev_priv->binding_mutex); | 	mutex_lock(&dev_priv->binding_mutex); | ||||||
| 	vmw_context_binding_res_list_scrub(&res->binding_head); | 	vmw_binding_res_list_scrub(&res->binding_head); | ||||||
| 
 | 
 | ||||||
| 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||||||
| 	if (unlikely(cmd == NULL)) { | 	if (unlikely(cmd == NULL)) { | ||||||
|  | @ -280,6 +346,321 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res) | ||||||
| 	return 0; | 	return 0; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | /*
 | ||||||
|  |  * DX shader code: | ||||||
|  |  */ | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_dx_shader_commit_notify - Notify that a shader operation has been | ||||||
|  |  * committed to hardware from a user-supplied command stream. | ||||||
|  |  * | ||||||
|  |  * @res: Pointer to the shader resource. | ||||||
|  |  * @state: Indicating whether a creation or removal has been committed. | ||||||
|  |  * | ||||||
|  |  */ | ||||||
|  | static void vmw_dx_shader_commit_notify(struct vmw_resource *res, | ||||||
|  | 					enum vmw_cmdbuf_res_state state) | ||||||
|  | { | ||||||
|  | 	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); | ||||||
|  | 	struct vmw_private *dev_priv = res->dev_priv; | ||||||
|  | 
 | ||||||
|  | 	if (state == VMW_CMDBUF_RES_ADD) { | ||||||
|  | 		mutex_lock(&dev_priv->binding_mutex); | ||||||
|  | 		vmw_cotable_add_resource(shader->cotable, | ||||||
|  | 					 &shader->cotable_head); | ||||||
|  | 		shader->committed = true; | ||||||
|  | 		res->id = shader->id; | ||||||
|  | 		mutex_unlock(&dev_priv->binding_mutex); | ||||||
|  | 	} else { | ||||||
|  | 		mutex_lock(&dev_priv->binding_mutex); | ||||||
|  | 		list_del_init(&shader->cotable_head); | ||||||
|  | 		shader->committed = false; | ||||||
|  | 		res->id = -1; | ||||||
|  | 		mutex_unlock(&dev_priv->binding_mutex); | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_dx_shader_unscrub - Have the device reattach a MOB to a DX shader. | ||||||
|  |  * | ||||||
|  |  * @res: The shader resource | ||||||
|  |  * | ||||||
|  |  * This function reverts a scrub operation. | ||||||
|  |  */ | ||||||
|  | static int vmw_dx_shader_unscrub(struct vmw_resource *res) | ||||||
|  | { | ||||||
|  | 	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); | ||||||
|  | 	struct vmw_private *dev_priv = res->dev_priv; | ||||||
|  | 	struct { | ||||||
|  | 		SVGA3dCmdHeader header; | ||||||
|  | 		SVGA3dCmdDXBindShader body; | ||||||
|  | 	} *cmd; | ||||||
|  | 
 | ||||||
|  | 	if (!list_empty(&shader->cotable_head) || !shader->committed) | ||||||
|  | 		return 0; | ||||||
|  | 
 | ||||||
|  | 	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), | ||||||
|  | 				  shader->ctx->id); | ||||||
|  | 	if (unlikely(cmd == NULL)) { | ||||||
|  | 		DRM_ERROR("Failed reserving FIFO space for shader " | ||||||
|  | 			  "unscrubbing.\n"); | ||||||
|  | 		return -ENOMEM; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER; | ||||||
|  | 	cmd->header.size = sizeof(cmd->body); | ||||||
|  | 	cmd->body.cid = shader->ctx->id; | ||||||
|  | 	cmd->body.shid = shader->id; | ||||||
|  | 	cmd->body.mobid = res->backup->base.mem.start; | ||||||
|  | 	cmd->body.offsetInBytes = res->backup_offset; | ||||||
|  | 	vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||||||
|  | 
 | ||||||
|  | 	vmw_cotable_add_resource(shader->cotable, &shader->cotable_head); | ||||||
|  | 
 | ||||||
|  | 	return 0; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_dx_shader_create - The DX shader create callback | ||||||
|  |  * | ||||||
|  |  * @res: The DX shader resource | ||||||
|  |  * | ||||||
|  |  * The create callback is called as part of resource validation and | ||||||
|  |  * makes sure that we unscrub the shader if it's previously been scrubbed. | ||||||
|  |  */ | ||||||
|  | static int vmw_dx_shader_create(struct vmw_resource *res) | ||||||
|  | { | ||||||
|  | 	struct vmw_private *dev_priv = res->dev_priv; | ||||||
|  | 	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); | ||||||
|  | 	int ret = 0; | ||||||
|  | 
 | ||||||
|  | 	WARN_ON_ONCE(!shader->committed); | ||||||
|  | 
 | ||||||
|  | 	if (!list_empty(&res->mob_head)) { | ||||||
|  | 		mutex_lock(&dev_priv->binding_mutex); | ||||||
|  | 		ret = vmw_dx_shader_unscrub(res); | ||||||
|  | 		mutex_unlock(&dev_priv->binding_mutex); | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	res->id = shader->id; | ||||||
|  | 	return ret; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_dx_shader_bind - The DX shader bind callback | ||||||
|  |  * | ||||||
|  |  * @res: The DX shader resource | ||||||
|  |  * @val_buf: Pointer to the validate buffer. | ||||||
|  |  * | ||||||
|  |  */ | ||||||
|  | static int vmw_dx_shader_bind(struct vmw_resource *res, | ||||||
|  | 			      struct ttm_validate_buffer *val_buf) | ||||||
|  | { | ||||||
|  | 	struct vmw_private *dev_priv = res->dev_priv; | ||||||
|  | 	struct ttm_buffer_object *bo = val_buf->bo; | ||||||
|  | 
 | ||||||
|  | 	BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||||||
|  | 	mutex_lock(&dev_priv->binding_mutex); | ||||||
|  | 	vmw_dx_shader_unscrub(res); | ||||||
|  | 	mutex_unlock(&dev_priv->binding_mutex); | ||||||
|  | 
 | ||||||
|  | 	return 0; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_dx_shader_scrub - Have the device unbind a MOB from a DX shader. | ||||||
|  |  * | ||||||
|  |  * @res: The shader resource | ||||||
|  |  * | ||||||
|  |  * This function unbinds a MOB from the DX shader without requiring the | ||||||
|  |  * MOB dma_buffer to be reserved. The driver still considers the MOB bound. | ||||||
|  |  * However, once the driver eventually decides to unbind the MOB, it doesn't | ||||||
|  |  * need to access the context. | ||||||
|  |  */ | ||||||
|  | static int vmw_dx_shader_scrub(struct vmw_resource *res) | ||||||
|  | { | ||||||
|  | 	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); | ||||||
|  | 	struct vmw_private *dev_priv = res->dev_priv; | ||||||
|  | 	struct { | ||||||
|  | 		SVGA3dCmdHeader header; | ||||||
|  | 		SVGA3dCmdDXBindShader body; | ||||||
|  | 	} *cmd; | ||||||
|  | 
 | ||||||
|  | 	if (list_empty(&shader->cotable_head)) | ||||||
|  | 		return 0; | ||||||
|  | 
 | ||||||
|  | 	WARN_ON_ONCE(!shader->committed); | ||||||
|  | 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||||||
|  | 	if (unlikely(cmd == NULL)) { | ||||||
|  | 		DRM_ERROR("Failed reserving FIFO space for shader " | ||||||
|  | 			  "scrubbing.\n"); | ||||||
|  | 		return -ENOMEM; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER; | ||||||
|  | 	cmd->header.size = sizeof(cmd->body); | ||||||
|  | 	cmd->body.cid = shader->ctx->id; | ||||||
|  | 	cmd->body.shid = res->id; | ||||||
|  | 	cmd->body.mobid = SVGA3D_INVALID_ID; | ||||||
|  | 	cmd->body.offsetInBytes = 0; | ||||||
|  | 	vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||||||
|  | 	res->id = -1; | ||||||
|  | 	list_del_init(&shader->cotable_head); | ||||||
|  | 
 | ||||||
|  | 	return 0; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_dx_shader_unbind - The dx shader unbind callback. | ||||||
|  |  * | ||||||
|  |  * @res: The shader resource | ||||||
|  |  * @readback: Whether this is a readback unbind. Currently unused. | ||||||
|  |  * @val_buf: MOB buffer information. | ||||||
|  |  */ | ||||||
|  | static int vmw_dx_shader_unbind(struct vmw_resource *res, | ||||||
|  | 				bool readback, | ||||||
|  | 				struct ttm_validate_buffer *val_buf) | ||||||
|  | { | ||||||
|  | 	struct vmw_private *dev_priv = res->dev_priv; | ||||||
|  | 	struct vmw_fence_obj *fence; | ||||||
|  | 	int ret; | ||||||
|  | 
 | ||||||
|  | 	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB); | ||||||
|  | 
 | ||||||
|  | 	mutex_lock(&dev_priv->binding_mutex); | ||||||
|  | 	ret = vmw_dx_shader_scrub(res); | ||||||
|  | 	mutex_unlock(&dev_priv->binding_mutex); | ||||||
|  | 
 | ||||||
|  | 	if (ret) | ||||||
|  | 		return ret; | ||||||
|  | 
 | ||||||
|  | 	(void) vmw_execbuf_fence_commands(NULL, dev_priv, | ||||||
|  | 					  &fence, NULL); | ||||||
|  | 	vmw_fence_single_bo(val_buf->bo, fence); | ||||||
|  | 
 | ||||||
|  | 	if (likely(fence != NULL)) | ||||||
|  | 		vmw_fence_obj_unreference(&fence); | ||||||
|  | 
 | ||||||
|  | 	return 0; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_dx_shader_cotable_list_scrub - The cotable unbind_func callback for | ||||||
|  |  * DX shaders. | ||||||
|  |  * | ||||||
|  |  * @dev_priv: Pointer to device private structure. | ||||||
|  |  * @list: The list of cotable resources. | ||||||
|  |  * @readback: Whether the call was part of a readback unbind. | ||||||
|  |  * | ||||||
|  |  * Scrubs all shader MOBs so that any subsequent shader unbind or shader | ||||||
|  |  * destroy operation won't need to swap in the context. | ||||||
|  |  */ | ||||||
|  | void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv, | ||||||
|  | 				      struct list_head *list, | ||||||
|  | 				      bool readback) | ||||||
|  | { | ||||||
|  | 	struct vmw_dx_shader *entry, *next; | ||||||
|  | 
 | ||||||
|  | 	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); | ||||||
|  | 
 | ||||||
|  | 	list_for_each_entry_safe(entry, next, list, cotable_head) { | ||||||
|  | 		WARN_ON(vmw_dx_shader_scrub(&entry->res)); | ||||||
|  | 		if (!readback) | ||||||
|  | 			entry->committed = false; | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_dx_shader_res_free - The DX shader free callback | ||||||
|  |  * | ||||||
|  |  * @res: The shader resource | ||||||
|  |  * | ||||||
|  |  * Frees the DX shader resource and updates memory accounting. | ||||||
|  |  */ | ||||||
|  | static void vmw_dx_shader_res_free(struct vmw_resource *res) | ||||||
|  | { | ||||||
|  | 	struct vmw_private *dev_priv = res->dev_priv; | ||||||
|  | 	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); | ||||||
|  | 
 | ||||||
|  | 	vmw_resource_unreference(&shader->cotable); | ||||||
|  | 	kfree(shader); | ||||||
|  | 	ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_dx_shader_add - Add a shader resource as a command buffer managed | ||||||
|  |  * resource. | ||||||
|  |  * | ||||||
|  |  * @man: The command buffer resource manager. | ||||||
|  |  * @ctx: Pointer to the context resource. | ||||||
|  |  * @user_key: The id used for this shader. | ||||||
|  |  * @shader_type: The shader type. | ||||||
|  |  * @list: The list of staged command buffer managed resources. | ||||||
|  |  */ | ||||||
|  | int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man, | ||||||
|  | 		      struct vmw_resource *ctx, | ||||||
|  | 		      u32 user_key, | ||||||
|  | 		      SVGA3dShaderType shader_type, | ||||||
|  | 		      struct list_head *list) | ||||||
|  | { | ||||||
|  | 	struct vmw_dx_shader *shader; | ||||||
|  | 	struct vmw_resource *res; | ||||||
|  | 	struct vmw_private *dev_priv = ctx->dev_priv; | ||||||
|  | 	int ret; | ||||||
|  | 
 | ||||||
|  | 	if (!vmw_shader_dx_size) | ||||||
|  | 		vmw_shader_dx_size = ttm_round_pot(sizeof(*shader)); | ||||||
|  | 
 | ||||||
|  | 	if (!vmw_shader_id_ok(user_key, shader_type)) | ||||||
|  | 		return -EINVAL; | ||||||
|  | 
 | ||||||
|  | 	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size, | ||||||
|  | 				   false, true); | ||||||
|  | 	if (ret) { | ||||||
|  | 		if (ret != -ERESTARTSYS) | ||||||
|  | 			DRM_ERROR("Out of graphics memory for shader " | ||||||
|  | 				  "creation.\n"); | ||||||
|  | 		return ret; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	shader = kmalloc(sizeof(*shader), GFP_KERNEL); | ||||||
|  | 	if (!shader) { | ||||||
|  | 		ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size); | ||||||
|  | 		return -ENOMEM; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	res = &shader->res; | ||||||
|  | 	shader->ctx = ctx; | ||||||
|  | 	shader->cotable = vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER); | ||||||
|  | 	shader->id = user_key; | ||||||
|  | 	shader->committed = false; | ||||||
|  | 	INIT_LIST_HEAD(&shader->cotable_head); | ||||||
|  | 	ret = vmw_resource_init(dev_priv, res, true, | ||||||
|  | 				vmw_dx_shader_res_free, &vmw_dx_shader_func); | ||||||
|  | 	if (ret) | ||||||
|  | 		goto out_resource_init; | ||||||
|  | 
 | ||||||
|  | 	/*
 | ||||||
|  | 	 * The user_key name-space is not per shader type for DX shaders, | ||||||
|  | 	 * so when hashing, use a single zero shader type. | ||||||
|  | 	 */ | ||||||
|  | 	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader, | ||||||
|  | 				 vmw_shader_key(user_key, 0), | ||||||
|  | 				 res, list); | ||||||
|  | 	if (ret) | ||||||
|  | 		goto out_resource_init; | ||||||
|  | 
 | ||||||
|  | 	res->id = shader->id; | ||||||
|  | 	vmw_resource_activate(res, vmw_hw_shader_destroy); | ||||||
|  | 
 | ||||||
|  | out_resource_init: | ||||||
|  | 	vmw_resource_unreference(&res); | ||||||
|  | 
 | ||||||
|  | 	return ret; | ||||||
|  | } | ||||||
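A sketch of how execbuf validation might stage a DX shader define through the function above; sw_context->staged_cmd_res as the staging list is an assumption modeled on the existing compat shader path:

	ret = vmw_dx_shader_add(vmw_context_res_man(ctx), ctx,
				cmd->body.shaderId, cmd->body.type,
				&sw_context->staged_cmd_res);
	if (ret)
		return ret;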
|  | 
 | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
| /**
 | /**
 | ||||||
|  * User-space shader management: |  * User-space shader management: | ||||||
|  */ |  */ | ||||||
|  | @ -341,6 +722,8 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv, | ||||||
| 				 size_t shader_size, | 				 size_t shader_size, | ||||||
| 				 size_t offset, | 				 size_t offset, | ||||||
| 				 SVGA3dShaderType shader_type, | 				 SVGA3dShaderType shader_type, | ||||||
|  | 				 uint8_t num_input_sig, | ||||||
|  | 				 uint8_t num_output_sig, | ||||||
| 				 struct ttm_object_file *tfile, | 				 struct ttm_object_file *tfile, | ||||||
| 				 u32 *handle) | 				 u32 *handle) | ||||||
| { | { | ||||||
|  | @ -383,7 +766,8 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv, | ||||||
| 	 */ | 	 */ | ||||||
| 
 | 
 | ||||||
| 	ret = vmw_gb_shader_init(dev_priv, res, shader_size, | 	ret = vmw_gb_shader_init(dev_priv, res, shader_size, | ||||||
| 				 offset, shader_type, buffer, | 				 offset, shader_type, num_input_sig, | ||||||
|  | 				 num_output_sig, buffer, | ||||||
| 				 vmw_user_shader_free); | 				 vmw_user_shader_free); | ||||||
| 	if (unlikely(ret != 0)) | 	if (unlikely(ret != 0)) | ||||||
| 		goto out; | 		goto out; | ||||||
|  | @ -449,7 +833,7 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv, | ||||||
| 	 * From here on, the destructor takes over resource freeing. | 	 * From here on, the destructor takes over resource freeing. | ||||||
| 	 */ | 	 */ | ||||||
| 	ret = vmw_gb_shader_init(dev_priv, res, shader_size, | 	ret = vmw_gb_shader_init(dev_priv, res, shader_size, | ||||||
| 				 offset, shader_type, buffer, | 				 offset, shader_type, 0, 0, buffer, | ||||||
| 				 vmw_shader_free); | 				 vmw_shader_free); | ||||||
| 
 | 
 | ||||||
| out_err: | out_err: | ||||||
|  | @ -457,19 +841,20 @@ out_err: | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| int vmw_shader_define_ioctl(struct drm_device *dev, void *data, | static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, | ||||||
| 			     struct drm_file *file_priv) | 			     enum drm_vmw_shader_type shader_type_drm, | ||||||
|  | 			     u32 buffer_handle, size_t size, size_t offset, | ||||||
|  | 			     uint8_t num_input_sig, uint8_t num_output_sig, | ||||||
|  | 			     uint32_t *shader_handle) | ||||||
| { | { | ||||||
| 	struct vmw_private *dev_priv = vmw_priv(dev); | 	struct vmw_private *dev_priv = vmw_priv(dev); | ||||||
| 	struct drm_vmw_shader_create_arg *arg = |  | ||||||
| 		(struct drm_vmw_shader_create_arg *)data; |  | ||||||
| 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||||||
| 	struct vmw_dma_buffer *buffer = NULL; | 	struct vmw_dma_buffer *buffer = NULL; | ||||||
| 	SVGA3dShaderType shader_type; | 	SVGA3dShaderType shader_type; | ||||||
| 	int ret; | 	int ret; | ||||||
| 
 | 
 | ||||||
| 	if (arg->buffer_handle != SVGA3D_INVALID_ID) { | 	if (buffer_handle != SVGA3D_INVALID_ID) { | ||||||
| 		ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle, | 		ret = vmw_user_dmabuf_lookup(tfile, buffer_handle, | ||||||
| 					     &buffer); | 					     &buffer); | ||||||
| 		if (unlikely(ret != 0)) { | 		if (unlikely(ret != 0)) { | ||||||
| 			DRM_ERROR("Could not find buffer for shader " | 			DRM_ERROR("Could not find buffer for shader " | ||||||
|  | @ -478,23 +863,20 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data, | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		if ((u64)buffer->base.num_pages * PAGE_SIZE < | 		if ((u64)buffer->base.num_pages * PAGE_SIZE < | ||||||
| 		    (u64)arg->size + (u64)arg->offset) { | 		    (u64)size + (u64)offset) { | ||||||
| 			DRM_ERROR("Illegal buffer- or shader size.\n"); | 			DRM_ERROR("Illegal buffer- or shader size.\n"); | ||||||
| 			ret = -EINVAL; | 			ret = -EINVAL; | ||||||
| 			goto out_bad_arg; | 			goto out_bad_arg; | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	switch (arg->shader_type) { | 	switch (shader_type_drm) { | ||||||
| 	case drm_vmw_shader_type_vs: | 	case drm_vmw_shader_type_vs: | ||||||
| 		shader_type = SVGA3D_SHADERTYPE_VS; | 		shader_type = SVGA3D_SHADERTYPE_VS; | ||||||
| 		break; | 		break; | ||||||
| 	case drm_vmw_shader_type_ps: | 	case drm_vmw_shader_type_ps: | ||||||
| 		shader_type = SVGA3D_SHADERTYPE_PS; | 		shader_type = SVGA3D_SHADERTYPE_PS; | ||||||
| 		break; | 		break; | ||||||
| 	case drm_vmw_shader_type_gs: |  | ||||||
| 		shader_type = SVGA3D_SHADERTYPE_GS; |  | ||||||
| 		break; |  | ||||||
| 	default: | 	default: | ||||||
| 		DRM_ERROR("Illegal shader type.\n"); | 		DRM_ERROR("Illegal shader type.\n"); | ||||||
| 		ret = -EINVAL; | 		ret = -EINVAL; | ||||||
|  | @ -505,8 +887,9 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data, | ||||||
| 	if (unlikely(ret != 0)) | 	if (unlikely(ret != 0)) | ||||||
| 		goto out_bad_arg; | 		goto out_bad_arg; | ||||||
| 
 | 
 | ||||||
| 	ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset, | 	ret = vmw_user_shader_alloc(dev_priv, buffer, size, offset, | ||||||
| 				    shader_type, tfile, &arg->shader_handle); | 				    shader_type, num_input_sig, | ||||||
|  | 				    num_output_sig, tfile, shader_handle); | ||||||
| 
 | 
 | ||||||
| 	ttm_read_unlock(&dev_priv->reservation_sem); | 	ttm_read_unlock(&dev_priv->reservation_sem); | ||||||
| out_bad_arg: | out_bad_arg: | ||||||
|  | @ -515,7 +898,7 @@ out_bad_arg: | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /**
 | /**
 | ||||||
|  * vmw_compat_shader_id_ok - Check whether a compat shader user key and |  * vmw_shader_id_ok - Check whether a compat shader user key and | ||||||
|  * shader type are within valid bounds. |  * shader type are within valid bounds. | ||||||
|  * |  * | ||||||
|  * @user_key: User space id of the shader. |  * @user_key: User space id of the shader. | ||||||
|  | @ -523,13 +906,13 @@ out_bad_arg: | ||||||
|  * |  * | ||||||
|  * Returns true if valid false if not. |  * Returns true if valid false if not. | ||||||
|  */ |  */ | ||||||
| static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type) | static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type) | ||||||
| { | { | ||||||
| 	return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16; | 	return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /**
 | /**
 | ||||||
|  * vmw_compat_shader_key - Compute a hash key suitable for a compat shader. |  * vmw_shader_key - Compute a hash key suitable for a compat shader. | ||||||
|  * |  * | ||||||
|  * @user_key: User space id of the shader. |  * @user_key: User space id of the shader. | ||||||
|  * @shader_type: Shader type. |  * @shader_type: Shader type. | ||||||
|  | @ -537,13 +920,13 @@ static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type) | ||||||
|  * Returns a hash key suitable for a command buffer managed resource |  * Returns a hash key suitable for a command buffer managed resource | ||||||
|  * manager hash table. |  * manager hash table. | ||||||
|  */ |  */ | ||||||
| static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type) | static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type) | ||||||
| { | { | ||||||
| 	return user_key | (shader_type << 20); | 	return user_key | (shader_type << 20); | ||||||
| } | } | ||||||
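
The packed key layout mirrors the bounds enforced by vmw_shader_id_ok(): bits 0-19 carry the user key and the bits above them the shader type, so the two functions must stay in sync. A minimal sketch of the packing and its inverse (the unpack helpers and all example_* names are illustrative only; the driver itself never needs them):

/* Illustrative only: how vmw_shader_key() lays out its operands. */
#define EXAMPLE_USER_BITS 20
#define EXAMPLE_USER_MASK ((1u << EXAMPLE_USER_BITS) - 1)

static inline u32 example_shader_key(u32 user_key, u32 shader_type)
{
	/* Valid inputs: user_key <= 0xfffff, shader_type < 16. */
	return user_key | (shader_type << EXAMPLE_USER_BITS);
}

static inline u32 example_key_user(u32 key)
{
	return key & EXAMPLE_USER_MASK;		/* low 20 bits */
}

static inline u32 example_key_type(u32 key)
{
	return key >> EXAMPLE_USER_BITS;	/* remaining high bits */
}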
| 
 | 
 | ||||||
| /**
 | /**
 | ||||||
|  * vmw_compat_shader_remove - Stage a compat shader for removal. |  * vmw_shader_remove - Stage a compat shader for removal. | ||||||
|  * |  * | ||||||
|  * @man: Pointer to the compat shader manager identifying the shader namespace. |  * @man: Pointer to the compat shader manager identifying the shader namespace. | ||||||
|  * @user_key: The key that is used to identify the shader. The key is |  * @user_key: The key that is used to identify the shader. The key is | ||||||
|  | @ -551,17 +934,18 @@ static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type) | ||||||
|  * @shader_type: Shader type. |  * @shader_type: Shader type. | ||||||
|  * @list: Caller's list of staged command buffer resource actions. |  * @list: Caller's list of staged command buffer resource actions. | ||||||
|  */ |  */ | ||||||
| int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man, | int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man, | ||||||
| 			     u32 user_key, SVGA3dShaderType shader_type, | 		      u32 user_key, SVGA3dShaderType shader_type, | ||||||
| 			     struct list_head *list) | 		      struct list_head *list) | ||||||
| { | { | ||||||
| 	if (!vmw_compat_shader_id_ok(user_key, shader_type)) | 	struct vmw_resource *dummy; | ||||||
|  | 
 | ||||||
|  | 	if (!vmw_shader_id_ok(user_key, shader_type)) | ||||||
| 		return -EINVAL; | 		return -EINVAL; | ||||||
| 
 | 
 | ||||||
| 	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader, | 	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_shader, | ||||||
| 				     vmw_compat_shader_key(user_key, | 				     vmw_shader_key(user_key, shader_type), | ||||||
| 							   shader_type), | 				     list, &dummy); | ||||||
| 				     list); |  | ||||||
| } | } | ||||||
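
Callers never tear the resource down directly; the removal is only staged on @list and takes effect when the caller later commits the list. A sketch of the intended calling pattern, assuming the vmw_cmdbuf_res_commit()/vmw_cmdbuf_res_revert() companions from vmwgfx_cmdbuf_res.c:

/*
 * Sketch only: stage a shader removal, then commit it, or revert the
 * staged list if a later step in the caller fails.
 */
static int example_remove_vs_shader(struct vmw_cmdbuf_res_manager *man,
				    u32 user_key)
{
	struct list_head staged;
	int ret;

	INIT_LIST_HEAD(&staged);
	ret = vmw_shader_remove(man, user_key, SVGA3D_SHADERTYPE_VS, &staged);
	if (ret) {
		vmw_cmdbuf_res_revert(&staged);	/* undo any staged actions */
		return ret;
	}
	vmw_cmdbuf_res_commit(&staged);		/* make the removal final */
	return 0;
}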
| 
 | 
 | ||||||
| /**
 | /**
 | ||||||
|  | @ -591,7 +975,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, | ||||||
| 	int ret; | 	int ret; | ||||||
| 	struct vmw_resource *res; | 	struct vmw_resource *res; | ||||||
| 
 | 
 | ||||||
| 	if (!vmw_compat_shader_id_ok(user_key, shader_type)) | 	if (!vmw_shader_id_ok(user_key, shader_type)) | ||||||
| 		return -EINVAL; | 		return -EINVAL; | ||||||
| 
 | 
 | ||||||
| 	/* Allocate and pin a DMA buffer */ | 	/* Allocate and pin a DMA buffer */ | ||||||
|  | @ -628,8 +1012,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, | ||||||
| 	if (unlikely(ret != 0)) | 	if (unlikely(ret != 0)) | ||||||
| 		goto no_reserve; | 		goto no_reserve; | ||||||
| 
 | 
 | ||||||
| 	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader, | 	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader, | ||||||
| 				 vmw_compat_shader_key(user_key, shader_type), | 				 vmw_shader_key(user_key, shader_type), | ||||||
| 				 res, list); | 				 res, list); | ||||||
| 	vmw_resource_unreference(&res); | 	vmw_resource_unreference(&res); | ||||||
| no_reserve: | no_reserve: | ||||||
|  | @ -639,7 +1023,7 @@ out: | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /**
 | /**
 | ||||||
|  * vmw_compat_shader_lookup - Look up a compat shader |  * vmw_shader_lookup - Look up a compat shader | ||||||
|  * |  * | ||||||
|  * @man: Pointer to the command buffer managed resource manager identifying |  * @man: Pointer to the command buffer managed resource manager identifying | ||||||
|  * the shader namespace. |  * the shader namespace. | ||||||
|  | @ -650,14 +1034,26 @@ out: | ||||||
|  * found. An error pointer otherwise. |  * found. An error pointer otherwise. | ||||||
|  */ |  */ | ||||||
| struct vmw_resource * | struct vmw_resource * | ||||||
| vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man, | vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man, | ||||||
| 			 u32 user_key, | 		  u32 user_key, | ||||||
| 			 SVGA3dShaderType shader_type) | 		  SVGA3dShaderType shader_type) | ||||||
| { | { | ||||||
| 	if (!vmw_compat_shader_id_ok(user_key, shader_type)) | 	if (!vmw_shader_id_ok(user_key, shader_type)) | ||||||
| 		return ERR_PTR(-EINVAL); | 		return ERR_PTR(-EINVAL); | ||||||
| 
 | 
 | ||||||
| 	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader, | 	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_shader, | ||||||
| 				     vmw_compat_shader_key(user_key, | 				     vmw_shader_key(user_key, shader_type)); | ||||||
| 							   shader_type)); | } | ||||||
|  | 
 | ||||||
|  | int vmw_shader_define_ioctl(struct drm_device *dev, void *data, | ||||||
|  | 			     struct drm_file *file_priv) | ||||||
|  | { | ||||||
|  | 	struct drm_vmw_shader_create_arg *arg = | ||||||
|  | 		(struct drm_vmw_shader_create_arg *)data; | ||||||
|  | 
 | ||||||
|  | 	return vmw_shader_define(dev, file_priv, arg->shader_type, | ||||||
|  | 				 arg->buffer_handle, | ||||||
|  | 				 arg->size, arg->offset, | ||||||
|  | 				 0, 0, | ||||||
|  | 				 &arg->shader_handle); | ||||||
| } | } | ||||||
|  |  | ||||||
							
								
								
									
555	drivers/gpu/drm/vmwgfx/vmwgfx_so.c	(new file)
|  | @ -0,0 +1,555 @@ | ||||||
|  | /**************************************************************************
 | ||||||
|  |  * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA | ||||||
|  |  * All Rights Reserved. | ||||||
|  |  * | ||||||
|  |  * Permission is hereby granted, free of charge, to any person obtaining a | ||||||
|  |  * copy of this software and associated documentation files (the | ||||||
|  |  * "Software"), to deal in the Software without restriction, including | ||||||
|  |  * without limitation the rights to use, copy, modify, merge, publish, | ||||||
|  |  * distribute, sub license, and/or sell copies of the Software, and to | ||||||
|  |  * permit persons to whom the Software is furnished to do so, subject to | ||||||
|  |  * the following conditions: | ||||||
|  |  * | ||||||
|  |  * The above copyright notice and this permission notice (including the | ||||||
|  |  * next paragraph) shall be included in all copies or substantial portions | ||||||
|  |  * of the Software. | ||||||
|  |  * | ||||||
|  |  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||||
|  |  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||||
|  |  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||||||
|  |  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||||||
|  |  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||||||
|  |  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||||||
|  |  * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||||||
|  |  * | ||||||
|  |  **************************************************************************/ | ||||||
|  | 
 | ||||||
|  | #include "vmwgfx_drv.h" | ||||||
|  | #include "vmwgfx_resource_priv.h" | ||||||
|  | #include "vmwgfx_so.h" | ||||||
|  | #include "vmwgfx_binding.h" | ||||||
|  | 
 | ||||||
|  | /*
 | ||||||
|  |  * Currently, the only reason we need to keep track of views is that if we | ||||||
|  |  * destroy a hardware surface, all views pointing to it must also be destroyed, | ||||||
|  |  * otherwise the device will error. | ||||||
|  |  * So in particular if a surface is evicted, we must destroy all views pointing | ||||||
|  |  * to it, and all context bindings of that view. Similarly we must restore | ||||||
|  |  * the view bindings, views and surfaces pointed to by the views when a | ||||||
|  |  * context is referenced in the command stream. | ||||||
|  |  */ | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * struct vmw_view - view metadata | ||||||
|  |  * | ||||||
|  |  * @res: The struct vmw_resource we derive from | ||||||
|  |  * @ctx: Non-refcounted pointer to the context this view belongs to. | ||||||
|  |  * @srf: Refcounted pointer to the surface pointed to by this view. | ||||||
|  |  * @cotable: Refcounted pointer to the cotable holding this view. | ||||||
|  |  * @srf_head: List head for the surface-to-view list. | ||||||
|  |  * @cotable_head: List head for the cotable-to-view list. | ||||||
|  |  * @view_type: View type. | ||||||
|  |  * @view_id: User-space per context view id. Currently used also as per | ||||||
|  |  * context device view id. | ||||||
|  |  * @cmd_size: Size of the SVGA3D define view command that we've copied from the | ||||||
|  |  * command stream. | ||||||
|  |  * @committed: Whether the view is actually created or pending creation at the | ||||||
|  |  * device level. | ||||||
|  |  * @cmd: The SVGA3D define view command copied from the command stream. | ||||||
|  |  */ | ||||||
|  | struct vmw_view { | ||||||
|  | 	struct rcu_head rcu; | ||||||
|  | 	struct vmw_resource res; | ||||||
|  | 	struct vmw_resource *ctx;      /* Immutable */ | ||||||
|  | 	struct vmw_resource *srf;      /* Immutable */ | ||||||
|  | 	struct vmw_resource *cotable;  /* Immutable */ | ||||||
|  | 	struct list_head srf_head;     /* Protected by binding_mutex */ | ||||||
|  | 	struct list_head cotable_head; /* Protected by binding_mutex */ | ||||||
|  | 	unsigned view_type;            /* Immutable */ | ||||||
|  | 	unsigned view_id;              /* Immutable */ | ||||||
|  | 	u32 cmd_size;                  /* Immutable */ | ||||||
|  | 	bool committed;                /* Protected by binding_mutex */ | ||||||
|  | 	u32 cmd[1];                    /* Immutable */ | ||||||
|  | }; | ||||||
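
The trailing cmd[1] member makes struct vmw_view a variable-length object: the define command copied from the command stream is stored inline, and both vmw_view_add() and vmw_view_res_free() size the object with offsetof() rather than sizeof(). A sketch of that allocation pattern (the function name is an example only):

/* Illustrative: sizing a struct with a trailing command copy. */
static struct vmw_view *example_view_alloc(size_t cmd_size)
{
	/* offsetof() excludes cmd[1], so cmd_size bytes land exactly there. */
	size_t size = offsetof(struct vmw_view, cmd) + cmd_size;

	return kmalloc(size, GFP_KERNEL);
}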
|  | 
 | ||||||
|  | static int vmw_view_create(struct vmw_resource *res); | ||||||
|  | static int vmw_view_destroy(struct vmw_resource *res); | ||||||
|  | static void vmw_hw_view_destroy(struct vmw_resource *res); | ||||||
|  | static void vmw_view_commit_notify(struct vmw_resource *res, | ||||||
|  | 				   enum vmw_cmdbuf_res_state state); | ||||||
|  | 
 | ||||||
|  | static const struct vmw_res_func vmw_view_func = { | ||||||
|  | 	.res_type = vmw_res_view, | ||||||
|  | 	.needs_backup = false, | ||||||
|  | 	.may_evict = false, | ||||||
|  | 	.type_name = "DX view", | ||||||
|  | 	.backup_placement = NULL, | ||||||
|  | 	.create = vmw_view_create, | ||||||
|  | 	.commit_notify = vmw_view_commit_notify, | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * struct vmw_view_define - view define command body stub | ||||||
|  |  * | ||||||
|  |  * @view_id: The device id of the view being defined | ||||||
|  |  * @sid: The surface id of the view being defined | ||||||
|  |  * | ||||||
|  |  * This generic struct is used by the code to change @view_id and @sid of a | ||||||
|  |  * saved view define command. | ||||||
|  |  */ | ||||||
|  | struct vmw_view_define { | ||||||
|  | 	uint32 view_id; | ||||||
|  | 	uint32 sid; | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_view - Convert a struct vmw_resource to a struct vmw_view | ||||||
|  |  * | ||||||
|  |  * @res: Pointer to the resource to convert. | ||||||
|  |  * | ||||||
|  |  * Returns a pointer to a struct vmw_view. | ||||||
|  |  */ | ||||||
|  | static struct vmw_view *vmw_view(struct vmw_resource *res) | ||||||
|  | { | ||||||
|  | 	return container_of(res, struct vmw_view, res); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_view_commit_notify - Notify that a view operation has been committed to | ||||||
|  |  * hardware from a user-supplied command stream. | ||||||
|  |  * | ||||||
|  |  * @res: Pointer to the view resource. | ||||||
|  |  * @state: Indicating whether a creation or removal has been committed. | ||||||
|  |  * | ||||||
|  |  */ | ||||||
|  | static void vmw_view_commit_notify(struct vmw_resource *res, | ||||||
|  | 				   enum vmw_cmdbuf_res_state state) | ||||||
|  | { | ||||||
|  | 	struct vmw_view *view = vmw_view(res); | ||||||
|  | 	struct vmw_private *dev_priv = res->dev_priv; | ||||||
|  | 
 | ||||||
|  | 	mutex_lock(&dev_priv->binding_mutex); | ||||||
|  | 	if (state == VMW_CMDBUF_RES_ADD) { | ||||||
|  | 		struct vmw_surface *srf = vmw_res_to_srf(view->srf); | ||||||
|  | 
 | ||||||
|  | 		list_add_tail(&view->srf_head, &srf->view_list); | ||||||
|  | 		vmw_cotable_add_resource(view->cotable, &view->cotable_head); | ||||||
|  | 		view->committed = true; | ||||||
|  | 		res->id = view->view_id; | ||||||
|  | 
 | ||||||
|  | 	} else { | ||||||
|  | 		list_del_init(&view->cotable_head); | ||||||
|  | 		list_del_init(&view->srf_head); | ||||||
|  | 		view->committed = false; | ||||||
|  | 		res->id = -1; | ||||||
|  | 	} | ||||||
|  | 	mutex_unlock(&dev_priv->binding_mutex); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_view_create - Create a hardware view. | ||||||
|  |  * | ||||||
|  |  * @res: Pointer to the view resource. | ||||||
|  |  * | ||||||
|  |  * Create a hardware view. Typically used if that view has previously been | ||||||
|  |  * destroyed by an eviction operation. | ||||||
|  |  */ | ||||||
|  | static int vmw_view_create(struct vmw_resource *res) | ||||||
|  | { | ||||||
|  | 	struct vmw_view *view = vmw_view(res); | ||||||
|  | 	struct vmw_surface *srf = vmw_res_to_srf(view->srf); | ||||||
|  | 	struct vmw_private *dev_priv = res->dev_priv; | ||||||
|  | 	struct { | ||||||
|  | 		SVGA3dCmdHeader header; | ||||||
|  | 		struct vmw_view_define body; | ||||||
|  | 	} *cmd; | ||||||
|  | 
 | ||||||
|  | 	mutex_lock(&dev_priv->binding_mutex); | ||||||
|  | 	if (!view->committed) { | ||||||
|  | 		mutex_unlock(&dev_priv->binding_mutex); | ||||||
|  | 		return 0; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	cmd = vmw_fifo_reserve_dx(res->dev_priv, view->cmd_size, | ||||||
|  | 				  view->ctx->id); | ||||||
|  | 	if (!cmd) { | ||||||
|  | 		DRM_ERROR("Failed reserving FIFO space for view creation.\n"); | ||||||
|  | 		mutex_unlock(&dev_priv->binding_mutex); | ||||||
|  | 		return -ENOMEM; | ||||||
|  | 	} | ||||||
|  | 	memcpy(cmd, &view->cmd, view->cmd_size); | ||||||
|  | 	WARN_ON(cmd->body.view_id != view->view_id); | ||||||
|  | 	/* Sid may have changed due to surface eviction. */ | ||||||
|  | 	WARN_ON(view->srf->id == SVGA3D_INVALID_ID); | ||||||
|  | 	cmd->body.sid = view->srf->id; | ||||||
|  | 	vmw_fifo_commit(res->dev_priv, view->cmd_size); | ||||||
|  | 	res->id = view->view_id; | ||||||
|  | 	list_add_tail(&view->srf_head, &srf->view_list); | ||||||
|  | 	vmw_cotable_add_resource(view->cotable, &view->cotable_head); | ||||||
|  | 	mutex_unlock(&dev_priv->binding_mutex); | ||||||
|  | 
 | ||||||
|  | 	return 0; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_view_destroy - Destroy a hardware view. | ||||||
|  |  * | ||||||
|  |  * @res: Pointer to the view resource. | ||||||
|  |  * | ||||||
|  |  * Destroy a hardware view. Typically used on unexpected termination of the | ||||||
|  |  * owning process or if the surface the view is pointing to is destroyed. | ||||||
|  |  */ | ||||||
|  | static int vmw_view_destroy(struct vmw_resource *res) | ||||||
|  | { | ||||||
|  | 	struct vmw_private *dev_priv = res->dev_priv; | ||||||
|  | 	struct vmw_view *view = vmw_view(res); | ||||||
|  | 	struct { | ||||||
|  | 		SVGA3dCmdHeader header; | ||||||
|  | 		union vmw_view_destroy body; | ||||||
|  | 	} *cmd; | ||||||
|  | 
 | ||||||
|  | 	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); | ||||||
|  | 	vmw_binding_res_list_scrub(&res->binding_head); | ||||||
|  | 
 | ||||||
|  | 	if (!view->committed || res->id == -1) | ||||||
|  | 		return 0; | ||||||
|  | 
 | ||||||
|  | 	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), view->ctx->id); | ||||||
|  | 	if (!cmd) { | ||||||
|  | 		DRM_ERROR("Failed reserving FIFO space for view " | ||||||
|  | 			  "destruction.\n"); | ||||||
|  | 		return -ENOMEM; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	cmd->header.id = vmw_view_destroy_cmds[view->view_type]; | ||||||
|  | 	cmd->header.size = sizeof(cmd->body); | ||||||
|  | 	cmd->body.view_id = view->view_id; | ||||||
|  | 	vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||||||
|  | 	res->id = -1; | ||||||
|  | 	list_del_init(&view->cotable_head); | ||||||
|  | 	list_del_init(&view->srf_head); | ||||||
|  | 
 | ||||||
|  | 	return 0; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_hw_view_destroy - Destroy a hardware view as part of resource cleanup. | ||||||
|  |  * | ||||||
|  |  * @res: Pointer to the view resource. | ||||||
|  |  * | ||||||
|  |  * Destroy a hardware view if it's still present. | ||||||
|  |  */ | ||||||
|  | static void vmw_hw_view_destroy(struct vmw_resource *res) | ||||||
|  | { | ||||||
|  | 	struct vmw_private *dev_priv = res->dev_priv; | ||||||
|  | 
 | ||||||
|  | 	mutex_lock(&dev_priv->binding_mutex); | ||||||
|  | 	WARN_ON(vmw_view_destroy(res)); | ||||||
|  | 	res->id = -1; | ||||||
|  | 	mutex_unlock(&dev_priv->binding_mutex); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_view_key - Compute a view key suitable for the cmdbuf resource manager | ||||||
|  |  * | ||||||
|  |  * @user_key: The user-space id used for the view. | ||||||
|  |  * @view_type: The view type. | ||||||
|  |  * | ||||||
|  |  * Returns a key suitable for the cmdbuf resource manager hash table. | ||||||
|  |  */ | ||||||
|  | static u32 vmw_view_key(u32 user_key, enum vmw_view_type view_type) | ||||||
|  | { | ||||||
|  | 	return user_key | (view_type << 20); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_view_id_ok - Basic view id and type range checks. | ||||||
|  |  * | ||||||
|  |  * @user_key: The user-space id used for the view. | ||||||
|  |  * @view_type: The view type. | ||||||
|  |  * | ||||||
|  |  * Checks that the view id and type (typically provided by user-space) is | ||||||
|  |  * valid. | ||||||
|  |  */ | ||||||
|  | static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type) | ||||||
|  | { | ||||||
|  | 	return (user_key < SVGA_COTABLE_MAX_IDS && | ||||||
|  | 		view_type < vmw_view_max); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_view_res_free - resource res_free callback for view resources | ||||||
|  |  * | ||||||
|  |  * @res: Pointer to a struct vmw_resource | ||||||
|  |  * | ||||||
|  |  * Frees memory and memory accounting held by a struct vmw_view. | ||||||
|  |  */ | ||||||
|  | static void vmw_view_res_free(struct vmw_resource *res) | ||||||
|  | { | ||||||
|  | 	struct vmw_view *view = vmw_view(res); | ||||||
|  | 	size_t size = offsetof(struct vmw_view, cmd) + view->cmd_size; | ||||||
|  | 	struct vmw_private *dev_priv = res->dev_priv; | ||||||
|  | 
 | ||||||
|  | 	vmw_resource_unreference(&view->cotable); | ||||||
|  | 	vmw_resource_unreference(&view->srf); | ||||||
|  | 	kfree_rcu(view, rcu); | ||||||
|  | 	ttm_mem_global_free(vmw_mem_glob(dev_priv), size); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_view_add - Create a view resource and stage it for addition | ||||||
|  |  * as a command buffer managed resource. | ||||||
|  |  * | ||||||
|  |  * @man: Pointer to the cmdbuf resource manager identifying the view namespace. | ||||||
|  |  * @ctx: Pointer to a struct vmw_resource identifying the active context. | ||||||
|  |  * @srf: Pointer to a struct vmw_resource identifying the surface the view | ||||||
|  |  * points to. | ||||||
|  |  * @view_type: The view type deduced from the view create command. | ||||||
|  |  * @user_key: The key that is used to identify the view. The key is | ||||||
|  |  * unique to the view type and to the context. | ||||||
|  |  * @cmd: Pointer to the view create command in the command stream. | ||||||
|  |  * @cmd_size: Size of the view create command in the command stream. | ||||||
|  |  * @list: Caller's list of staged command buffer resource actions. | ||||||
|  |  */ | ||||||
|  | int vmw_view_add(struct vmw_cmdbuf_res_manager *man, | ||||||
|  | 		 struct vmw_resource *ctx, | ||||||
|  | 		 struct vmw_resource *srf, | ||||||
|  | 		 enum vmw_view_type view_type, | ||||||
|  | 		 u32 user_key, | ||||||
|  | 		 const void *cmd, | ||||||
|  | 		 size_t cmd_size, | ||||||
|  | 		 struct list_head *list) | ||||||
|  | { | ||||||
|  | 	static const size_t vmw_view_define_sizes[] = { | ||||||
|  | 		[vmw_view_sr] = sizeof(SVGA3dCmdDXDefineShaderResourceView), | ||||||
|  | 		[vmw_view_rt] = sizeof(SVGA3dCmdDXDefineRenderTargetView), | ||||||
|  | 		[vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView) | ||||||
|  | 	}; | ||||||
|  | 
 | ||||||
|  | 	struct vmw_private *dev_priv = ctx->dev_priv; | ||||||
|  | 	struct vmw_resource *res; | ||||||
|  | 	struct vmw_view *view; | ||||||
|  | 	size_t size; | ||||||
|  | 	int ret; | ||||||
|  | 
 | ||||||
|  | 	if (cmd_size != vmw_view_define_sizes[view_type] + | ||||||
|  | 	    sizeof(SVGA3dCmdHeader)) { | ||||||
|  | 		DRM_ERROR("Illegal view create command size.\n"); | ||||||
|  | 		return -EINVAL; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if (!vmw_view_id_ok(user_key, view_type)) { | ||||||
|  | 		DRM_ERROR("Illegal view add view id.\n"); | ||||||
|  | 		return -EINVAL; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	size = offsetof(struct vmw_view, cmd) + cmd_size; | ||||||
|  | 
 | ||||||
|  | 	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, false, true); | ||||||
|  | 	if (ret) { | ||||||
|  | 		if (ret != -ERESTARTSYS) | ||||||
|  | 			DRM_ERROR("Out of graphics memory for view" | ||||||
|  | 				  " creation.\n"); | ||||||
|  | 		return ret; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	view = kmalloc(size, GFP_KERNEL); | ||||||
|  | 	if (!view) { | ||||||
|  | 		ttm_mem_global_free(vmw_mem_glob(dev_priv), size); | ||||||
|  | 		return -ENOMEM; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	res = &view->res; | ||||||
|  | 	view->ctx = ctx; | ||||||
|  | 	view->srf = vmw_resource_reference(srf); | ||||||
|  | 	view->cotable = vmw_context_cotable(ctx, vmw_view_cotables[view_type]); | ||||||
|  | 	view->view_type = view_type; | ||||||
|  | 	view->view_id = user_key; | ||||||
|  | 	view->cmd_size = cmd_size; | ||||||
|  | 	view->committed = false; | ||||||
|  | 	INIT_LIST_HEAD(&view->srf_head); | ||||||
|  | 	INIT_LIST_HEAD(&view->cotable_head); | ||||||
|  | 	memcpy(&view->cmd, cmd, cmd_size); | ||||||
|  | 	ret = vmw_resource_init(dev_priv, res, true, | ||||||
|  | 				vmw_view_res_free, &vmw_view_func); | ||||||
|  | 	if (ret) | ||||||
|  | 		goto out_resource_init; | ||||||
|  | 
 | ||||||
|  | 	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_view, | ||||||
|  | 				 vmw_view_key(user_key, view_type), | ||||||
|  | 				 res, list); | ||||||
|  | 	if (ret) | ||||||
|  | 		goto out_resource_init; | ||||||
|  | 
 | ||||||
|  | 	res->id = view->view_id; | ||||||
|  | 	vmw_resource_activate(res, vmw_hw_view_destroy); | ||||||
|  | 
 | ||||||
|  | out_resource_init: | ||||||
|  | 	vmw_resource_unreference(&res); | ||||||
|  | 
 | ||||||
|  | 	return ret; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_view_remove - Stage a view for removal. | ||||||
|  |  * | ||||||
|  |  * @man: Pointer to the view manager identifying the view namespace. | ||||||
|  |  * @user_key: The key that is used to identify the view. The key is | ||||||
|  |  * unique to the view type. | ||||||
|  |  * @view_type: View type | ||||||
|  |  * @list: Caller's list of staged command buffer resource actions. | ||||||
|  |  * @res_p: If the resource is in an already committed state, points to the | ||||||
|  |  * struct vmw_resource on successful return. The pointer will be | ||||||
|  |  * non ref-counted. | ||||||
|  |  */ | ||||||
|  | int vmw_view_remove(struct vmw_cmdbuf_res_manager *man, | ||||||
|  | 		    u32 user_key, enum vmw_view_type view_type, | ||||||
|  | 		    struct list_head *list, | ||||||
|  | 		    struct vmw_resource **res_p) | ||||||
|  | { | ||||||
|  | 	if (!vmw_view_id_ok(user_key, view_type)) { | ||||||
|  | 		DRM_ERROR("Illegal view remove view id.\n"); | ||||||
|  | 		return -EINVAL; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_view, | ||||||
|  | 				     vmw_view_key(user_key, view_type), | ||||||
|  | 				     list, res_p); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_view_cotable_list_destroy - Evict all views belonging to a cotable. | ||||||
|  |  * | ||||||
|  |  * @dev_priv: Pointer to a device private struct. | ||||||
|  |  * @list: List of views belonging to a cotable. | ||||||
|  |  * @readback: Unused. Needed for function interface only. | ||||||
|  |  * | ||||||
|  |  * This function evicts all views belonging to a cotable. | ||||||
|  |  * It must be called with the binding_mutex held, and the caller must hold | ||||||
|  |  * a reference to the view resource. This is typically called before the | ||||||
|  |  * cotable is paged out. | ||||||
|  |  */ | ||||||
|  | void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv, | ||||||
|  | 				   struct list_head *list, | ||||||
|  | 				   bool readback) | ||||||
|  | { | ||||||
|  | 	struct vmw_view *entry, *next; | ||||||
|  | 
 | ||||||
|  | 	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); | ||||||
|  | 
 | ||||||
|  | 	list_for_each_entry_safe(entry, next, list, cotable_head) | ||||||
|  | 		WARN_ON(vmw_view_destroy(&entry->res)); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_view_surface_list_destroy - Evict all views pointing to a surface | ||||||
|  |  * | ||||||
|  |  * @dev_priv: Pointer to a device private struct. | ||||||
|  |  * @list: List of views pointing to a surface. | ||||||
|  |  * | ||||||
|  |  * This function evicts all views pointing to a surface. This is typically | ||||||
|  |  * called before the surface is evicted. | ||||||
|  |  */ | ||||||
|  | void vmw_view_surface_list_destroy(struct vmw_private *dev_priv, | ||||||
|  | 				   struct list_head *list) | ||||||
|  | { | ||||||
|  | 	struct vmw_view *entry, *next; | ||||||
|  | 
 | ||||||
|  | 	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); | ||||||
|  | 
 | ||||||
|  | 	list_for_each_entry_safe(entry, next, list, srf_head) | ||||||
|  | 		WARN_ON(vmw_view_destroy(&entry->res)); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_view_srf - Return a non-refcounted pointer to the surface a view is | ||||||
|  |  * pointing to. | ||||||
|  |  * | ||||||
|  |  * @res: pointer to a view resource. | ||||||
|  |  * | ||||||
|  |  * Note that the view itself is holding a reference, so as long as | ||||||
|  |  * the view resource is alive, the surface resource will be. | ||||||
|  |  */ | ||||||
|  | struct vmw_resource *vmw_view_srf(struct vmw_resource *res) | ||||||
|  | { | ||||||
|  | 	return vmw_view(res)->srf; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * vmw_view_lookup - Look up a view. | ||||||
|  |  * | ||||||
|  |  * @man: The context's cmdbuf ref manager. | ||||||
|  |  * @view_type: The view type. | ||||||
|  |  * @user_key: The view user id. | ||||||
|  |  * | ||||||
|  |  * returns a refcounted pointer to a view or an error pointer if not found. | ||||||
|  |  */ | ||||||
|  | struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man, | ||||||
|  | 				     enum vmw_view_type view_type, | ||||||
|  | 				     u32 user_key) | ||||||
|  | { | ||||||
|  | 	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_view, | ||||||
|  | 				     vmw_view_key(user_key, view_type)); | ||||||
|  | } | ||||||
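
Per the kernel-doc above, the lookup returns a refcounted pointer, so callers pair it with vmw_resource_unreference(). A sketch of the caller pattern (the function name is an example only):

/* Sketch only: look up a render-target view and drop the reference. */
static int example_use_rt_view(struct vmw_cmdbuf_res_manager *man,
			       u32 user_key)
{
	struct vmw_resource *res = vmw_view_lookup(man, vmw_view_rt, user_key);

	if (IS_ERR(res))
		return PTR_ERR(res);

	/* ... inspect or bind the view here ... */

	vmw_resource_unreference(&res);
	return 0;
}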
|  | 
 | ||||||
|  | const u32 vmw_view_destroy_cmds[] = { | ||||||
|  | 	[vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW, | ||||||
|  | 	[vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW, | ||||||
|  | 	[vmw_view_ds] = SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW, | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | const SVGACOTableType vmw_view_cotables[] = { | ||||||
|  | 	[vmw_view_sr] = SVGA_COTABLE_SRVIEW, | ||||||
|  | 	[vmw_view_rt] = SVGA_COTABLE_RTVIEW, | ||||||
|  | 	[vmw_view_ds] = SVGA_COTABLE_DSVIEW, | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | const SVGACOTableType vmw_so_cotables[] = { | ||||||
|  | 	[vmw_so_el] = SVGA_COTABLE_ELEMENTLAYOUT, | ||||||
|  | 	[vmw_so_bs] = SVGA_COTABLE_BLENDSTATE, | ||||||
|  | 	[vmw_so_ds] = SVGA_COTABLE_DEPTHSTENCIL, | ||||||
|  | 	[vmw_so_rs] = SVGA_COTABLE_RASTERIZERSTATE, | ||||||
|  | 	[vmw_so_ss] = SVGA_COTABLE_SAMPLER, | ||||||
|  | 	[vmw_so_so] = SVGA_COTABLE_STREAMOUTPUT | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | /* To remove unused function warning */ | ||||||
|  | static void vmw_so_build_asserts(void) __attribute__((used)); | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | /*
 | ||||||
|  |  * This function is unused at run-time, and only used to hold various build | ||||||
|  |  * asserts important for code optimization assumptions. | ||||||
|  |  */ | ||||||
|  | static void vmw_so_build_asserts(void) | ||||||
|  | { | ||||||
|  | 	/* Assert that our vmw_view_cmd_to_type() function is correct. */ | ||||||
|  | 	BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW != | ||||||
|  | 		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 1); | ||||||
|  | 	BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW != | ||||||
|  | 		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 2); | ||||||
|  | 	BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW != | ||||||
|  | 		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 3); | ||||||
|  | 	BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW != | ||||||
|  | 		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 4); | ||||||
|  | 	BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW != | ||||||
|  | 		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 5); | ||||||
|  | 
 | ||||||
|  | 	/* Assert that our "one body fits all" assumption is valid */ | ||||||
|  | 	BUILD_BUG_ON(sizeof(union vmw_view_destroy) != sizeof(u32)); | ||||||
|  | 
 | ||||||
|  | 	/* Assert that the view key space can hold all view ids. */ | ||||||
|  | 	BUILD_BUG_ON(SVGA_COTABLE_MAX_IDS >= ((1 << 20) - 1)); | ||||||
|  | 
 | ||||||
|  | 	/*
 | ||||||
|  | 	 * Assert that the offset of sid in all view define commands | ||||||
|  | 	 * is what we assume it to be. | ||||||
|  | 	 */ | ||||||
|  | 	BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) != | ||||||
|  | 		     offsetof(SVGA3dCmdDXDefineShaderResourceView, sid)); | ||||||
|  | 	BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) != | ||||||
|  | 		     offsetof(SVGA3dCmdDXDefineRenderTargetView, sid)); | ||||||
|  | 	BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) != | ||||||
|  | 		     offsetof(SVGA3dCmdDXDefineDepthStencilView, sid)); | ||||||
|  | } | ||||||
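
BUILD_BUG_ON() fails the compile when its condition is true, which is what lets these checks cost nothing at run time. The kernel's real macro lives in <linux/bug.h>; one classic illustrative definition of the same idea (an assumption for exposition, not the kernel's implementation):

/* Illustrative only, not the kernel's implementation. */
#define EXAMPLE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
/* A true condition yields char[-1], a negative-size array: compile error. */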
							
								
								
									
160	drivers/gpu/drm/vmwgfx/vmwgfx_so.h	(new file)
|  | @ -0,0 +1,160 @@ | ||||||
|  | /**************************************************************************
 | ||||||
|  |  * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA | ||||||
|  |  * All Rights Reserved. | ||||||
|  |  * | ||||||
|  |  * Permission is hereby granted, free of charge, to any person obtaining a | ||||||
|  |  * copy of this software and associated documentation files (the | ||||||
|  |  * "Software"), to deal in the Software without restriction, including | ||||||
|  |  * without limitation the rights to use, copy, modify, merge, publish, | ||||||
|  |  * distribute, sub license, and/or sell copies of the Software, and to | ||||||
|  |  * permit persons to whom the Software is furnished to do so, subject to | ||||||
|  |  * the following conditions: | ||||||
|  |  * | ||||||
|  |  * The above copyright notice and this permission notice (including the | ||||||
|  |  * next paragraph) shall be included in all copies or substantial portions | ||||||
|  |  * of the Software. | ||||||
|  |  * | ||||||
|  |  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||||
|  |  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||||
|  |  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||||||
|  |  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||||||
|  |  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||||||
|  |  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||||||
|  |  * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||||||
|  |  * | ||||||
|  |  **************************************************************************/ | ||||||
|  | #ifndef VMW_SO_H | ||||||
|  | #define VMW_SO_H | ||||||
|  | 
 | ||||||
|  | enum vmw_view_type { | ||||||
|  | 	vmw_view_sr, | ||||||
|  | 	vmw_view_rt, | ||||||
|  | 	vmw_view_ds, | ||||||
|  | 	vmw_view_max, | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | enum vmw_so_type { | ||||||
|  | 	vmw_so_el, | ||||||
|  | 	vmw_so_bs, | ||||||
|  | 	vmw_so_ds, | ||||||
|  | 	vmw_so_rs, | ||||||
|  | 	vmw_so_ss, | ||||||
|  | 	vmw_so_so, | ||||||
|  | 	vmw_so_max, | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | /**
 | ||||||
|  |  * union vmw_view_destroy - view destruction command body | ||||||
|  |  * | ||||||
|  |  * @rtv: RenderTarget view destruction command body | ||||||
|  |  * @srv: ShaderResource view destruction command body | ||||||
|  |  * @dsv: DepthStencil view destruction command body | ||||||
|  |  * @view_id: A single u32 view id. | ||||||
|  |  * | ||||||
|  |  * The assumption here is that all union members are really represented by a | ||||||
|  |  * single u32 in the command stream. If that's not the case, | ||||||
|  |  * the size of this union will not equal the size of a u32, and the | ||||||
|  |  * assumption is invalid, and we detect that at compile time in the | ||||||
|  |  * vmw_so_build_asserts() function. | ||||||
|  |  */ | ||||||
|  | union vmw_view_destroy { | ||||||
|  | 	struct SVGA3dCmdDXDestroyRenderTargetView rtv; | ||||||
|  | 	struct SVGA3dCmdDXDestroyShaderResourceView srv; | ||||||
|  | 	struct SVGA3dCmdDXDestroyDepthStencilView dsv; | ||||||
|  | 	u32 view_id; | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | /* Map enum vmw_view_type to view destroy command ids*/ | ||||||
|  | extern const u32 vmw_view_destroy_cmds[]; | ||||||
|  | 
 | ||||||
|  | /* Map enum vmw_view_type to SVGACOTableType */ | ||||||
|  | extern const SVGACOTableType vmw_view_cotables[]; | ||||||
|  | 
 | ||||||
|  | /* Map enum vmw_so_type to SVGACOTableType */ | ||||||
|  | extern const SVGACOTableType vmw_so_cotables[]; | ||||||
|  | 
 | ||||||
|  | /*
 | ||||||
|  |  * vmw_view_cmd_to_type - Return the view type for a create or destroy command | ||||||
|  |  * | ||||||
|  |  * @id: The SVGA3D command id. | ||||||
|  |  * | ||||||
|  |  * For a given view create or destroy command id, return the corresponding | ||||||
|  |  * enum vmw_view_type. If the command is unknown, return vmw_view_max. | ||||||
|  |  * The validity of the simplified calculation is verified in the | ||||||
|  |  * vmw_so_build_asserts() function. | ||||||
|  |  */ | ||||||
|  | static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id) | ||||||
|  | { | ||||||
|  | 	u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW) / 2; | ||||||
|  | 
 | ||||||
|  | 	if (tmp > (u32)vmw_view_max) | ||||||
|  | 		return vmw_view_max; | ||||||
|  | 
 | ||||||
|  | 	return (enum vmw_view_type) tmp; | ||||||
|  | } | ||||||
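
The division by two works because the device headers interleave the define/destroy command ids in consecutive pairs, which is exactly what vmw_so_build_asserts() in vmwgfx_so.c verifies at compile time. A worked example with base = SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW (offsets per the build asserts; the absolute values live in the device headers):

/*
 *   base + 0  define SR view   -> 0 / 2 == 0 == vmw_view_sr
 *   base + 1  destroy SR view  -> 1 / 2 == 0 == vmw_view_sr
 *   base + 2  define RT view   -> 2 / 2 == 1 == vmw_view_rt
 *   base + 3  destroy RT view  -> 3 / 2 == 1 == vmw_view_rt
 *   base + 4  define DS view   -> 4 / 2 == 2 == vmw_view_ds
 *   base + 5  destroy DS view  -> 5 / 2 == 2 == vmw_view_ds
 */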
|  | 
 | ||||||
|  | /*
 | ||||||
|  |  * vmw_so_cmd_to_type - Return the state object type for a | ||||||
|  |  * create or destroy command | ||||||
|  |  * | ||||||
|  |  * @id: The SVGA3D command id. | ||||||
|  |  * | ||||||
|  |  * For a given state object create or destroy command id, | ||||||
|  |  * return the corresponding enum vmw_so_type. If the command is unknown, | ||||||
|  |  * return vmw_so_max. We should perhaps optimize this function using | ||||||
|  |  * a similar strategy as vmw_view_cmd_to_type(). | ||||||
|  |  */ | ||||||
|  | static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id) | ||||||
|  | { | ||||||
|  | 	switch (id) { | ||||||
|  | 	case SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT: | ||||||
|  | 	case SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT: | ||||||
|  | 		return vmw_so_el; | ||||||
|  | 	case SVGA_3D_CMD_DX_DEFINE_BLEND_STATE: | ||||||
|  | 	case SVGA_3D_CMD_DX_DESTROY_BLEND_STATE: | ||||||
|  | 		return vmw_so_bs; | ||||||
|  | 	case SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE: | ||||||
|  | 	case SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE: | ||||||
|  | 		return vmw_so_ds; | ||||||
|  | 	case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE: | ||||||
|  | 	case SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE: | ||||||
|  | 		return vmw_so_rs; | ||||||
|  | 	case SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE: | ||||||
|  | 	case SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE: | ||||||
|  | 		return vmw_so_ss; | ||||||
|  | 	case SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT: | ||||||
|  | 	case SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT: | ||||||
|  | 		return vmw_so_so; | ||||||
|  | 	default: | ||||||
|  | 		break; | ||||||
|  | 	} | ||||||
|  | 	return vmw_so_max; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /*
 | ||||||
|  |  * View management - vmwgfx_so.c | ||||||
|  |  */ | ||||||
|  | extern int vmw_view_add(struct vmw_cmdbuf_res_manager *man, | ||||||
|  | 			struct vmw_resource *ctx, | ||||||
|  | 			struct vmw_resource *srf, | ||||||
|  | 			enum vmw_view_type view_type, | ||||||
|  | 			u32 user_key, | ||||||
|  | 			const void *cmd, | ||||||
|  | 			size_t cmd_size, | ||||||
|  | 			struct list_head *list); | ||||||
|  | 
 | ||||||
|  | extern int vmw_view_remove(struct vmw_cmdbuf_res_manager *man, | ||||||
|  | 			   u32 user_key, enum vmw_view_type view_type, | ||||||
|  | 			   struct list_head *list, | ||||||
|  | 			   struct vmw_resource **res_p); | ||||||
|  | 
 | ||||||
|  | extern void vmw_view_surface_list_destroy(struct vmw_private *dev_priv, | ||||||
|  | 					  struct list_head *view_list); | ||||||
|  | extern void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv, | ||||||
|  | 					  struct list_head *list, | ||||||
|  | 					  bool readback); | ||||||
|  | extern struct vmw_resource *vmw_view_srf(struct vmw_resource *res); | ||||||
|  | extern struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man, | ||||||
|  | 					    enum vmw_view_type view_type, | ||||||
|  | 					    u32 user_key); | ||||||
|  | #endif | ||||||
|  | @ -561,6 +561,7 @@ static int vmw_stdu_crtc_set_config(struct drm_mode_set *set) | ||||||
| 				true, /* a scanout buffer */ | 				true, /* a scanout buffer */ | ||||||
| 				content_srf.mip_levels[0], | 				content_srf.mip_levels[0], | ||||||
| 				content_srf.multisample_count, | 				content_srf.multisample_count, | ||||||
|  | 				0, | ||||||
| 				display_base_size, | 				display_base_size, | ||||||
| 				&display_srf); | 				&display_srf); | ||||||
| 		if (unlikely(ret != 0)) { | 		if (unlikely(ret != 0)) { | ||||||
|  |  | ||||||
|  | @ -27,9 +27,12 @@ | ||||||
| 
 | 
 | ||||||
| #include "vmwgfx_drv.h" | #include "vmwgfx_drv.h" | ||||||
| #include "vmwgfx_resource_priv.h" | #include "vmwgfx_resource_priv.h" | ||||||
|  | #include "vmwgfx_so.h" | ||||||
|  | #include "vmwgfx_binding.h" | ||||||
| #include <ttm/ttm_placement.h> | #include <ttm/ttm_placement.h> | ||||||
| #include "device_include/svga3d_surfacedefs.h" | #include "device_include/svga3d_surfacedefs.h" | ||||||
| 
 | 
 | ||||||
|  | 
 | ||||||
| /**
 | /**
 | ||||||
|  * struct vmw_user_surface - User-space visible surface resource |  * struct vmw_user_surface - User-space visible surface resource | ||||||
|  * |  * | ||||||
|  | @ -593,6 +596,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv, | ||||||
| 	 * surface validate. | 	 * surface validate. | ||||||
| 	 */ | 	 */ | ||||||
| 
 | 
 | ||||||
|  | 	INIT_LIST_HEAD(&srf->view_list); | ||||||
| 	vmw_resource_activate(res, vmw_hw_surface_destroy); | 	vmw_resource_activate(res, vmw_hw_surface_destroy); | ||||||
| 	return ret; | 	return ret; | ||||||
| } | } | ||||||
|  | @ -723,6 +727,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | ||||||
| 	desc = svga3dsurface_get_desc(req->format); | 	desc = svga3dsurface_get_desc(req->format); | ||||||
| 	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) { | 	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) { | ||||||
| 		DRM_ERROR("Invalid surface format for surface creation.\n"); | 		DRM_ERROR("Invalid surface format for surface creation.\n"); | ||||||
|  | 		DRM_ERROR("Format requested is: %d\n", req->format); | ||||||
| 		return -EINVAL; | 		return -EINVAL; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | @ -1018,12 +1023,16 @@ static int vmw_gb_surface_create(struct vmw_resource *res) | ||||||
| { | { | ||||||
| 	struct vmw_private *dev_priv = res->dev_priv; | 	struct vmw_private *dev_priv = res->dev_priv; | ||||||
| 	struct vmw_surface *srf = vmw_res_to_srf(res); | 	struct vmw_surface *srf = vmw_res_to_srf(res); | ||||||
| 	uint32_t cmd_len, submit_len; | 	uint32_t cmd_len, cmd_id, submit_len; | ||||||
| 	int ret; | 	int ret; | ||||||
| 	struct { | 	struct { | ||||||
| 		SVGA3dCmdHeader header; | 		SVGA3dCmdHeader header; | ||||||
| 		SVGA3dCmdDefineGBSurface body; | 		SVGA3dCmdDefineGBSurface body; | ||||||
| 	} *cmd; | 	} *cmd; | ||||||
|  | 	struct { | ||||||
|  | 		SVGA3dCmdHeader header; | ||||||
|  | 		SVGA3dCmdDefineGBSurface_v2 body; | ||||||
|  | 	} *cmd2; | ||||||
| 
 | 
 | ||||||
| 	if (likely(res->id != -1)) | 	if (likely(res->id != -1)) | ||||||
| 		return 0; | 		return 0; | ||||||
|  | @ -1040,9 +1049,19 @@ static int vmw_gb_surface_create(struct vmw_resource *res) | ||||||
| 		goto out_no_fifo; | 		goto out_no_fifo; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	cmd_len = sizeof(cmd->body); | 	if (srf->array_size > 0) { | ||||||
| 	submit_len = sizeof(*cmd); | 		/* has_dx checked on creation time. */ | ||||||
|  | 		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2; | ||||||
|  | 		cmd_len = sizeof(cmd2->body); | ||||||
|  | 		submit_len = sizeof(*cmd2); | ||||||
|  | 	} else { | ||||||
|  | 		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE; | ||||||
|  | 		cmd_len = sizeof(cmd->body); | ||||||
|  | 		submit_len = sizeof(*cmd); | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
| 	cmd = vmw_fifo_reserve(dev_priv, submit_len); | 	cmd = vmw_fifo_reserve(dev_priv, submit_len); | ||||||
|  | 	cmd2 = (typeof(cmd2))cmd; | ||||||
| 	if (unlikely(cmd == NULL)) { | 	if (unlikely(cmd == NULL)) { | ||||||
| 		DRM_ERROR("Failed reserving FIFO space for surface " | 		DRM_ERROR("Failed reserving FIFO space for surface " | ||||||
| 			  "creation.\n"); | 			  "creation.\n"); | ||||||
|  | @ -1050,17 +1069,33 @@ static int vmw_gb_surface_create(struct vmw_resource *res) | ||||||
| 		goto out_no_fifo; | 		goto out_no_fifo; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE; | 	if (srf->array_size > 0) { | ||||||
| 	cmd->header.size = cmd_len; | 		cmd2->header.id = cmd_id; | ||||||
| 	cmd->body.sid = srf->res.id; | 		cmd2->header.size = cmd_len; | ||||||
| 	cmd->body.surfaceFlags = srf->flags; | 		cmd2->body.sid = srf->res.id; | ||||||
| 	cmd->body.format = srf->format; | 		cmd2->body.surfaceFlags = srf->flags; | ||||||
| 	cmd->body.numMipLevels = srf->mip_levels[0]; | 		cmd2->body.format = cpu_to_le32(srf->format); | ||||||
| 	cmd->body.multisampleCount = srf->multisample_count; | 		cmd2->body.numMipLevels = srf->mip_levels[0]; | ||||||
| 	cmd->body.autogenFilter = srf->autogen_filter; | 		cmd2->body.multisampleCount = srf->multisample_count; | ||||||
| 	cmd->body.size.width = srf->base_size.width; | 		cmd2->body.autogenFilter = srf->autogen_filter; | ||||||
| 	cmd->body.size.height = srf->base_size.height; | 		cmd2->body.size.width = srf->base_size.width; | ||||||
| 	cmd->body.size.depth = srf->base_size.depth; | 		cmd2->body.size.height = srf->base_size.height; | ||||||
|  | 		cmd2->body.size.depth = srf->base_size.depth; | ||||||
|  | 		cmd2->body.arraySize = srf->array_size; | ||||||
|  | 	} else { | ||||||
|  | 		cmd->header.id = cmd_id; | ||||||
|  | 		cmd->header.size = cmd_len; | ||||||
|  | 		cmd->body.sid = srf->res.id; | ||||||
|  | 		cmd->body.surfaceFlags = srf->flags; | ||||||
|  | 		cmd->body.format = cpu_to_le32(srf->format); | ||||||
|  | 		cmd->body.numMipLevels = srf->mip_levels[0]; | ||||||
|  | 		cmd->body.multisampleCount = srf->multisample_count; | ||||||
|  | 		cmd->body.autogenFilter = srf->autogen_filter; | ||||||
|  | 		cmd->body.size.width = srf->base_size.width; | ||||||
|  | 		cmd->body.size.height = srf->base_size.height; | ||||||
|  | 		cmd->body.size.depth = srf->base_size.depth; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
| 	vmw_fifo_commit(dev_priv, submit_len); | 	vmw_fifo_commit(dev_priv, submit_len); | ||||||
| 
 | 
 | ||||||
| 	return 0; | 	return 0; | ||||||
|  | @ -1188,6 +1223,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res, | ||||||
| static int vmw_gb_surface_destroy(struct vmw_resource *res) | static int vmw_gb_surface_destroy(struct vmw_resource *res) | ||||||
| { | { | ||||||
| 	struct vmw_private *dev_priv = res->dev_priv; | 	struct vmw_private *dev_priv = res->dev_priv; | ||||||
|  | 	struct vmw_surface *srf = vmw_res_to_srf(res); | ||||||
| 	struct { | 	struct { | ||||||
| 		SVGA3dCmdHeader header; | 		SVGA3dCmdHeader header; | ||||||
| 		SVGA3dCmdDestroyGBSurface body; | 		SVGA3dCmdDestroyGBSurface body; | ||||||
|  | @ -1197,7 +1233,8 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res) | ||||||
| 		return 0; | 		return 0; | ||||||
| 
 | 
 | ||||||
| 	mutex_lock(&dev_priv->binding_mutex); | 	mutex_lock(&dev_priv->binding_mutex); | ||||||
| 	vmw_context_binding_res_list_scrub(&res->binding_head); | 	vmw_view_surface_list_destroy(dev_priv, &srf->view_list); | ||||||
|  | 	vmw_binding_res_list_scrub(&res->binding_head); | ||||||
| 
 | 
 | ||||||
| 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||||||
| 	if (unlikely(cmd == NULL)) { | 	if (unlikely(cmd == NULL)) { | ||||||
|  | @ -1259,6 +1296,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, | ||||||
| 			req->drm_surface_flags & drm_vmw_surface_flag_scanout, | 			req->drm_surface_flags & drm_vmw_surface_flag_scanout, | ||||||
| 			req->mip_levels, | 			req->mip_levels, | ||||||
| 			req->multisample_count, | 			req->multisample_count, | ||||||
|  | 			req->array_size, | ||||||
| 			req->base_size, | 			req->base_size, | ||||||
| 			&srf); | 			&srf); | ||||||
| 	if (unlikely(ret != 0)) | 	if (unlikely(ret != 0)) | ||||||
|  | @ -1275,10 +1313,17 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, | ||||||
| 	res = &user_srf->srf.res; | 	res = &user_srf->srf.res; | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| 	if (req->buffer_handle != SVGA3D_INVALID_ID) | 	if (req->buffer_handle != SVGA3D_INVALID_ID) { | ||||||
| 		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, | 		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, | ||||||
| 					     &res->backup); | 					     &res->backup); | ||||||
| 	else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer) | 		if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE < | ||||||
|  | 		    res->backup_size) { | ||||||
|  | 			DRM_ERROR("Surface backup buffer is too small.\n"); | ||||||
|  | 			vmw_dmabuf_unreference(&res->backup); | ||||||
|  | 			ret = -EINVAL; | ||||||
|  | 			goto out_unlock; | ||||||
|  | 		} | ||||||
|  | 	} else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer) | ||||||
| 		ret = vmw_user_dmabuf_alloc(dev_priv, tfile, | 		ret = vmw_user_dmabuf_alloc(dev_priv, tfile, | ||||||
| 					    res->backup_size, | 					    res->backup_size, | ||||||
| 					    req->drm_surface_flags & | 					    req->drm_surface_flags & | ||||||
|  | @ -1378,6 +1423,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, | ||||||
| 	rep->creq.drm_surface_flags = 0; | 	rep->creq.drm_surface_flags = 0; | ||||||
| 	rep->creq.multisample_count = srf->multisample_count; | 	rep->creq.multisample_count = srf->multisample_count; | ||||||
| 	rep->creq.autogen_filter = srf->autogen_filter; | 	rep->creq.autogen_filter = srf->autogen_filter; | ||||||
|  | 	rep->creq.array_size = srf->array_size; | ||||||
| 	rep->creq.buffer_handle = backup_handle; | 	rep->creq.buffer_handle = backup_handle; | ||||||
| 	rep->creq.base_size = srf->base_size; | 	rep->creq.base_size = srf->base_size; | ||||||
| 	rep->crep.handle = user_srf->prime.base.hash.key; | 	rep->crep.handle = user_srf->prime.base.hash.key; | ||||||
|  | @ -1404,6 +1450,7 @@ out_bad_resource: | ||||||
|  * @for_scanout: true if intended to be used for scanout buffer |  * @for_scanout: true if intended to be used for scanout buffer | ||||||
|  * @num_mip_levels:  number of MIP levels |  * @num_mip_levels:  number of MIP levels | ||||||
|  * @multisample_count: |  * @multisample_count: | ||||||
|  |  * @array_size: Surface array size. | ||||||
|  * @size: width, height, depth of the surface requested |  * @size: width, height, depth of the surface requested | ||||||
|  * @user_srf_out: allocated user_srf.  Set to NULL on failure. |  * @user_srf_out: allocated user_srf.  Set to NULL on failure. | ||||||
|  * |  * | ||||||
|  | @ -1419,6 +1466,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, | ||||||
| 			       bool for_scanout, | 			       bool for_scanout, | ||||||
| 			       uint32_t num_mip_levels, | 			       uint32_t num_mip_levels, | ||||||
| 			       uint32_t multisample_count, | 			       uint32_t multisample_count, | ||||||
|  | 			       uint32_t array_size, | ||||||
| 			       struct drm_vmw_size size, | 			       struct drm_vmw_size size, | ||||||
| 			       struct vmw_surface **srf_out) | 			       struct vmw_surface **srf_out) | ||||||
| { | { | ||||||
|  | @ -1426,7 +1474,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, | ||||||
| 	struct vmw_user_surface *user_srf; | 	struct vmw_user_surface *user_srf; | ||||||
| 	struct vmw_surface *srf; | 	struct vmw_surface *srf; | ||||||
| 	int ret; | 	int ret; | ||||||
| 
 | 	u32 num_layers; | ||||||
| 
 | 
 | ||||||
| 	*srf_out = NULL; | 	*srf_out = NULL; | ||||||
| 
 | 
 | ||||||
|  | @ -1445,6 +1493,12 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | 	/* array_size must be zero for non-DX hosts. */ | ||||||
|  | 	if (array_size > 0 && !dev_priv->has_dx) { | ||||||
|  | 		DRM_ERROR("Tried to create DX surface on non-DX host.\n"); | ||||||
|  | 		return -EINVAL; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
| 	ret = ttm_read_lock(&dev_priv->reservation_sem, true); | 	ret = ttm_read_lock(&dev_priv->reservation_sem, true); | ||||||
| 	if (unlikely(ret != 0)) | 	if (unlikely(ret != 0)) | ||||||
| 		return ret; | 		return ret; | ||||||
|  | @ -1481,10 +1535,21 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, | ||||||
| 	srf->autogen_filter    = SVGA3D_TEX_FILTER_NONE; | 	srf->autogen_filter    = SVGA3D_TEX_FILTER_NONE; | ||||||
| 	srf->multisample_count = multisample_count; | 	srf->multisample_count = multisample_count; | ||||||
| 
 | 
 | ||||||
| 	srf->res.backup_size   = svga3dsurface_get_serialized_size(srf->format, | 	if (array_size) | ||||||
| 					srf->base_size, | 		num_layers = array_size; | ||||||
| 					srf->mip_levels[0], | 	else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP) | ||||||
| 					srf->flags & SVGA3D_SURFACE_CUBEMAP); | 		num_layers = SVGA3D_MAX_SURFACE_FACES; | ||||||
|  | 	else | ||||||
|  | 		num_layers = 1; | ||||||
|  | 
 | ||||||
|  | 	srf->res.backup_size   = | ||||||
|  | 		svga3dsurface_get_serialized_size(srf->format, | ||||||
|  | 						  srf->base_size, | ||||||
|  | 						  srf->mip_levels[0], | ||||||
|  | 						  num_layers); | ||||||
|  | 
 | ||||||
|  | 	if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT) | ||||||
|  | 		srf->res.backup_size += sizeof(SVGA3dDXSOState); | ||||||
| 
 | 
 | ||||||
| 	if (dev_priv->active_display_unit == vmw_du_screen_target && | 	if (dev_priv->active_display_unit == vmw_du_screen_target && | ||||||
| 	    for_scanout) | 	    for_scanout) | ||||||
|  |  | ||||||
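
The backup size now depends on a per-surface layer count rather than on the cubemap flag alone. The selection logic from the hunk above, restated as a stand-alone sketch (the function name is an example only):

/* Sketch of the layer-count selection feeding backup_size. */
static u32 example_num_layers(u32 array_size, u32 svga3d_flags)
{
	if (array_size)					/* DX array surface */
		return array_size;
	if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)	/* six faces */
		return SVGA3D_MAX_SURFACE_FACES;
	return 1;					/* single-layer surface */
}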
|  | @ -64,6 +64,7 @@ | ||||||
| #define DRM_VMW_GB_SURFACE_CREATE    23 | #define DRM_VMW_GB_SURFACE_CREATE    23 | ||||||
| #define DRM_VMW_GB_SURFACE_REF       24 | #define DRM_VMW_GB_SURFACE_REF       24 | ||||||
| #define DRM_VMW_SYNCCPU              25 | #define DRM_VMW_SYNCCPU              25 | ||||||
|  | #define DRM_VMW_CREATE_EXTENDED_CONTEXT 26 | ||||||
| 
 | 
 | ||||||
| /*************************************************************************/ | /*************************************************************************/ | ||||||
| /**
 | /**
 | ||||||
@@ -89,6 +90,7 @@
 #define DRM_VMW_PARAM_MAX_MOB_MEMORY   9
 #define DRM_VMW_PARAM_MAX_MOB_SIZE     10
 #define DRM_VMW_PARAM_SCREEN_TARGET    11
+#define DRM_VMW_PARAM_DX               12
 
 /**
  * enum drm_vmw_handle_type - handle type for ref ioctls
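
The new DRM_VMW_PARAM_DX lets user space ask whether the host supports DX contexts before touching any of the new paths. A minimal user-space sketch, assuming the existing DRM_VMW_GET_PARAM command and struct drm_vmw_getparam_arg from this header; the ioctl request macro is spelled out here for illustration, since only the command index is part of the uapi file:

#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/vmwgfx_drm.h>

#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)

/* Returns 1 if the host supports DX contexts, 0 if not, -1 on error. */
static int vmw_host_has_dx(int fd)
{
	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_DX };

	if (ioctl(fd, DRM_IOCTL_VMW_GET_PARAM, &arg) != 0)
		return -1;

	return arg.value != 0;
}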
@@ -297,7 +299,7 @@ union drm_vmw_surface_reference_arg {
  * Argument to the DRM_VMW_EXECBUF Ioctl.
  */
 
-#define DRM_VMW_EXECBUF_VERSION 1
+#define DRM_VMW_EXECBUF_VERSION 2
 
 struct drm_vmw_execbuf_arg {
 	uint64_t commands;
@@ -306,6 +308,8 @@ struct drm_vmw_execbuf_arg {
 	uint64_t fence_rep;
 	uint32_t version;
 	uint32_t flags;
+	uint32_t context_handle;
+	uint32_t pad64;
 };
 
 /**
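
With the version bump, user space can name the context to execute against directly in the argument rather than implying it through the command stream. A sketch of a version 2 submission; cmd_buf, cmd_size and dx_context_id are placeholders, and existing version 1 callers (whose argument struct ends at flags) keep working since the kernel copies the argument according to the version field:

	/* Sketch: a version 2 submission against an explicit DX context. */
	struct drm_vmw_execbuf_arg arg = {
		.commands       = (uintptr_t)cmd_buf,
		.command_size   = cmd_size,
		.version        = DRM_VMW_EXECBUF_VERSION, /* now 2 */
		.context_handle = dx_context_id,
	};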
@@ -826,7 +830,6 @@ struct drm_vmw_update_layout_arg {
 enum drm_vmw_shader_type {
 	drm_vmw_shader_type_vs = 0,
 	drm_vmw_shader_type_ps,
-	drm_vmw_shader_type_gs
 };
 
 
@@ -908,6 +911,8 @@ enum drm_vmw_surface_flags
  * @buffer_handle     Buffer handle of backup buffer. SVGA3D_INVALID_ID
  *                    if none.
  * @base_size         Size of the base mip level for all faces.
+ * @array_size        Must be zero for non-DX hardware, and if non-zero
+ *                    svga3d_flags must have proper bind flags setup.
  *
  * Input argument to the  DRM_VMW_GB_SURFACE_CREATE Ioctl.
  * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
@@ -920,7 +925,7 @@ struct drm_vmw_gb_surface_create_req {
 	uint32_t multisample_count;
 	uint32_t autogen_filter;
 	uint32_t buffer_handle;
-	uint32_t pad64;
+	uint32_t array_size;
 	struct drm_vmw_size base_size;
 };
 
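
Repurposing the padding word keeps the ioctl layout intact while letting the create request carry the array size directly. A sketch of a request for a small texture array; the format and bind-flag names are assumptions taken from the svga3d headers, not from this file:

	/* Sketch: a 4-layer DX texture array, 256x256, one mip level. */
	struct drm_vmw_gb_surface_create_req req = {
		.svga3d_flags  = SVGA3D_SURFACE_BIND_SHADER_RESOURCE,
		.format        = SVGA3D_R8G8B8A8_UNORM,
		.mip_levels    = 1,
		.buffer_handle = SVGA3D_INVALID_ID, /* no backup buffer yet */
		.array_size    = 4, /* rejected with -EINVAL on non-DX hosts */
		.base_size     = { .width = 256, .height = 256, .depth = 1 },
	};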
@@ -1060,4 +1065,28 @@ struct drm_vmw_synccpu_arg {
 	uint32_t pad64;
 };
 
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
+ *
+ * Allocates a device unique context id, and queues a create context command
+ * for the host. Does not wait for host completion.
+ */
+enum drm_vmw_extended_context {
+	drm_vmw_context_legacy,
+	drm_vmw_context_dx
+};
+
+/**
+ * union drm_vmw_extended_context_arg
+ *
+ * @req: Context type.
+ * @rep: Context identifier.
+ *
+ * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
+ */
+union drm_vmw_extended_context_arg {
+	enum drm_vmw_extended_context req;
+	struct drm_vmw_context_arg rep;
+};
 #endif
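
Putting the pieces together, user space creates a DX context and feeds the returned id to the version 2 execbuf argument shown earlier. A sketch, with the ioctl request macro again constructed here for illustration:

#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			      \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
		 union drm_vmw_extended_context_arg)

	union drm_vmw_extended_context_arg arg = {
		.req = drm_vmw_context_dx,   /* or drm_vmw_context_legacy */
	};

	if (ioctl(fd, DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT, &arg) == 0)
		dx_context_id = arg.rep.cid; /* use as execbuf context_handle */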