Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'vmwgfx-next-2014-01-17' of git://people.freedesktop.org/~thomash/linux into drm-next

Pull request of 2014-01-17

Pull request for 3.14. One not so urgent fix, One huge device update.

The pull request corresponds to the patches sent out on dri-devel, except:
[PATCH 02/33], review tag typo pointed out by Matt Turner.
[PATCH 04/33], dropped. The new surface formats are never used.

The upcoming vmware svga2 hardware version 11 will introduce the concept
of "guest backed objects" or -resources. The device will in principle
get all
of its memory from the guest, which has big advantages from the device
point of view.

This means that vmwgfx contexts, shaders and surfaces need to be backed
by guest memory in the form of buffer objects called MOBs, presumably
short for MemoryOBjects, which are bound to the device in a special way.

This patch series introduces guest backed object support. Some new IOCTLs
are added to allocate these new guest backed objects, and to optionally
provide them with a backing MOB.

There is an update to the gallium driver that comes with this update, and
it will be pushed in the near timeframe presumably to a separate mesa branch
before being merged to master.

* tag 'vmwgfx-next-2014-01-17' of git://people.freedesktop.org/~thomash/linux: (33 commits)
drm/vmwgfx: Invalidate surface on non-readback unbind
drm/vmwgfx: Silence the device command verifier
drm/vmwgfx: Implement 64-bit Otable- and MOB binding v2
drm/vmwgfx: Fix surface framebuffer check for guest-backed surfaces
drm/vmwgfx: Update otable definitions
drm/vmwgfx: Use the linux DMA api also for MOBs
drm/vmwgfx: Ditch the vmw_dummy_query_bo_prepare function
drm/vmwgfx: Persistent tracking of context bindings
drm/vmwgfx: Track context bindings and scrub them upon exiting execbuf
drm/vmwgfx: Block the BIND_SHADERCONSTS command
drm/vmwgfx: Add a parameter to get max MOB memory size
drm/vmwgfx: Implement a buffer object synccpu ioctl.
drm/vmwgfx: Make sure that the multisampling is off
drm/vmwgfx: Extend the command verifier to handle guest-backed on / off
drm/vmwgfx: Fix up the vmwgfx_drv.h header for new files
drm/vmwgfx: Enable 3D for new hardware version
drm/vmwgfx: Add new unused (by user-space) commands to the verifier
drm/vmwgfx: Validate guest-backed shader const commands
drm/vmwgfx: Add guest-backed shaders
drm/vmwgfx: Hook up guest-backed surfaces
...

+4725 -382
+1 -1
drivers/gpu/drm/vmwgfx/Makefile
··· 6 6 vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ 7 7 vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \ 8 8 vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \ 9 - vmwgfx_surface.o vmwgfx_prime.o 9 + vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o 10 10 11 11 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
+703 -13
drivers/gpu/drm/vmwgfx/svga3d_reg.h
··· 34 34 35 35 #include "svga_reg.h" 36 36 37 + typedef uint32 PPN; 38 + typedef __le64 PPN64; 37 39 38 40 /* 39 41 * 3D Hardware Version ··· 73 71 #define SVGA3D_MAX_CONTEXT_IDS 256 74 72 #define SVGA3D_MAX_SURFACE_IDS (32 * 1024) 75 73 74 + #define SVGA3D_NUM_TEXTURE_UNITS 32 75 + #define SVGA3D_NUM_LIGHTS 8 76 + 76 77 /* 77 78 * Surface formats. 78 79 * ··· 86 81 */ 87 82 88 83 typedef enum SVGA3dSurfaceFormat { 84 + SVGA3D_FORMAT_MIN = 0, 89 85 SVGA3D_FORMAT_INVALID = 0, 90 86 91 87 SVGA3D_X8R8G8B8 = 1, ··· 140 134 SVGA3D_RG_S10E5 = 35, 141 135 SVGA3D_RG_S23E8 = 36, 142 136 143 - /* 144 - * Any surface can be used as a buffer object, but SVGA3D_BUFFER is 145 - * the most efficient format to use when creating new surfaces 146 - * expressly for index or vertex data. 147 - */ 148 - 149 137 SVGA3D_BUFFER = 37, 150 138 151 139 SVGA3D_Z_D24X8 = 38, ··· 159 159 /* Video format with alpha */ 160 160 SVGA3D_AYUV = 45, 161 161 162 + SVGA3D_R32G32B32A32_TYPELESS = 46, 163 + SVGA3D_R32G32B32A32_FLOAT = 25, 164 + SVGA3D_R32G32B32A32_UINT = 47, 165 + SVGA3D_R32G32B32A32_SINT = 48, 166 + SVGA3D_R32G32B32_TYPELESS = 49, 167 + SVGA3D_R32G32B32_FLOAT = 50, 168 + SVGA3D_R32G32B32_UINT = 51, 169 + SVGA3D_R32G32B32_SINT = 52, 170 + SVGA3D_R16G16B16A16_TYPELESS = 53, 171 + SVGA3D_R16G16B16A16_FLOAT = 24, 172 + SVGA3D_R16G16B16A16_UNORM = 41, 173 + SVGA3D_R16G16B16A16_UINT = 54, 174 + SVGA3D_R16G16B16A16_SNORM = 55, 175 + SVGA3D_R16G16B16A16_SINT = 56, 176 + SVGA3D_R32G32_TYPELESS = 57, 177 + SVGA3D_R32G32_FLOAT = 36, 178 + SVGA3D_R32G32_UINT = 58, 179 + SVGA3D_R32G32_SINT = 59, 180 + SVGA3D_R32G8X24_TYPELESS = 60, 181 + SVGA3D_D32_FLOAT_S8X24_UINT = 61, 182 + SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62, 183 + SVGA3D_X32_TYPELESS_G8X24_UINT = 63, 184 + SVGA3D_R10G10B10A2_TYPELESS = 64, 185 + SVGA3D_R10G10B10A2_UNORM = 26, 186 + SVGA3D_R10G10B10A2_UINT = 65, 187 + SVGA3D_R11G11B10_FLOAT = 66, 188 + SVGA3D_R8G8B8A8_TYPELESS = 67, 189 + SVGA3D_R8G8B8A8_UNORM = 68, 190 + 
SVGA3D_R8G8B8A8_UNORM_SRGB = 69, 191 + SVGA3D_R8G8B8A8_UINT = 70, 192 + SVGA3D_R8G8B8A8_SNORM = 28, 193 + SVGA3D_R8G8B8A8_SINT = 71, 194 + SVGA3D_R16G16_TYPELESS = 72, 195 + SVGA3D_R16G16_FLOAT = 35, 196 + SVGA3D_R16G16_UNORM = 40, 197 + SVGA3D_R16G16_UINT = 73, 198 + SVGA3D_R16G16_SNORM = 39, 199 + SVGA3D_R16G16_SINT = 74, 200 + SVGA3D_R32_TYPELESS = 75, 201 + SVGA3D_D32_FLOAT = 76, 202 + SVGA3D_R32_FLOAT = 34, 203 + SVGA3D_R32_UINT = 77, 204 + SVGA3D_R32_SINT = 78, 205 + SVGA3D_R24G8_TYPELESS = 79, 206 + SVGA3D_D24_UNORM_S8_UINT = 80, 207 + SVGA3D_R24_UNORM_X8_TYPELESS = 81, 208 + SVGA3D_X24_TYPELESS_G8_UINT = 82, 209 + SVGA3D_R8G8_TYPELESS = 83, 210 + SVGA3D_R8G8_UNORM = 84, 211 + SVGA3D_R8G8_UINT = 85, 212 + SVGA3D_R8G8_SNORM = 27, 213 + SVGA3D_R8G8_SINT = 86, 214 + SVGA3D_R16_TYPELESS = 87, 215 + SVGA3D_R16_FLOAT = 33, 216 + SVGA3D_D16_UNORM = 8, 217 + SVGA3D_R16_UNORM = 88, 218 + SVGA3D_R16_UINT = 89, 219 + SVGA3D_R16_SNORM = 90, 220 + SVGA3D_R16_SINT = 91, 221 + SVGA3D_R8_TYPELESS = 92, 222 + SVGA3D_R8_UNORM = 93, 223 + SVGA3D_R8_UINT = 94, 224 + SVGA3D_R8_SNORM = 95, 225 + SVGA3D_R8_SINT = 96, 226 + SVGA3D_A8_UNORM = 32, 227 + SVGA3D_R1_UNORM = 97, 228 + SVGA3D_R9G9B9E5_SHAREDEXP = 98, 229 + SVGA3D_R8G8_B8G8_UNORM = 99, 230 + SVGA3D_G8R8_G8B8_UNORM = 100, 231 + SVGA3D_BC1_TYPELESS = 101, 232 + SVGA3D_BC1_UNORM = 15, 233 + SVGA3D_BC1_UNORM_SRGB = 102, 234 + SVGA3D_BC2_TYPELESS = 103, 235 + SVGA3D_BC2_UNORM = 17, 236 + SVGA3D_BC2_UNORM_SRGB = 104, 237 + SVGA3D_BC3_TYPELESS = 105, 238 + SVGA3D_BC3_UNORM = 19, 239 + SVGA3D_BC3_UNORM_SRGB = 106, 240 + SVGA3D_BC4_TYPELESS = 107, 162 241 SVGA3D_BC4_UNORM = 108, 242 + SVGA3D_BC4_SNORM = 109, 243 + SVGA3D_BC5_TYPELESS = 110, 163 244 SVGA3D_BC5_UNORM = 111, 245 + SVGA3D_BC5_SNORM = 112, 246 + SVGA3D_B5G6R5_UNORM = 3, 247 + SVGA3D_B5G5R5A1_UNORM = 5, 248 + SVGA3D_B8G8R8A8_UNORM = 2, 249 + SVGA3D_B8G8R8X8_UNORM = 1, 250 + SVGA3D_R10G10B10_XR_BIAS_A2_UNORM = 113, 251 + SVGA3D_B8G8R8A8_TYPELESS = 114, 252 + 
SVGA3D_B8G8R8A8_UNORM_SRGB = 115, 253 + SVGA3D_B8G8R8X8_TYPELESS = 116, 254 + SVGA3D_B8G8R8X8_UNORM_SRGB = 117, 164 255 165 256 /* Advanced D3D9 depth formats. */ 166 257 SVGA3D_Z_DF16 = 118, 167 258 SVGA3D_Z_DF24 = 119, 168 259 SVGA3D_Z_D24S8_INT = 120, 169 260 170 - SVGA3D_FORMAT_MAX 261 + /* Planar video formats. */ 262 + SVGA3D_YV12 = 121, 263 + 264 + /* Shader constant formats. */ 265 + SVGA3D_SURFACE_SHADERCONST_FLOAT = 122, 266 + SVGA3D_SURFACE_SHADERCONST_INT = 123, 267 + SVGA3D_SURFACE_SHADERCONST_BOOL = 124, 268 + 269 + SVGA3D_FORMAT_MAX = 125, 171 270 } SVGA3dSurfaceFormat; 172 271 173 272 typedef uint32 SVGA3dColor; /* a, r, g, b */ ··· 1056 957 } SVGA3dCubeFace; 1057 958 1058 959 typedef enum { 960 + SVGA3D_SHADERTYPE_INVALID = 0, 961 + SVGA3D_SHADERTYPE_MIN = 1, 1059 962 SVGA3D_SHADERTYPE_VS = 1, 1060 963 SVGA3D_SHADERTYPE_PS = 2, 1061 - SVGA3D_SHADERTYPE_MAX 964 + SVGA3D_SHADERTYPE_MAX = 3, 965 + SVGA3D_SHADERTYPE_GS = 3, 1062 966 } SVGA3dShaderType; 967 + 968 + #define SVGA3D_NUM_SHADERTYPE (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN) 1063 969 1064 970 typedef enum { 1065 971 SVGA3D_CONST_TYPE_FLOAT = 0, 1066 972 SVGA3D_CONST_TYPE_INT = 1, 1067 973 SVGA3D_CONST_TYPE_BOOL = 2, 974 + SVGA3D_CONST_TYPE_MAX 1068 975 } SVGA3dShaderConstType; 1069 976 1070 977 #define SVGA3D_MAX_SURFACE_FACES 6 ··· 1161 1056 #define SVGA_3D_CMD_GENERATE_MIPMAPS SVGA_3D_CMD_BASE + 31 1162 1057 #define SVGA_3D_CMD_ACTIVATE_SURFACE SVGA_3D_CMD_BASE + 40 1163 1058 #define SVGA_3D_CMD_DEACTIVATE_SURFACE SVGA_3D_CMD_BASE + 41 1164 - #define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 42 1059 + #define SVGA_3D_CMD_SCREEN_DMA 1082 1060 + #define SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE 1083 1061 + #define SVGA_3D_CMD_OPEN_CONTEXT_SURFACE 1084 1165 1062 1166 - #define SVGA_3D_CMD_FUTURE_MAX 2000 1063 + #define SVGA_3D_CMD_LOGICOPS_BITBLT 1085 1064 + #define SVGA_3D_CMD_LOGICOPS_TRANSBLT 1086 1065 + #define SVGA_3D_CMD_LOGICOPS_STRETCHBLT 1087 1066 + #define SVGA_3D_CMD_LOGICOPS_COLORFILL 
1088 1067 + #define SVGA_3D_CMD_LOGICOPS_ALPHABLEND 1089 1068 + #define SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND 1090 1069 + 1070 + #define SVGA_3D_CMD_SET_OTABLE_BASE 1091 1071 + #define SVGA_3D_CMD_READBACK_OTABLE 1092 1072 + 1073 + #define SVGA_3D_CMD_DEFINE_GB_MOB 1093 1074 + #define SVGA_3D_CMD_DESTROY_GB_MOB 1094 1075 + #define SVGA_3D_CMD_REDEFINE_GB_MOB 1095 1076 + #define SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING 1096 1077 + 1078 + #define SVGA_3D_CMD_DEFINE_GB_SURFACE 1097 1079 + #define SVGA_3D_CMD_DESTROY_GB_SURFACE 1098 1080 + #define SVGA_3D_CMD_BIND_GB_SURFACE 1099 1081 + #define SVGA_3D_CMD_COND_BIND_GB_SURFACE 1100 1082 + #define SVGA_3D_CMD_UPDATE_GB_IMAGE 1101 1083 + #define SVGA_3D_CMD_UPDATE_GB_SURFACE 1102 1084 + #define SVGA_3D_CMD_READBACK_GB_IMAGE 1103 1085 + #define SVGA_3D_CMD_READBACK_GB_SURFACE 1104 1086 + #define SVGA_3D_CMD_INVALIDATE_GB_IMAGE 1105 1087 + #define SVGA_3D_CMD_INVALIDATE_GB_SURFACE 1106 1088 + 1089 + #define SVGA_3D_CMD_DEFINE_GB_CONTEXT 1107 1090 + #define SVGA_3D_CMD_DESTROY_GB_CONTEXT 1108 1091 + #define SVGA_3D_CMD_BIND_GB_CONTEXT 1109 1092 + #define SVGA_3D_CMD_READBACK_GB_CONTEXT 1110 1093 + #define SVGA_3D_CMD_INVALIDATE_GB_CONTEXT 1111 1094 + 1095 + #define SVGA_3D_CMD_DEFINE_GB_SHADER 1112 1096 + #define SVGA_3D_CMD_DESTROY_GB_SHADER 1113 1097 + #define SVGA_3D_CMD_BIND_GB_SHADER 1114 1098 + 1099 + #define SVGA_3D_CMD_SET_OTABLE_BASE64 1115 1100 + 1101 + #define SVGA_3D_CMD_BEGIN_GB_QUERY 1116 1102 + #define SVGA_3D_CMD_END_GB_QUERY 1117 1103 + #define SVGA_3D_CMD_WAIT_FOR_GB_QUERY 1118 1104 + 1105 + #define SVGA_3D_CMD_NOP 1119 1106 + 1107 + #define SVGA_3D_CMD_ENABLE_GART 1120 1108 + #define SVGA_3D_CMD_DISABLE_GART 1121 1109 + #define SVGA_3D_CMD_MAP_MOB_INTO_GART 1122 1110 + #define SVGA_3D_CMD_UNMAP_GART_RANGE 1123 1111 + 1112 + #define SVGA_3D_CMD_DEFINE_GB_SCREENTARGET 1124 1113 + #define SVGA_3D_CMD_DESTROY_GB_SCREENTARGET 1125 1114 + #define SVGA_3D_CMD_BIND_GB_SCREENTARGET 1126 1115 + #define 
SVGA_3D_CMD_UPDATE_GB_SCREENTARGET 1127 1116 + 1117 + #define SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL 1128 1118 + #define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129 1119 + 1120 + #define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130 1121 + 1122 + #define SVGA_3D_CMD_DEFINE_GB_MOB64 1135 1123 + #define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136 1124 + 1125 + #define SVGA_3D_CMD_MAX 1142 1126 + #define SVGA_3D_CMD_FUTURE_MAX 3000 1167 1127 1168 1128 /* 1169 1129 * Common substructures used in multiple FIFO commands: ··· 1920 1750 1921 1751 1922 1752 /* 1753 + * Guest-backed surface definitions. 1754 + */ 1755 + 1756 + typedef uint32 SVGAMobId; 1757 + 1758 + typedef enum SVGAMobFormat { 1759 + SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID, 1760 + SVGA3D_MOBFMT_PTDEPTH_0 = 0, 1761 + SVGA3D_MOBFMT_PTDEPTH_1 = 1, 1762 + SVGA3D_MOBFMT_PTDEPTH_2 = 2, 1763 + SVGA3D_MOBFMT_RANGE = 3, 1764 + SVGA3D_MOBFMT_PTDEPTH64_0 = 4, 1765 + SVGA3D_MOBFMT_PTDEPTH64_1 = 5, 1766 + SVGA3D_MOBFMT_PTDEPTH64_2 = 6, 1767 + SVGA3D_MOBFMT_MAX, 1768 + } SVGAMobFormat; 1769 + 1770 + /* 1771 + * Sizes of opaque types. 1772 + */ 1773 + 1774 + #define SVGA3D_OTABLE_MOB_ENTRY_SIZE 16 1775 + #define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE 8 1776 + #define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE 64 1777 + #define SVGA3D_OTABLE_SHADER_ENTRY_SIZE 16 1778 + #define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE 64 1779 + #define SVGA3D_CONTEXT_DATA_SIZE 16384 1780 + 1781 + /* 1782 + * SVGA3dCmdSetOTableBase -- 1783 + * 1784 + * This command allows the guest to specify the base PPN of the 1785 + * specified object table. 
1786 + */ 1787 + 1788 + typedef enum { 1789 + SVGA_OTABLE_MOB = 0, 1790 + SVGA_OTABLE_MIN = 0, 1791 + SVGA_OTABLE_SURFACE = 1, 1792 + SVGA_OTABLE_CONTEXT = 2, 1793 + SVGA_OTABLE_SHADER = 3, 1794 + SVGA_OTABLE_SCREEN_TARGET = 4, 1795 + SVGA_OTABLE_DX9_MAX = 5, 1796 + SVGA_OTABLE_MAX = 8 1797 + } SVGAOTableType; 1798 + 1799 + typedef 1800 + struct { 1801 + SVGAOTableType type; 1802 + PPN baseAddress; 1803 + uint32 sizeInBytes; 1804 + uint32 validSizeInBytes; 1805 + SVGAMobFormat ptDepth; 1806 + } 1807 + __attribute__((__packed__)) 1808 + SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */ 1809 + 1810 + typedef 1811 + struct { 1812 + SVGAOTableType type; 1813 + PPN64 baseAddress; 1814 + uint32 sizeInBytes; 1815 + uint32 validSizeInBytes; 1816 + SVGAMobFormat ptDepth; 1817 + } 1818 + __attribute__((__packed__)) 1819 + SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */ 1820 + 1821 + typedef 1822 + struct { 1823 + SVGAOTableType type; 1824 + } 1825 + __attribute__((__packed__)) 1826 + SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */ 1827 + 1828 + /* 1829 + * Define a memory object (Mob) in the OTable. 1830 + */ 1831 + 1832 + typedef 1833 + struct SVGA3dCmdDefineGBMob { 1834 + SVGAMobId mobid; 1835 + SVGAMobFormat ptDepth; 1836 + PPN base; 1837 + uint32 sizeInBytes; 1838 + } 1839 + __attribute__((__packed__)) 1840 + SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */ 1841 + 1842 + 1843 + /* 1844 + * Destroys an object in the OTable. 1845 + */ 1846 + 1847 + typedef 1848 + struct SVGA3dCmdDestroyGBMob { 1849 + SVGAMobId mobid; 1850 + } 1851 + __attribute__((__packed__)) 1852 + SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */ 1853 + 1854 + /* 1855 + * Redefine an object in the OTable. 
1856 + */ 1857 + 1858 + typedef 1859 + struct SVGA3dCmdRedefineGBMob { 1860 + SVGAMobId mobid; 1861 + SVGAMobFormat ptDepth; 1862 + PPN base; 1863 + uint32 sizeInBytes; 1864 + } 1865 + __attribute__((__packed__)) 1866 + SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */ 1867 + 1868 + /* 1869 + * Define a memory object (Mob) in the OTable with a PPN64 base. 1870 + */ 1871 + 1872 + typedef 1873 + struct SVGA3dCmdDefineGBMob64 { 1874 + SVGAMobId mobid; 1875 + SVGAMobFormat ptDepth; 1876 + PPN64 base; 1877 + uint32 sizeInBytes; 1878 + } 1879 + __attribute__((__packed__)) 1880 + SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */ 1881 + 1882 + /* 1883 + * Redefine an object in the OTable with PPN64 base. 1884 + */ 1885 + 1886 + typedef 1887 + struct SVGA3dCmdRedefineGBMob64 { 1888 + SVGAMobId mobid; 1889 + SVGAMobFormat ptDepth; 1890 + PPN64 base; 1891 + uint32 sizeInBytes; 1892 + } 1893 + __attribute__((__packed__)) 1894 + SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */ 1895 + 1896 + /* 1897 + * Notification that the page tables have been modified. 1898 + */ 1899 + 1900 + typedef 1901 + struct SVGA3dCmdUpdateGBMobMapping { 1902 + SVGAMobId mobid; 1903 + } 1904 + __attribute__((__packed__)) 1905 + SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */ 1906 + 1907 + /* 1908 + * Define a guest-backed surface. 1909 + */ 1910 + 1911 + typedef 1912 + struct SVGA3dCmdDefineGBSurface { 1913 + uint32 sid; 1914 + SVGA3dSurfaceFlags surfaceFlags; 1915 + SVGA3dSurfaceFormat format; 1916 + uint32 numMipLevels; 1917 + uint32 multisampleCount; 1918 + SVGA3dTextureFilter autogenFilter; 1919 + SVGA3dSize size; 1920 + } SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */ 1921 + 1922 + /* 1923 + * Destroy a guest-backed surface. 
1924 + */ 1925 + 1926 + typedef 1927 + struct SVGA3dCmdDestroyGBSurface { 1928 + uint32 sid; 1929 + } SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */ 1930 + 1931 + /* 1932 + * Bind a guest-backed surface to an object. 1933 + */ 1934 + 1935 + typedef 1936 + struct SVGA3dCmdBindGBSurface { 1937 + uint32 sid; 1938 + SVGAMobId mobid; 1939 + } SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */ 1940 + 1941 + /* 1942 + * Conditionally bind a mob to a guest backed surface if testMobid 1943 + * matches the currently bound mob. Optionally issue a readback on 1944 + * the surface while it is still bound to the old mobid if the mobid 1945 + * is changed by this command. 1946 + */ 1947 + 1948 + #define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0) 1949 + 1950 + typedef 1951 + struct{ 1952 + uint32 sid; 1953 + SVGAMobId testMobid; 1954 + SVGAMobId mobid; 1955 + uint32 flags; 1956 + } 1957 + SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */ 1958 + 1959 + /* 1960 + * Update an image in a guest-backed surface. 1961 + * (Inform the device that the guest-contents have been updated.) 1962 + */ 1963 + 1964 + typedef 1965 + struct SVGA3dCmdUpdateGBImage { 1966 + SVGA3dSurfaceImageId image; 1967 + SVGA3dBox box; 1968 + } SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */ 1969 + 1970 + /* 1971 + * Update an entire guest-backed surface. 1972 + * (Inform the device that the guest-contents have been updated.) 1973 + */ 1974 + 1975 + typedef 1976 + struct SVGA3dCmdUpdateGBSurface { 1977 + uint32 sid; 1978 + } SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */ 1979 + 1980 + /* 1981 + * Readback an image in a guest-backed surface. 1982 + * (Request the device to flush the dirty contents into the guest.) 
1983 + */ 1984 + 1985 + typedef 1986 + struct SVGA3dCmdReadbackGBImage { 1987 + SVGA3dSurfaceImageId image; 1988 + } SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/ 1989 + 1990 + /* 1991 + * Readback an entire guest-backed surface. 1992 + * (Request the device to flush the dirty contents into the guest.) 1993 + */ 1994 + 1995 + typedef 1996 + struct SVGA3dCmdReadbackGBSurface { 1997 + uint32 sid; 1998 + } SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */ 1999 + 2000 + /* 2001 + * Readback a sub rect of an image in a guest-backed surface. After 2002 + * issuing this command the driver is required to issue an update call 2003 + * of the same region before issuing any other commands that reference 2004 + * this surface or rendering is not guaranteed. 2005 + */ 2006 + 2007 + typedef 2008 + struct SVGA3dCmdReadbackGBImagePartial { 2009 + SVGA3dSurfaceImageId image; 2010 + SVGA3dBox box; 2011 + uint32 invertBox; 2012 + } 2013 + SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */ 2014 + 2015 + /* 2016 + * Invalidate an image in a guest-backed surface. 2017 + * (Notify the device that the contents can be lost.) 2018 + */ 2019 + 2020 + typedef 2021 + struct SVGA3dCmdInvalidateGBImage { 2022 + SVGA3dSurfaceImageId image; 2023 + } SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */ 2024 + 2025 + /* 2026 + * Invalidate an entire guest-backed surface. 2027 + * (Notify the device that the contents if all images can be lost.) 2028 + */ 2029 + 2030 + typedef 2031 + struct SVGA3dCmdInvalidateGBSurface { 2032 + uint32 sid; 2033 + } SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */ 2034 + 2035 + /* 2036 + * Invalidate a sub rect of an image in a guest-backed surface. After 2037 + * issuing this command the driver is required to issue an update call 2038 + * of the same region before issuing any other commands that reference 2039 + * this surface or rendering is not guaranteed. 
2040 + */ 2041 + 2042 + typedef 2043 + struct SVGA3dCmdInvalidateGBImagePartial { 2044 + SVGA3dSurfaceImageId image; 2045 + SVGA3dBox box; 2046 + uint32 invertBox; 2047 + } 2048 + SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */ 2049 + 2050 + /* 2051 + * Define a guest-backed context. 2052 + */ 2053 + 2054 + typedef 2055 + struct SVGA3dCmdDefineGBContext { 2056 + uint32 cid; 2057 + } SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */ 2058 + 2059 + /* 2060 + * Destroy a guest-backed context. 2061 + */ 2062 + 2063 + typedef 2064 + struct SVGA3dCmdDestroyGBContext { 2065 + uint32 cid; 2066 + } SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */ 2067 + 2068 + /* 2069 + * Bind a guest-backed context. 2070 + * 2071 + * validContents should be set to 0 for new contexts, 2072 + * and 1 if this is an old context which is getting paged 2073 + * back on to the device. 2074 + * 2075 + * For new contexts, it is recommended that the driver 2076 + * issue commands to initialize all interesting state 2077 + * prior to rendering. 2078 + */ 2079 + 2080 + typedef 2081 + struct SVGA3dCmdBindGBContext { 2082 + uint32 cid; 2083 + SVGAMobId mobid; 2084 + uint32 validContents; 2085 + } SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */ 2086 + 2087 + /* 2088 + * Readback a guest-backed context. 2089 + * (Request that the device flush the contents back into guest memory.) 2090 + */ 2091 + 2092 + typedef 2093 + struct SVGA3dCmdReadbackGBContext { 2094 + uint32 cid; 2095 + } SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */ 2096 + 2097 + /* 2098 + * Invalidate a guest-backed context. 2099 + */ 2100 + typedef 2101 + struct SVGA3dCmdInvalidateGBContext { 2102 + uint32 cid; 2103 + } SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */ 2104 + 2105 + /* 2106 + * Define a guest-backed shader. 
2107 + */ 2108 + 2109 + typedef 2110 + struct SVGA3dCmdDefineGBShader { 2111 + uint32 shid; 2112 + SVGA3dShaderType type; 2113 + uint32 sizeInBytes; 2114 + } SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */ 2115 + 2116 + /* 2117 + * Bind a guest-backed shader. 2118 + */ 2119 + 2120 + typedef struct SVGA3dCmdBindGBShader { 2121 + uint32 shid; 2122 + SVGAMobId mobid; 2123 + uint32 offsetInBytes; 2124 + } SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */ 2125 + 2126 + /* 2127 + * Destroy a guest-backed shader. 2128 + */ 2129 + 2130 + typedef struct SVGA3dCmdDestroyGBShader { 2131 + uint32 shid; 2132 + } SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */ 2133 + 2134 + typedef 2135 + struct { 2136 + uint32 cid; 2137 + uint32 regStart; 2138 + SVGA3dShaderType shaderType; 2139 + SVGA3dShaderConstType constType; 2140 + 2141 + /* 2142 + * Followed by a variable number of shader constants. 2143 + * 2144 + * Note that FLOAT and INT constants are 4-dwords in length, while 2145 + * BOOL constants are 1-dword in length. 2146 + */ 2147 + } SVGA3dCmdSetGBShaderConstInline; 2148 + /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */ 2149 + 2150 + typedef 2151 + struct { 2152 + uint32 cid; 2153 + SVGA3dQueryType type; 2154 + } SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */ 2155 + 2156 + typedef 2157 + struct { 2158 + uint32 cid; 2159 + SVGA3dQueryType type; 2160 + SVGAMobId mobid; 2161 + uint32 offset; 2162 + } SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */ 2163 + 2164 + 2165 + /* 2166 + * SVGA_3D_CMD_WAIT_FOR_GB_QUERY -- 2167 + * 2168 + * The semantics of this command are identical to the 2169 + * SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written 2170 + * to a Mob instead of a GMR. 
2171 + */ 2172 + 2173 + typedef 2174 + struct { 2175 + uint32 cid; 2176 + SVGA3dQueryType type; 2177 + SVGAMobId mobid; 2178 + uint32 offset; 2179 + } SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */ 2180 + 2181 + typedef 2182 + struct { 2183 + SVGAMobId mobid; 2184 + uint32 fbOffset; 2185 + uint32 initalized; 2186 + } 2187 + SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */ 2188 + 2189 + typedef 2190 + struct { 2191 + SVGAMobId mobid; 2192 + uint32 gartOffset; 2193 + } 2194 + SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */ 2195 + 2196 + 2197 + typedef 2198 + struct { 2199 + uint32 gartOffset; 2200 + uint32 numPages; 2201 + } 2202 + SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */ 2203 + 2204 + 2205 + /* 2206 + * Screen Targets 2207 + */ 2208 + #define SVGA_STFLAG_PRIMARY (1 << 0) 2209 + 2210 + typedef 2211 + struct { 2212 + uint32 stid; 2213 + uint32 width; 2214 + uint32 height; 2215 + int32 xRoot; 2216 + int32 yRoot; 2217 + uint32 flags; 2218 + } 2219 + SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */ 2220 + 2221 + typedef 2222 + struct { 2223 + uint32 stid; 2224 + } 2225 + SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */ 2226 + 2227 + typedef 2228 + struct { 2229 + uint32 stid; 2230 + SVGA3dSurfaceImageId image; 2231 + } 2232 + SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */ 2233 + 2234 + typedef 2235 + struct { 2236 + uint32 stid; 2237 + SVGA3dBox box; 2238 + } 2239 + SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */ 2240 + 2241 + /* 1923 2242 * Capability query index. 1924 2243 * 1925 2244 * Notes: ··· 2538 1879 SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83, 2539 1880 2540 1881 /* 2541 - * Don't add new caps into the previous section; the values in this 2542 - * enumeration must not change. You can put new values right before 2543 - * SVGA3D_DEVCAP_MAX. 1882 + * Deprecated. 
2544 1883 */ 1884 + SVGA3D_DEVCAP_VGPU10 = 84, 1885 + 1886 + /* 1887 + * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements 1888 + * ored together, one for every type of video decoding supported. 1889 + */ 1890 + SVGA3D_DEVCAP_VIDEO_DECODE = 85, 1891 + 1892 + /* 1893 + * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements 1894 + * ored together, one for every type of video processing supported. 1895 + */ 1896 + SVGA3D_DEVCAP_VIDEO_PROCESS = 86, 1897 + 1898 + SVGA3D_DEVCAP_LINE_AA = 87, /* boolean */ 1899 + SVGA3D_DEVCAP_LINE_STIPPLE = 88, /* boolean */ 1900 + SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89, /* float */ 1901 + SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90, /* float */ 1902 + 1903 + SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91, 1904 + 1905 + /* 1906 + * Does the host support the SVGA logic ops commands? 1907 + */ 1908 + SVGA3D_DEVCAP_LOGICOPS = 92, 1909 + 1910 + /* 1911 + * What support does the host have for screen targets? 1912 + * 1913 + * See the SVGA3D_SCREENTARGET_CAP bits below. 1914 + */ 1915 + SVGA3D_DEVCAP_SCREENTARGETS = 93, 1916 + 2545 1917 SVGA3D_DEVCAP_MAX /* This must be the last index. */ 2546 1918 } SVGA3dDevCapIndex; 2547 1919
+8 -2
drivers/gpu/drm/vmwgfx/svga_reg.h
··· 169 169 SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */ 170 170 SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */ 171 171 SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */ 172 - SVGA_REG_TOP = 48, /* Must be 1 more than the last register */ 172 + SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */ 173 + SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */ 174 + SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */ 175 + SVGA_REG_TOP = 53, /* Must be 1 more than the last register */ 173 176 174 177 SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */ 175 178 /* Next 768 (== 256*3) registers exist for colormap */ ··· 434 431 #define SVGA_CAP_TRACES 0x00200000 435 432 #define SVGA_CAP_GMR2 0x00400000 436 433 #define SVGA_CAP_SCREEN_OBJECT_2 0x00800000 437 - 434 + #define SVGA_CAP_COMMAND_BUFFERS 0x01000000 435 + #define SVGA_CAP_DEAD1 0x02000000 436 + #define SVGA_CAP_CMD_BUFFERS_2 0x04000000 437 + #define SVGA_CAP_GBOBJECTS 0x08000000 438 438 439 439 /* 440 440 * FIFO register indices.
+167 -7
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
··· 40 40 static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM | 41 41 TTM_PL_FLAG_CACHED; 42 42 43 + static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM | 44 + TTM_PL_FLAG_CACHED | 45 + TTM_PL_FLAG_NO_EVICT; 46 + 43 47 static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR | 44 48 TTM_PL_FLAG_CACHED; 45 49 46 50 static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR | 47 51 TTM_PL_FLAG_CACHED | 48 52 TTM_PL_FLAG_NO_EVICT; 53 + 54 + static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB | 55 + TTM_PL_FLAG_CACHED; 49 56 50 57 struct ttm_placement vmw_vram_placement = { 51 58 .fpfn = 0, ··· 123 116 .busy_placement = &sys_placement_flags 124 117 }; 125 118 119 + struct ttm_placement vmw_sys_ne_placement = { 120 + .fpfn = 0, 121 + .lpfn = 0, 122 + .num_placement = 1, 123 + .placement = &sys_ne_placement_flags, 124 + .num_busy_placement = 1, 125 + .busy_placement = &sys_ne_placement_flags 126 + }; 127 + 126 128 static uint32_t evictable_placement_flags[] = { 127 129 TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED, 128 130 TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED, 129 - VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED 131 + VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED, 132 + VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED 130 133 }; 131 134 132 135 struct ttm_placement vmw_evictable_placement = { 133 136 .fpfn = 0, 134 137 .lpfn = 0, 135 - .num_placement = 3, 138 + .num_placement = 4, 136 139 .placement = evictable_placement_flags, 137 140 .num_busy_placement = 1, 138 141 .busy_placement = &sys_placement_flags ··· 157 140 .busy_placement = gmr_vram_placement_flags 158 141 }; 159 142 143 + struct ttm_placement vmw_mob_placement = { 144 + .fpfn = 0, 145 + .lpfn = 0, 146 + .num_placement = 1, 147 + .num_busy_placement = 1, 148 + .placement = &mob_placement_flags, 149 + .busy_placement = &mob_placement_flags 150 + }; 151 + 160 152 struct vmw_ttm_tt { 161 153 struct ttm_dma_tt dma_ttm; 162 154 struct vmw_private *dev_priv; 163 155 int gmr_id; 156 + struct vmw_mob *mob; 157 + int mem_type; 164 158 struct 
sg_table sgt; 165 159 struct vmw_sg_table vsgt; 166 160 uint64_t sg_alloc_size; ··· 272 244 viter->dma_address = &__vmw_piter_dma_addr; 273 245 viter->page = &__vmw_piter_non_sg_page; 274 246 viter->addrs = vsgt->addrs; 247 + viter->pages = vsgt->pages; 275 248 break; 276 249 case vmw_dma_map_populate: 277 250 case vmw_dma_map_bind: ··· 453 424 vmw_tt->mapped = false; 454 425 } 455 426 427 + 428 + /** 429 + * vmw_bo_map_dma - Make sure buffer object pages are visible to the device 430 + * 431 + * @bo: Pointer to a struct ttm_buffer_object 432 + * 433 + * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer 434 + * instead of a pointer to a struct vmw_ttm_backend as argument. 435 + * Note that the buffer object must be either pinned or reserved before 436 + * calling this function. 437 + */ 438 + int vmw_bo_map_dma(struct ttm_buffer_object *bo) 439 + { 440 + struct vmw_ttm_tt *vmw_tt = 441 + container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); 442 + 443 + return vmw_ttm_map_dma(vmw_tt); 444 + } 445 + 446 + 447 + /** 448 + * vmw_bo_unmap_dma - Make sure buffer object pages are visible to the device 449 + * 450 + * @bo: Pointer to a struct ttm_buffer_object 451 + * 452 + * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer 453 + * instead of a pointer to a struct vmw_ttm_backend as argument. 454 + */ 455 + void vmw_bo_unmap_dma(struct ttm_buffer_object *bo) 456 + { 457 + struct vmw_ttm_tt *vmw_tt = 458 + container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); 459 + 460 + vmw_ttm_unmap_dma(vmw_tt); 461 + } 462 + 463 + 464 + /** 465 + * vmw_bo_sg_table - Return a struct vmw_sg_table object for a 466 + * TTM buffer object 467 + * 468 + * @bo: Pointer to a struct ttm_buffer_object 469 + * 470 + * Returns a pointer to a struct vmw_sg_table object. The object should 471 + * not be freed after use. 472 + * Note that for the device addresses to be valid, the buffer object must 473 + * either be reserved or pinned. 
474 + */ 475 + const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo) 476 + { 477 + struct vmw_ttm_tt *vmw_tt = 478 + container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); 479 + 480 + return &vmw_tt->vsgt; 481 + } 482 + 483 + 456 484 static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) 457 485 { 458 486 struct vmw_ttm_tt *vmw_be = ··· 521 435 return ret; 522 436 523 437 vmw_be->gmr_id = bo_mem->start; 438 + vmw_be->mem_type = bo_mem->mem_type; 524 439 525 - return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt, 526 - ttm->num_pages, vmw_be->gmr_id); 440 + switch (bo_mem->mem_type) { 441 + case VMW_PL_GMR: 442 + return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt, 443 + ttm->num_pages, vmw_be->gmr_id); 444 + case VMW_PL_MOB: 445 + if (unlikely(vmw_be->mob == NULL)) { 446 + vmw_be->mob = 447 + vmw_mob_create(ttm->num_pages); 448 + if (unlikely(vmw_be->mob == NULL)) 449 + return -ENOMEM; 450 + } 451 + 452 + return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob, 453 + &vmw_be->vsgt, ttm->num_pages, 454 + vmw_be->gmr_id); 455 + default: 456 + BUG(); 457 + } 458 + return 0; 527 459 } 528 460 529 461 static int vmw_ttm_unbind(struct ttm_tt *ttm) ··· 549 445 struct vmw_ttm_tt *vmw_be = 550 446 container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); 551 447 552 - vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id); 448 + switch (vmw_be->mem_type) { 449 + case VMW_PL_GMR: 450 + vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id); 451 + break; 452 + case VMW_PL_MOB: 453 + vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob); 454 + break; 455 + default: 456 + BUG(); 457 + } 553 458 554 459 if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind) 555 460 vmw_ttm_unmap_dma(vmw_be); 556 461 557 462 return 0; 558 463 } 464 + 559 465 560 466 static void vmw_ttm_destroy(struct ttm_tt *ttm) 561 467 { ··· 577 463 ttm_dma_tt_fini(&vmw_be->dma_ttm); 578 464 else 579 465 ttm_tt_fini(ttm); 466 + 467 + if (vmw_be->mob) 468 + vmw_mob_destroy(vmw_be->mob); 469 + 580 470 
kfree(vmw_be); 581 471 } 472 + 582 473 583 474 static int vmw_ttm_populate(struct ttm_tt *ttm) 584 475 { ··· 619 500 struct vmw_private *dev_priv = vmw_tt->dev_priv; 620 501 struct ttm_mem_global *glob = vmw_mem_glob(dev_priv); 621 502 503 + 504 + if (vmw_tt->mob) { 505 + vmw_mob_destroy(vmw_tt->mob); 506 + vmw_tt->mob = NULL; 507 + } 508 + 622 509 vmw_ttm_unmap_dma(vmw_tt); 623 510 if (dev_priv->map_mode == vmw_dma_alloc_coherent) { 624 511 size_t size = ··· 655 530 656 531 vmw_be->dma_ttm.ttm.func = &vmw_ttm_func; 657 532 vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev); 533 + vmw_be->mob = NULL; 658 534 659 535 if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent) 660 536 ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags, ··· 697 571 man->default_caching = TTM_PL_FLAG_CACHED; 698 572 break; 699 573 case VMW_PL_GMR: 574 + case VMW_PL_MOB: 700 575 /* 701 576 * "Guest Memory Regions" is an aperture like feature with 702 577 * one slot per bo. There is an upper limit of the number of ··· 745 618 switch (mem->mem_type) { 746 619 case TTM_PL_SYSTEM: 747 620 case VMW_PL_GMR: 621 + case VMW_PL_MOB: 748 622 return 0; 749 623 case TTM_PL_VRAM: 750 624 mem->bus.offset = mem->start << PAGE_SHIFT; ··· 805 677 VMW_FENCE_WAIT_TIMEOUT); 806 678 } 807 679 680 + /** 681 + * vmw_move_notify - TTM move_notify_callback 682 + * 683 + * @bo: The TTM buffer object about to move. 684 + * @mem: The truct ttm_mem_reg indicating to what memory 685 + * region the move is taking place. 686 + * 687 + * Calls move_notify for all subsystems needing it. 688 + * (currently only resources). 689 + */ 690 + static void vmw_move_notify(struct ttm_buffer_object *bo, 691 + struct ttm_mem_reg *mem) 692 + { 693 + vmw_resource_move_notify(bo, mem); 694 + } 695 + 696 + 697 + /** 698 + * vmw_swap_notify - TTM move_notify_callback 699 + * 700 + * @bo: The TTM buffer object about to be swapped out. 
701 + */ 702 + static void vmw_swap_notify(struct ttm_buffer_object *bo) 703 + { 704 + struct ttm_bo_device *bdev = bo->bdev; 705 + 706 + spin_lock(&bdev->fence_lock); 707 + ttm_bo_wait(bo, false, false, false); 708 + spin_unlock(&bdev->fence_lock); 709 + } 710 + 711 + 808 712 struct ttm_bo_driver vmw_bo_driver = { 809 713 .ttm_tt_create = &vmw_ttm_tt_create, 810 714 .ttm_tt_populate = &vmw_ttm_populate, ··· 851 691 .sync_obj_flush = vmw_sync_obj_flush, 852 692 .sync_obj_unref = vmw_sync_obj_unref, 853 693 .sync_obj_ref = vmw_sync_obj_ref, 854 - .move_notify = NULL, 855 - .swap_notify = NULL, 694 + .move_notify = vmw_move_notify, 695 + .swap_notify = vmw_swap_notify, 856 696 .fault_reserve_notify = &vmw_ttm_fault_reserve_notify, 857 697 .io_mem_reserve = &vmw_ttm_io_mem_reserve, 858 698 .io_mem_free = &vmw_ttm_io_mem_free,
+531
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
··· 32 32 struct vmw_user_context { 33 33 struct ttm_base_object base; 34 34 struct vmw_resource res; 35 + struct vmw_ctx_binding_state cbs; 35 36 }; 37 + 38 + 39 + 40 + typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *); 36 41 37 42 static void vmw_user_context_free(struct vmw_resource *res); 38 43 static struct vmw_resource * 39 44 vmw_user_context_base_to_res(struct ttm_base_object *base); 40 45 46 + static int vmw_gb_context_create(struct vmw_resource *res); 47 + static int vmw_gb_context_bind(struct vmw_resource *res, 48 + struct ttm_validate_buffer *val_buf); 49 + static int vmw_gb_context_unbind(struct vmw_resource *res, 50 + bool readback, 51 + struct ttm_validate_buffer *val_buf); 52 + static int vmw_gb_context_destroy(struct vmw_resource *res); 53 + static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi); 54 + static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi); 55 + static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi); 56 + static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs); 41 57 static uint64_t vmw_user_context_size; 42 58 43 59 static const struct vmw_user_resource_conv user_context_conv = { ··· 78 62 .unbind = NULL 79 63 }; 80 64 65 + static const struct vmw_res_func vmw_gb_context_func = { 66 + .res_type = vmw_res_context, 67 + .needs_backup = true, 68 + .may_evict = true, 69 + .type_name = "guest backed contexts", 70 + .backup_placement = &vmw_mob_placement, 71 + .create = vmw_gb_context_create, 72 + .destroy = vmw_gb_context_destroy, 73 + .bind = vmw_gb_context_bind, 74 + .unbind = vmw_gb_context_unbind 75 + }; 76 + 77 + static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = { 78 + [vmw_ctx_binding_shader] = vmw_context_scrub_shader, 79 + [vmw_ctx_binding_rt] = vmw_context_scrub_render_target, 80 + [vmw_ctx_binding_tex] = vmw_context_scrub_texture }; 81 + 81 82 /** 82 83 * Context management: 83 84 */ ··· 108 75 SVGA3dCmdDestroyContext body; 109 76 } *cmd; 110 
77 78 + 79 + if (res->func->destroy == vmw_gb_context_destroy) { 80 + mutex_lock(&dev_priv->cmdbuf_mutex); 81 + (void) vmw_gb_context_destroy(res); 82 + if (dev_priv->pinned_bo != NULL && 83 + !dev_priv->query_cid_valid) 84 + __vmw_execbuf_release_pinned_bo(dev_priv, NULL); 85 + mutex_unlock(&dev_priv->cmdbuf_mutex); 86 + return; 87 + } 111 88 112 89 vmw_execbuf_release_pinned_bo(dev_priv); 113 90 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); ··· 135 92 vmw_3d_resource_dec(dev_priv, false); 136 93 } 137 94 95 + static int vmw_gb_context_init(struct vmw_private *dev_priv, 96 + struct vmw_resource *res, 97 + void (*res_free) (struct vmw_resource *res)) 98 + { 99 + int ret; 100 + struct vmw_user_context *uctx = 101 + container_of(res, struct vmw_user_context, res); 102 + 103 + ret = vmw_resource_init(dev_priv, res, true, 104 + res_free, &vmw_gb_context_func); 105 + res->backup_size = SVGA3D_CONTEXT_DATA_SIZE; 106 + 107 + if (unlikely(ret != 0)) { 108 + if (res_free) 109 + res_free(res); 110 + else 111 + kfree(res); 112 + return ret; 113 + } 114 + 115 + memset(&uctx->cbs, 0, sizeof(uctx->cbs)); 116 + INIT_LIST_HEAD(&uctx->cbs.list); 117 + 118 + vmw_resource_activate(res, vmw_hw_context_destroy); 119 + return 0; 120 + } 121 + 138 122 static int vmw_context_init(struct vmw_private *dev_priv, 139 123 struct vmw_resource *res, 140 124 void (*res_free) (struct vmw_resource *res)) ··· 172 102 SVGA3dCmdHeader header; 173 103 SVGA3dCmdDefineContext body; 174 104 } *cmd; 105 + 106 + if (dev_priv->has_mob) 107 + return vmw_gb_context_init(dev_priv, res, res_free); 175 108 176 109 ret = vmw_resource_init(dev_priv, res, false, 177 110 res_free, &vmw_legacy_context_func); ··· 225 152 ret = vmw_context_init(dev_priv, res, NULL); 226 153 227 154 return (ret == 0) ? 
res : NULL; 155 + } 156 + 157 + 158 + static int vmw_gb_context_create(struct vmw_resource *res) 159 + { 160 + struct vmw_private *dev_priv = res->dev_priv; 161 + int ret; 162 + struct { 163 + SVGA3dCmdHeader header; 164 + SVGA3dCmdDefineGBContext body; 165 + } *cmd; 166 + 167 + if (likely(res->id != -1)) 168 + return 0; 169 + 170 + ret = vmw_resource_alloc_id(res); 171 + if (unlikely(ret != 0)) { 172 + DRM_ERROR("Failed to allocate a context id.\n"); 173 + goto out_no_id; 174 + } 175 + 176 + if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) { 177 + ret = -EBUSY; 178 + goto out_no_fifo; 179 + } 180 + 181 + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 182 + if (unlikely(cmd == NULL)) { 183 + DRM_ERROR("Failed reserving FIFO space for context " 184 + "creation.\n"); 185 + ret = -ENOMEM; 186 + goto out_no_fifo; 187 + } 188 + 189 + cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT; 190 + cmd->header.size = sizeof(cmd->body); 191 + cmd->body.cid = res->id; 192 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 193 + (void) vmw_3d_resource_inc(dev_priv, false); 194 + 195 + return 0; 196 + 197 + out_no_fifo: 198 + vmw_resource_release_id(res); 199 + out_no_id: 200 + return ret; 201 + } 202 + 203 + static int vmw_gb_context_bind(struct vmw_resource *res, 204 + struct ttm_validate_buffer *val_buf) 205 + { 206 + struct vmw_private *dev_priv = res->dev_priv; 207 + struct { 208 + SVGA3dCmdHeader header; 209 + SVGA3dCmdBindGBContext body; 210 + } *cmd; 211 + struct ttm_buffer_object *bo = val_buf->bo; 212 + 213 + BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 214 + 215 + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 216 + if (unlikely(cmd == NULL)) { 217 + DRM_ERROR("Failed reserving FIFO space for context " 218 + "binding.\n"); 219 + return -ENOMEM; 220 + } 221 + 222 + cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; 223 + cmd->header.size = sizeof(cmd->body); 224 + cmd->body.cid = res->id; 225 + cmd->body.mobid = bo->mem.start; 226 + cmd->body.validContents = res->backup_dirty; 227 + 
res->backup_dirty = false; 228 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 229 + 230 + return 0; 231 + } 232 + 233 + static int vmw_gb_context_unbind(struct vmw_resource *res, 234 + bool readback, 235 + struct ttm_validate_buffer *val_buf) 236 + { 237 + struct vmw_private *dev_priv = res->dev_priv; 238 + struct ttm_buffer_object *bo = val_buf->bo; 239 + struct vmw_fence_obj *fence; 240 + struct vmw_user_context *uctx = 241 + container_of(res, struct vmw_user_context, res); 242 + 243 + struct { 244 + SVGA3dCmdHeader header; 245 + SVGA3dCmdReadbackGBContext body; 246 + } *cmd1; 247 + struct { 248 + SVGA3dCmdHeader header; 249 + SVGA3dCmdBindGBContext body; 250 + } *cmd2; 251 + uint32_t submit_size; 252 + uint8_t *cmd; 253 + 254 + 255 + BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 256 + 257 + mutex_lock(&dev_priv->binding_mutex); 258 + vmw_context_binding_state_kill(&uctx->cbs); 259 + 260 + submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); 261 + 262 + cmd = vmw_fifo_reserve(dev_priv, submit_size); 263 + if (unlikely(cmd == NULL)) { 264 + DRM_ERROR("Failed reserving FIFO space for context " 265 + "unbinding.\n"); 266 + mutex_unlock(&dev_priv->binding_mutex); 267 + return -ENOMEM; 268 + } 269 + 270 + cmd2 = (void *) cmd; 271 + if (readback) { 272 + cmd1 = (void *) cmd; 273 + cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT; 274 + cmd1->header.size = sizeof(cmd1->body); 275 + cmd1->body.cid = res->id; 276 + cmd2 = (void *) (&cmd1[1]); 277 + } 278 + cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; 279 + cmd2->header.size = sizeof(cmd2->body); 280 + cmd2->body.cid = res->id; 281 + cmd2->body.mobid = SVGA3D_INVALID_ID; 282 + 283 + vmw_fifo_commit(dev_priv, submit_size); 284 + mutex_unlock(&dev_priv->binding_mutex); 285 + 286 + /* 287 + * Create a fence object and fence the backup buffer. 
288 + */ 289 + 290 + (void) vmw_execbuf_fence_commands(NULL, dev_priv, 291 + &fence, NULL); 292 + 293 + vmw_fence_single_bo(bo, fence); 294 + 295 + if (likely(fence != NULL)) 296 + vmw_fence_obj_unreference(&fence); 297 + 298 + return 0; 299 + } 300 + 301 + static int vmw_gb_context_destroy(struct vmw_resource *res) 302 + { 303 + struct vmw_private *dev_priv = res->dev_priv; 304 + struct { 305 + SVGA3dCmdHeader header; 306 + SVGA3dCmdDestroyGBContext body; 307 + } *cmd; 308 + struct vmw_user_context *uctx = 309 + container_of(res, struct vmw_user_context, res); 310 + 311 + BUG_ON(!list_empty(&uctx->cbs.list)); 312 + 313 + if (likely(res->id == -1)) 314 + return 0; 315 + 316 + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 317 + if (unlikely(cmd == NULL)) { 318 + DRM_ERROR("Failed reserving FIFO space for context " 319 + "destruction.\n"); 320 + return -ENOMEM; 321 + } 322 + 323 + cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT; 324 + cmd->header.size = sizeof(cmd->body); 325 + cmd->body.cid = res->id; 326 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 327 + if (dev_priv->query_cid == res->id) 328 + dev_priv->query_cid_valid = false; 329 + vmw_resource_release_id(res); 330 + vmw_3d_resource_dec(dev_priv, false); 331 + 332 + return 0; 228 333 } 229 334 230 335 /** ··· 522 271 ttm_read_unlock(&vmaster->lock); 523 272 return ret; 524 273 274 + } 275 + 276 + /** 277 + * vmw_context_scrub_shader - scrub a shader binding from a context. 278 + * 279 + * @bi: single binding information. 
280 + */ 281 + static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi) 282 + { 283 + struct vmw_private *dev_priv = bi->ctx->dev_priv; 284 + struct { 285 + SVGA3dCmdHeader header; 286 + SVGA3dCmdSetShader body; 287 + } *cmd; 288 + 289 + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 290 + if (unlikely(cmd == NULL)) { 291 + DRM_ERROR("Failed reserving FIFO space for shader " 292 + "unbinding.\n"); 293 + return -ENOMEM; 294 + } 295 + 296 + cmd->header.id = SVGA_3D_CMD_SET_SHADER; 297 + cmd->header.size = sizeof(cmd->body); 298 + cmd->body.cid = bi->ctx->id; 299 + cmd->body.type = bi->i1.shader_type; 300 + cmd->body.shid = SVGA3D_INVALID_ID; 301 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 302 + 303 + return 0; 304 + } 305 + 306 + /** 307 + * vmw_context_scrub_render_target - scrub a render target binding 308 + * from a context. 309 + * 310 + * @bi: single binding information. 311 + */ 312 + static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi) 313 + { 314 + struct vmw_private *dev_priv = bi->ctx->dev_priv; 315 + struct { 316 + SVGA3dCmdHeader header; 317 + SVGA3dCmdSetRenderTarget body; 318 + } *cmd; 319 + 320 + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 321 + if (unlikely(cmd == NULL)) { 322 + DRM_ERROR("Failed reserving FIFO space for render target " 323 + "unbinding.\n"); 324 + return -ENOMEM; 325 + } 326 + 327 + cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET; 328 + cmd->header.size = sizeof(cmd->body); 329 + cmd->body.cid = bi->ctx->id; 330 + cmd->body.type = bi->i1.rt_type; 331 + cmd->body.target.sid = SVGA3D_INVALID_ID; 332 + cmd->body.target.face = 0; 333 + cmd->body.target.mipmap = 0; 334 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 335 + 336 + return 0; 337 + } 338 + 339 + /** 340 + * vmw_context_scrub_texture - scrub a texture binding from a context. 341 + * 342 + * @bi: single binding information. 
343 + * 344 + * TODO: Possibly complement this function with a function that takes 345 + * a list of texture bindings and combines them to a single command. 346 + */ 347 + static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi) 348 + { 349 + struct vmw_private *dev_priv = bi->ctx->dev_priv; 350 + struct { 351 + SVGA3dCmdHeader header; 352 + struct { 353 + SVGA3dCmdSetTextureState c; 354 + SVGA3dTextureState s1; 355 + } body; 356 + } *cmd; 357 + 358 + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 359 + if (unlikely(cmd == NULL)) { 360 + DRM_ERROR("Failed reserving FIFO space for texture " 361 + "unbinding.\n"); 362 + return -ENOMEM; 363 + } 364 + 365 + 366 + cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE; 367 + cmd->header.size = sizeof(cmd->body); 368 + cmd->body.c.cid = bi->ctx->id; 369 + cmd->body.s1.stage = bi->i1.texture_stage; 370 + cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; 371 + cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID; 372 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 373 + 374 + return 0; 375 + } 376 + 377 + /** 378 + * vmw_context_binding_drop: Stop tracking a context binding 379 + * 380 + * @cb: Pointer to binding tracker storage. 381 + * 382 + * Stops tracking a context binding, and re-initializes its storage. 383 + * Typically used when the context binding is replaced with a binding to 384 + * another (or the same, for that matter) resource. 385 + */ 386 + static void vmw_context_binding_drop(struct vmw_ctx_binding *cb) 387 + { 388 + list_del(&cb->ctx_list); 389 + if (!list_empty(&cb->res_list)) 390 + list_del(&cb->res_list); 391 + cb->bi.ctx = NULL; 392 + } 393 + 394 + /** 395 + * vmw_context_binding_add: Start tracking a context binding 396 + * 397 + * @cbs: Pointer to the context binding state tracker. 398 + * @bi: Information about the binding to track. 
399 + * 400 + * Performs basic checks on the binding to make sure arguments are within 401 + * bounds and then starts tracking the binding in the context binding 402 + * state structure @cbs. 403 + */ 404 + int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs, 405 + const struct vmw_ctx_bindinfo *bi) 406 + { 407 + struct vmw_ctx_binding *loc; 408 + 409 + switch (bi->bt) { 410 + case vmw_ctx_binding_rt: 411 + if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) { 412 + DRM_ERROR("Illegal render target type %u.\n", 413 + (unsigned) bi->i1.rt_type); 414 + return -EINVAL; 415 + } 416 + loc = &cbs->render_targets[bi->i1.rt_type]; 417 + break; 418 + case vmw_ctx_binding_tex: 419 + if (unlikely((unsigned)bi->i1.texture_stage >= 420 + SVGA3D_NUM_TEXTURE_UNITS)) { 421 + DRM_ERROR("Illegal texture/sampler unit %u.\n", 422 + (unsigned) bi->i1.texture_stage); 423 + return -EINVAL; 424 + } 425 + loc = &cbs->texture_units[bi->i1.texture_stage]; 426 + break; 427 + case vmw_ctx_binding_shader: 428 + if (unlikely((unsigned)bi->i1.shader_type >= 429 + SVGA3D_SHADERTYPE_MAX)) { 430 + DRM_ERROR("Illegal shader type %u.\n", 431 + (unsigned) bi->i1.shader_type); 432 + return -EINVAL; 433 + } 434 + loc = &cbs->shaders[bi->i1.shader_type]; 435 + break; 436 + default: 437 + BUG(); 438 + } 439 + 440 + if (loc->bi.ctx != NULL) 441 + vmw_context_binding_drop(loc); 442 + 443 + loc->bi = *bi; 444 + list_add_tail(&loc->ctx_list, &cbs->list); 445 + INIT_LIST_HEAD(&loc->res_list); 446 + 447 + return 0; 448 + } 449 + 450 + /** 451 + * vmw_context_binding_transfer: Transfer a context binding tracking entry. 452 + * 453 + * @cbs: Pointer to the persistent context binding state tracker. 454 + * @bi: Information about the binding to track. 
455 + * 456 + */ 457 + static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs, 458 + const struct vmw_ctx_bindinfo *bi) 459 + { 460 + struct vmw_ctx_binding *loc; 461 + 462 + switch (bi->bt) { 463 + case vmw_ctx_binding_rt: 464 + loc = &cbs->render_targets[bi->i1.rt_type]; 465 + break; 466 + case vmw_ctx_binding_tex: 467 + loc = &cbs->texture_units[bi->i1.texture_stage]; 468 + break; 469 + case vmw_ctx_binding_shader: 470 + loc = &cbs->shaders[bi->i1.shader_type]; 471 + break; 472 + default: 473 + BUG(); 474 + } 475 + 476 + if (loc->bi.ctx != NULL) 477 + vmw_context_binding_drop(loc); 478 + 479 + loc->bi = *bi; 480 + list_add_tail(&loc->ctx_list, &cbs->list); 481 + if (bi->res != NULL) 482 + list_add_tail(&loc->res_list, &bi->res->binding_head); 483 + else 484 + INIT_LIST_HEAD(&loc->res_list); 485 + } 486 + 487 + /** 488 + * vmw_context_binding_kill - Kill a binding on the device 489 + * and stop tracking it. 490 + * 491 + * @cb: Pointer to binding tracker storage. 492 + * 493 + * Emits FIFO commands to scrub a binding represented by @cb. 494 + * Then stops tracking the binding and re-initializes its storage. 495 + */ 496 + void vmw_context_binding_kill(struct vmw_ctx_binding *cb) 497 + { 498 + (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi); 499 + vmw_context_binding_drop(cb); 500 + } 501 + 502 + /** 503 + * vmw_context_binding_state_kill - Kill all bindings associated with a 504 + * struct vmw_ctx_binding state structure, and re-initialize the structure. 505 + * 506 + * @cbs: Pointer to the context binding state tracker. 507 + * 508 + * Emits commands to scrub all bindings associated with the 509 + * context binding state tracker. Then re-initializes the whole structure. 
510 + */ 511 + static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs) 512 + { 513 + struct vmw_ctx_binding *entry, *next; 514 + 515 + list_for_each_entry_safe(entry, next, &cbs->list, ctx_list) 516 + vmw_context_binding_kill(entry); 517 + } 518 + 519 + /** 520 + * vmw_context_binding_res_list_kill - Kill all bindings on a 521 + * resource binding list 522 + * 523 + * @head: list head of resource binding list 524 + * 525 + * Kills all bindings associated with a specific resource. Typically 526 + * called before the resource is destroyed. 527 + */ 528 + void vmw_context_binding_res_list_kill(struct list_head *head) 529 + { 530 + struct vmw_ctx_binding *entry, *next; 531 + 532 + list_for_each_entry_safe(entry, next, head, res_list) 533 + vmw_context_binding_kill(entry); 534 + } 535 + 536 + /** 537 + * vmw_context_binding_state_transfer - Commit staged binding info 538 + * 539 + * @ctx: Pointer to context to commit the staged binding info to. 540 + * @from: Staged binding info built during execbuf. 541 + * 542 + * Transfers binding info from a temporary structure to the persistent 543 + * structure in the context. This can be done once commands 544 + */ 545 + void vmw_context_binding_state_transfer(struct vmw_resource *ctx, 546 + struct vmw_ctx_binding_state *from) 547 + { 548 + struct vmw_user_context *uctx = 549 + container_of(ctx, struct vmw_user_context, res); 550 + struct vmw_ctx_binding *entry, *next; 551 + 552 + list_for_each_entry_safe(entry, next, &from->list, ctx_list) 553 + vmw_context_binding_transfer(&uctx->cbs, &entry->bi); 525 554 }
+3 -5
drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
··· 290 290 /** 291 291 * vmw_bo_pin - Pin or unpin a buffer object without moving it. 292 292 * 293 - * @bo: The buffer object. Must be reserved, and present either in VRAM 294 - * or GMR memory. 293 + * @bo: The buffer object. Must be reserved. 295 294 * @pin: Whether to pin or unpin. 296 295 * 297 296 */ ··· 302 303 int ret; 303 304 304 305 lockdep_assert_held(&bo->resv->lock.base); 305 - BUG_ON(old_mem_type != TTM_PL_VRAM && 306 - old_mem_type != VMW_PL_GMR); 307 306 308 - pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED; 307 + pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB 308 + | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; 309 309 if (pin) 310 310 pl_flags |= TTM_PL_FLAG_NO_EVICT; 311 311
+164 -65
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 112 112 #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ 113 113 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ 114 114 struct drm_vmw_update_layout_arg) 115 + #define DRM_IOCTL_VMW_CREATE_SHADER \ 116 + DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \ 117 + struct drm_vmw_shader_create_arg) 118 + #define DRM_IOCTL_VMW_UNREF_SHADER \ 119 + DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \ 120 + struct drm_vmw_shader_arg) 121 + #define DRM_IOCTL_VMW_GB_SURFACE_CREATE \ 122 + DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \ 123 + union drm_vmw_gb_surface_create_arg) 124 + #define DRM_IOCTL_VMW_GB_SURFACE_REF \ 125 + DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \ 126 + union drm_vmw_gb_surface_reference_arg) 127 + #define DRM_IOCTL_VMW_SYNCCPU \ 128 + DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \ 129 + struct drm_vmw_synccpu_arg) 115 130 116 131 /** 117 132 * The core DRM version of this macro doesn't account for ··· 192 177 VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, 193 178 vmw_kms_update_layout_ioctl, 194 179 DRM_MASTER | DRM_UNLOCKED), 180 + VMW_IOCTL_DEF(VMW_CREATE_SHADER, 181 + vmw_shader_define_ioctl, 182 + DRM_AUTH | DRM_UNLOCKED), 183 + VMW_IOCTL_DEF(VMW_UNREF_SHADER, 184 + vmw_shader_destroy_ioctl, 185 + DRM_AUTH | DRM_UNLOCKED), 186 + VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE, 187 + vmw_gb_surface_define_ioctl, 188 + DRM_AUTH | DRM_UNLOCKED), 189 + VMW_IOCTL_DEF(VMW_GB_SURFACE_REF, 190 + vmw_gb_surface_reference_ioctl, 191 + DRM_AUTH | DRM_UNLOCKED), 192 + VMW_IOCTL_DEF(VMW_SYNCCPU, 193 + vmw_user_dmabuf_synccpu_ioctl, 194 + DRM_AUTH | DRM_UNLOCKED), 195 195 }; 196 196 197 197 static struct pci_device_id vmw_pci_id_list[] = { ··· 219 189 static int vmw_force_iommu; 220 190 static int vmw_restrict_iommu; 221 191 static int vmw_force_coherent; 192 + static int vmw_restrict_dma_mask; 222 193 223 194 static int vmw_probe(struct pci_dev *, const struct pci_device_id *); 224 195 static void vmw_master_init(struct vmw_master *); ··· 234 203 
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); 235 204 MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); 236 205 module_param_named(force_coherent, vmw_force_coherent, int, 0600); 206 + MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); 207 + module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600); 237 208 238 209 239 210 static void vmw_print_capabilities(uint32_t capabilities) ··· 273 240 DRM_INFO(" GMR2.\n"); 274 241 if (capabilities & SVGA_CAP_SCREEN_OBJECT_2) 275 242 DRM_INFO(" Screen Object 2.\n"); 243 + if (capabilities & SVGA_CAP_COMMAND_BUFFERS) 244 + DRM_INFO(" Command Buffers.\n"); 245 + if (capabilities & SVGA_CAP_CMD_BUFFERS_2) 246 + DRM_INFO(" Command Buffers 2.\n"); 247 + if (capabilities & SVGA_CAP_GBOBJECTS) 248 + DRM_INFO(" Guest Backed Resources.\n"); 276 249 } 277 - 278 - 279 - /** 280 - * vmw_execbuf_prepare_dummy_query - Initialize a query result structure at 281 - * the start of a buffer object. 282 - * 283 - * @dev_priv: The device private structure. 284 - * 285 - * This function will idle the buffer using an uninterruptible wait, then 286 - * map the first page and initialize a pending occlusion query result structure, 287 - * Finally it will unmap the buffer. 288 - * 289 - * TODO: Since we're only mapping a single page, we should optimize the map 290 - * to use kmap_atomic / iomap_atomic. 
291 - */ 292 - static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv) 293 - { 294 - struct ttm_bo_kmap_obj map; 295 - volatile SVGA3dQueryResult *result; 296 - bool dummy; 297 - int ret; 298 - struct ttm_bo_device *bdev = &dev_priv->bdev; 299 - struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; 300 - 301 - ttm_bo_reserve(bo, false, false, false, 0); 302 - spin_lock(&bdev->fence_lock); 303 - ret = ttm_bo_wait(bo, false, false, false); 304 - spin_unlock(&bdev->fence_lock); 305 - if (unlikely(ret != 0)) 306 - (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 307 - 10*HZ); 308 - 309 - ret = ttm_bo_kmap(bo, 0, 1, &map); 310 - if (likely(ret == 0)) { 311 - result = ttm_kmap_obj_virtual(&map, &dummy); 312 - result->totalSize = sizeof(*result); 313 - result->state = SVGA3D_QUERYSTATE_PENDING; 314 - result->result32 = 0xff; 315 - ttm_bo_kunmap(&map); 316 - } else 317 - DRM_ERROR("Dummy query buffer map failed.\n"); 318 - ttm_bo_unreserve(bo); 319 - } 320 - 321 250 322 251 /** 323 252 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result ··· 288 293 * 289 294 * This function creates a small buffer object that holds the query 290 295 * result for dummy queries emitted as query barriers. 296 + * The function will then map the first page and initialize a pending 297 + * occlusion query result structure, Finally it will unmap the buffer. 291 298 * No interruptible waits are done within this function. 292 299 * 293 - * Returns an error if bo creation fails. 300 + * Returns an error if bo creation or initialization fails. 
294 301 */ 295 302 static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) 296 303 { 297 - return ttm_bo_create(&dev_priv->bdev, 298 - PAGE_SIZE, 299 - ttm_bo_type_device, 300 - &vmw_vram_sys_placement, 301 - 0, false, NULL, 302 - &dev_priv->dummy_query_bo); 303 - } 304 + int ret; 305 + struct ttm_buffer_object *bo; 306 + struct ttm_bo_kmap_obj map; 307 + volatile SVGA3dQueryResult *result; 308 + bool dummy; 304 309 310 + /* 311 + * Create the bo as pinned, so that a tryreserve will 312 + * immediately succeed. This is because we're the only 313 + * user of the bo currently. 314 + */ 315 + ret = ttm_bo_create(&dev_priv->bdev, 316 + PAGE_SIZE, 317 + ttm_bo_type_device, 318 + &vmw_sys_ne_placement, 319 + 0, false, NULL, 320 + &bo); 321 + 322 + if (unlikely(ret != 0)) 323 + return ret; 324 + 325 + ret = ttm_bo_reserve(bo, false, true, false, 0); 326 + BUG_ON(ret != 0); 327 + 328 + ret = ttm_bo_kmap(bo, 0, 1, &map); 329 + if (likely(ret == 0)) { 330 + result = ttm_kmap_obj_virtual(&map, &dummy); 331 + result->totalSize = sizeof(*result); 332 + result->state = SVGA3D_QUERYSTATE_PENDING; 333 + result->result32 = 0xff; 334 + ttm_bo_kunmap(&map); 335 + } 336 + vmw_bo_pin(bo, false); 337 + ttm_bo_unreserve(bo); 338 + 339 + if (unlikely(ret != 0)) { 340 + DRM_ERROR("Dummy query buffer map failed.\n"); 341 + ttm_bo_unref(&bo); 342 + } else 343 + dev_priv->dummy_query_bo = bo; 344 + 345 + return ret; 346 + } 305 347 306 348 static int vmw_request_device(struct vmw_private *dev_priv) 307 349 { ··· 350 318 return ret; 351 319 } 352 320 vmw_fence_fifo_up(dev_priv->fman); 321 + if (dev_priv->has_mob) { 322 + ret = vmw_otables_setup(dev_priv); 323 + if (unlikely(ret != 0)) { 324 + DRM_ERROR("Unable to initialize " 325 + "guest Memory OBjects.\n"); 326 + goto out_no_mob; 327 + } 328 + } 353 329 ret = vmw_dummy_query_bo_create(dev_priv); 354 330 if (unlikely(ret != 0)) 355 331 goto out_no_query_bo; 356 - vmw_dummy_query_bo_prepare(dev_priv); 357 332 358 333 return 0; 359 
334 360 335 out_no_query_bo: 336 + if (dev_priv->has_mob) 337 + vmw_otables_takedown(dev_priv); 338 + out_no_mob: 361 339 vmw_fence_fifo_down(dev_priv->fman); 362 340 vmw_fifo_release(dev_priv, &dev_priv->fifo); 363 341 return ret; ··· 383 341 BUG_ON(dev_priv->pinned_bo != NULL); 384 342 385 343 ttm_bo_unref(&dev_priv->dummy_query_bo); 344 + if (dev_priv->has_mob) 345 + vmw_otables_takedown(dev_priv); 386 346 vmw_fence_fifo_down(dev_priv->fman); 387 347 vmw_fifo_release(dev_priv, &dev_priv->fifo); 388 348 } 349 + 389 350 390 351 /** 391 352 * Increase the 3d resource refcount. ··· 555 510 return 0; 556 511 } 557 512 513 + /** 514 + * vmw_dma_masks - set required page- and dma masks 515 + * 516 + * @dev: Pointer to struct drm-device 517 + * 518 + * With 32-bit we can only handle 32 bit PFNs. Optionally set that 519 + * restriction also for 64-bit systems. 520 + */ 521 + #ifdef CONFIG_INTEL_IOMMU 522 + static int vmw_dma_masks(struct vmw_private *dev_priv) 523 + { 524 + struct drm_device *dev = dev_priv->dev; 525 + 526 + if (intel_iommu_enabled && 527 + (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { 528 + DRM_INFO("Restricting DMA addresses to 44 bits.\n"); 529 + return dma_set_mask(dev->dev, DMA_BIT_MASK(44)); 530 + } 531 + return 0; 532 + } 533 + #else 534 + static int vmw_dma_masks(struct vmw_private *dev_priv) 535 + { 536 + return 0; 537 + } 538 + #endif 539 + 558 540 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) 559 541 { 560 542 struct vmw_private *dev_priv; ··· 604 532 mutex_init(&dev_priv->hw_mutex); 605 533 mutex_init(&dev_priv->cmdbuf_mutex); 606 534 mutex_init(&dev_priv->release_mutex); 535 + mutex_init(&dev_priv->binding_mutex); 607 536 rwlock_init(&dev_priv->resource_lock); 608 537 609 538 for (i = vmw_res_context; i < vmw_res_max; ++i) { ··· 651 578 652 579 vmw_get_initial_size(dev_priv); 653 580 654 - if (dev_priv->capabilities & SVGA_CAP_GMR) { 655 - dev_priv->max_gmr_descriptors = 656 - vmw_read(dev_priv, 657 - 
SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH); 581 + if (dev_priv->capabilities & SVGA_CAP_GMR2) { 658 582 dev_priv->max_gmr_ids = 659 583 vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); 660 - } 661 - if (dev_priv->capabilities & SVGA_CAP_GMR2) { 662 584 dev_priv->max_gmr_pages = 663 585 vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); 664 586 dev_priv->memory_size = ··· 666 598 */ 667 599 dev_priv->memory_size = 512*1024*1024; 668 600 } 601 + dev_priv->max_mob_pages = 0; 602 + if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { 603 + uint64_t mem_size = 604 + vmw_read(dev_priv, 605 + SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB); 606 + 607 + dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; 608 + dev_priv->prim_bb_mem = 609 + vmw_read(dev_priv, 610 + SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); 611 + } else 612 + dev_priv->prim_bb_mem = dev_priv->vram_size; 613 + 614 + ret = vmw_dma_masks(dev_priv); 615 + if (unlikely(ret != 0)) 616 + goto out_err0; 617 + 618 + if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size)) 619 + dev_priv->prim_bb_mem = dev_priv->vram_size; 669 620 670 621 mutex_unlock(&dev_priv->hw_mutex); 671 622 672 623 vmw_print_capabilities(dev_priv->capabilities); 673 624 674 - if (dev_priv->capabilities & SVGA_CAP_GMR) { 625 + if (dev_priv->capabilities & SVGA_CAP_GMR2) { 675 626 DRM_INFO("Max GMR ids is %u\n", 676 627 (unsigned)dev_priv->max_gmr_ids); 677 - DRM_INFO("Max GMR descriptors is %u\n", 678 - (unsigned)dev_priv->max_gmr_descriptors); 679 - } 680 - if (dev_priv->capabilities & SVGA_CAP_GMR2) { 681 628 DRM_INFO("Max number of GMR pages is %u\n", 682 629 (unsigned)dev_priv->max_gmr_pages); 683 630 DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", 684 631 (unsigned)dev_priv->memory_size / 1024); 685 632 } 633 + DRM_INFO("Maximum display memory size is %u kiB\n", 634 + dev_priv->prim_bb_mem / 1024); 686 635 DRM_INFO("VRAM at 0x%08x size is %u kiB\n", 687 636 dev_priv->vram_start, dev_priv->vram_size / 1024); 688 637 DRM_INFO("MMIO at 0x%08x size is %u 
kiB\n", ··· 734 649 dev_priv->has_gmr = true; 735 650 if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || 736 651 refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, 737 - dev_priv->max_gmr_ids) != 0) { 652 + VMW_PL_GMR) != 0) { 738 653 DRM_INFO("No GMR memory available. " 739 654 "Graphics memory resources are very limited.\n"); 740 655 dev_priv->has_gmr = false; 656 + } 657 + 658 + if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { 659 + dev_priv->has_mob = true; 660 + if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, 661 + VMW_PL_MOB) != 0) { 662 + DRM_INFO("No MOB memory available. " 663 + "3D will be disabled.\n"); 664 + dev_priv->has_mob = false; 665 + } 741 666 } 742 667 743 668 dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, ··· 852 757 iounmap(dev_priv->mmio_virt); 853 758 out_err3: 854 759 arch_phys_wc_del(dev_priv->mmio_mtrr); 760 + if (dev_priv->has_mob) 761 + (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); 855 762 if (dev_priv->has_gmr) 856 763 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); 857 764 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); ··· 898 801 ttm_object_device_release(&dev_priv->tdev); 899 802 iounmap(dev_priv->mmio_virt); 900 803 arch_phys_wc_del(dev_priv->mmio_mtrr); 804 + if (dev_priv->has_mob) 805 + (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); 901 806 if (dev_priv->has_gmr) 902 807 (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); 903 808 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+185 -26
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 40 40 #include <drm/ttm/ttm_module.h> 41 41 #include "vmwgfx_fence.h" 42 42 43 - #define VMWGFX_DRIVER_DATE "20120209" 43 + #define VMWGFX_DRIVER_DATE "20121114" 44 44 #define VMWGFX_DRIVER_MAJOR 2 45 - #define VMWGFX_DRIVER_MINOR 4 45 + #define VMWGFX_DRIVER_MINOR 5 46 46 #define VMWGFX_DRIVER_PATCHLEVEL 0 47 47 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 48 48 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) ··· 50 50 #define VMWGFX_MAX_VALIDATIONS 2048 51 51 #define VMWGFX_MAX_DISPLAYS 16 52 52 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768 53 + #define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0 54 + 55 + /* 56 + * Perhaps we should have sysfs entries for these. 57 + */ 58 + #define VMWGFX_NUM_GB_CONTEXT 256 59 + #define VMWGFX_NUM_GB_SHADER 20000 60 + #define VMWGFX_NUM_GB_SURFACE 32768 61 + #define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS 62 + #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\ 63 + VMWGFX_NUM_GB_SHADER +\ 64 + VMWGFX_NUM_GB_SURFACE +\ 65 + VMWGFX_NUM_GB_SCREEN_TARGET) 53 66 54 67 #define VMW_PL_GMR TTM_PL_PRIV0 55 68 #define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0 69 + #define VMW_PL_MOB TTM_PL_PRIV1 70 + #define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1 56 71 57 72 #define VMW_RES_CONTEXT ttm_driver_type0 58 73 #define VMW_RES_SURFACE ttm_driver_type1 59 74 #define VMW_RES_STREAM ttm_driver_type2 60 75 #define VMW_RES_FENCE ttm_driver_type3 76 + #define VMW_RES_SHADER ttm_driver_type4 61 77 62 78 struct vmw_fpriv { 63 79 struct drm_master *locked_master; ··· 98 82 struct vmw_validate_buffer { 99 83 struct ttm_validate_buffer base; 100 84 struct drm_hash_item hash; 85 + bool validate_as_mob; 101 86 }; 102 87 103 88 struct vmw_res_func; ··· 115 98 const struct vmw_res_func *func; 116 99 struct list_head lru_head; /* Protected by the resource lock */ 117 100 struct list_head mob_head; /* Protected by @backup reserved */ 101 + struct list_head binding_head; /* Protected by binding_mutex */ 118 102 void (*res_free) (struct vmw_resource *res); 119 103 void (*hw_destroy) 
(struct vmw_resource *res); 120 104 }; ··· 124 106 vmw_res_context, 125 107 vmw_res_surface, 126 108 vmw_res_stream, 109 + vmw_res_shader, 127 110 vmw_res_max 128 111 }; 129 112 ··· 173 154 }; 174 155 175 156 struct vmw_relocation { 157 + SVGAMobId *mob_loc; 176 158 SVGAGuestPtr *location; 177 159 uint32_t index; 178 160 }; ··· 249 229 struct page *(*page)(struct vmw_piter *); 250 230 }; 251 231 232 + /* 233 + * enum vmw_ctx_binding_type - abstract resource to context binding types 234 + */ 235 + enum vmw_ctx_binding_type { 236 + vmw_ctx_binding_shader, 237 + vmw_ctx_binding_rt, 238 + vmw_ctx_binding_tex, 239 + vmw_ctx_binding_max 240 + }; 241 + 242 + /** 243 + * struct vmw_ctx_bindinfo - structure representing a single context binding 244 + * 245 + * @ctx: Pointer to the context structure. NULL means the binding is not 246 + * active. 247 + * @res: Non ref-counted pointer to the bound resource. 248 + * @bt: The binding type. 249 + * @i1: Union of information needed to unbind. 250 + */ 251 + struct vmw_ctx_bindinfo { 252 + struct vmw_resource *ctx; 253 + struct vmw_resource *res; 254 + enum vmw_ctx_binding_type bt; 255 + union { 256 + SVGA3dShaderType shader_type; 257 + SVGA3dRenderTargetType rt_type; 258 + uint32 texture_stage; 259 + } i1; 260 + }; 261 + 262 + /** 263 + * struct vmw_ctx_binding - structure representing a single context binding 264 + * - suitable for tracking in a context 265 + * 266 + * @ctx_list: List head for context. 267 + * @res_list: List head for bound resource. 268 + * @bi: Binding info 269 + */ 270 + struct vmw_ctx_binding { 271 + struct list_head ctx_list; 272 + struct list_head res_list; 273 + struct vmw_ctx_bindinfo bi; 274 + }; 275 + 276 + 277 + /** 278 + * struct vmw_ctx_binding_state - context binding state 279 + * 280 + * @list: linked list of individual bindings. 281 + * @render_targets: Render target bindings. 282 + * @texture_units: Texture units/samplers bindings. 283 + * @shaders: Shader bindings. 
284 + * 285 + * Note that this structure also provides storage space for the individual 286 + * struct vmw_ctx_binding objects, so that no dynamic allocation is needed 287 + * for individual bindings. 288 + * 289 + */ 290 + struct vmw_ctx_binding_state { 291 + struct list_head list; 292 + struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX]; 293 + struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS]; 294 + struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX]; 295 + }; 296 + 252 297 struct vmw_sw_context{ 253 298 struct drm_open_hash res_ht; 254 299 bool res_ht_initialized; ··· 335 250 struct vmw_resource *last_query_ctx; 336 251 bool needs_post_query_barrier; 337 252 struct vmw_resource *error_resource; 253 + struct vmw_ctx_binding_state staged_bindings; 338 254 }; 339 255 340 256 struct vmw_legacy_display; ··· 367 281 unsigned int io_start; 368 282 uint32_t vram_start; 369 283 uint32_t vram_size; 284 + uint32_t prim_bb_mem; 370 285 uint32_t mmio_start; 371 286 uint32_t mmio_size; 372 287 uint32_t fb_max_width; ··· 377 290 __le32 __iomem *mmio_virt; 378 291 int mmio_mtrr; 379 292 uint32_t capabilities; 380 - uint32_t max_gmr_descriptors; 381 293 uint32_t max_gmr_ids; 382 294 uint32_t max_gmr_pages; 295 + uint32_t max_mob_pages; 383 296 uint32_t memory_size; 384 297 bool has_gmr; 298 + bool has_mob; 385 299 struct mutex hw_mutex; 386 300 387 301 /* ··· 458 370 459 371 struct vmw_sw_context ctx; 460 372 struct mutex cmdbuf_mutex; 373 + struct mutex binding_mutex; 461 374 462 375 /** 463 376 * Operating mode. ··· 504 415 * DMA mapping stuff. 
505 416 */ 506 417 enum vmw_dma_map_mode map_mode; 418 + 419 + /* 420 + * Guest Backed stuff 421 + */ 422 + struct ttm_buffer_object *otable_bo; 423 + struct vmw_otable *otables; 507 424 }; 508 425 509 426 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) ··· 566 471 * Resource utilities - vmwgfx_resource.c 567 472 */ 568 473 struct vmw_user_resource_conv; 569 - extern const struct vmw_user_resource_conv *user_surface_converter; 570 - extern const struct vmw_user_resource_conv *user_context_converter; 571 474 572 - extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); 573 475 extern void vmw_resource_unreference(struct vmw_resource **p_res); 574 476 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); 575 477 extern int vmw_resource_validate(struct vmw_resource *res); 576 478 extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup); 577 479 extern bool vmw_resource_needs_backup(const struct vmw_resource *res); 578 - extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, 579 - struct drm_file *file_priv); 580 - extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, 581 - struct drm_file *file_priv); 582 - extern int vmw_context_check(struct vmw_private *dev_priv, 583 - struct ttm_object_file *tfile, 584 - int id, 585 - struct vmw_resource **p_res); 586 480 extern int vmw_user_lookup_handle(struct vmw_private *dev_priv, 587 481 struct ttm_object_file *tfile, 588 482 uint32_t handle, ··· 583 499 uint32_t handle, 584 500 const struct vmw_user_resource_conv *converter, 585 501 struct vmw_resource **p_res); 586 - extern void vmw_surface_res_free(struct vmw_resource *res); 587 - extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, 588 - struct drm_file *file_priv); 589 - extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, 590 - struct drm_file *file_priv); 591 - extern int vmw_surface_reference_ioctl(struct 
drm_device *dev, void *data, 592 - struct drm_file *file_priv); 593 - extern int vmw_surface_check(struct vmw_private *dev_priv, 594 - struct ttm_object_file *tfile, 595 - uint32_t handle, int *id); 596 - extern int vmw_surface_validate(struct vmw_private *dev_priv, 597 - struct vmw_surface *srf); 598 502 extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo); 599 503 extern int vmw_dmabuf_init(struct vmw_private *dev_priv, 600 504 struct vmw_dma_buffer *vmw_bo, ··· 591 519 void (*bo_free) (struct ttm_buffer_object *bo)); 592 520 extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, 593 521 struct ttm_object_file *tfile); 522 + extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, 523 + struct ttm_object_file *tfile, 524 + uint32_t size, 525 + bool shareable, 526 + uint32_t *handle, 527 + struct vmw_dma_buffer **p_dma_buf); 528 + extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, 529 + struct vmw_dma_buffer *dma_buf, 530 + uint32_t *handle); 594 531 extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, 595 532 struct drm_file *file_priv); 596 533 extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, 597 534 struct drm_file *file_priv); 535 + extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, 536 + struct drm_file *file_priv); 598 537 extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, 599 538 uint32_t cur_validate_node); 600 539 extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); ··· 705 622 extern struct ttm_placement vmw_vram_gmr_placement; 706 623 extern struct ttm_placement vmw_vram_gmr_ne_placement; 707 624 extern struct ttm_placement vmw_sys_placement; 625 + extern struct ttm_placement vmw_sys_ne_placement; 708 626 extern struct ttm_placement vmw_evictable_placement; 709 627 extern struct ttm_placement vmw_srf_placement; 628 + extern struct ttm_placement vmw_mob_placement; 710 629 extern struct ttm_bo_driver 
vmw_bo_driver; 711 630 extern int vmw_dma_quiescent(struct drm_device *dev); 631 + extern int vmw_bo_map_dma(struct ttm_buffer_object *bo); 632 + extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo); 633 + extern const struct vmw_sg_table * 634 + vmw_bo_sg_table(struct ttm_buffer_object *bo); 712 635 extern void vmw_piter_start(struct vmw_piter *viter, 713 636 const struct vmw_sg_table *vsgt, 714 637 unsigned long p_offs); ··· 921 832 uint32_t handle, uint32_t flags, 922 833 int *prime_fd); 923 834 835 + /* 836 + * MemoryOBject management - vmwgfx_mob.c 837 + */ 838 + struct vmw_mob; 839 + extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob, 840 + const struct vmw_sg_table *vsgt, 841 + unsigned long num_data_pages, int32_t mob_id); 842 + extern void vmw_mob_unbind(struct vmw_private *dev_priv, 843 + struct vmw_mob *mob); 844 + extern void vmw_mob_destroy(struct vmw_mob *mob); 845 + extern struct vmw_mob *vmw_mob_create(unsigned long data_pages); 846 + extern int vmw_otables_setup(struct vmw_private *dev_priv); 847 + extern void vmw_otables_takedown(struct vmw_private *dev_priv); 848 + 849 + /* 850 + * Context management - vmwgfx_context.c 851 + */ 852 + 853 + extern const struct vmw_user_resource_conv *user_context_converter; 854 + 855 + extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); 856 + 857 + extern int vmw_context_check(struct vmw_private *dev_priv, 858 + struct ttm_object_file *tfile, 859 + int id, 860 + struct vmw_resource **p_res); 861 + extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, 862 + struct drm_file *file_priv); 863 + extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, 864 + struct drm_file *file_priv); 865 + extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs, 866 + const struct vmw_ctx_bindinfo *ci); 867 + extern void 868 + vmw_context_binding_state_transfer(struct vmw_resource *res, 869 + struct vmw_ctx_binding_state *cbs); 870 + 
extern void vmw_context_binding_res_list_kill(struct list_head *head); 871 + 872 + /* 873 + * Surface management - vmwgfx_surface.c 874 + */ 875 + 876 + extern const struct vmw_user_resource_conv *user_surface_converter; 877 + 878 + extern void vmw_surface_res_free(struct vmw_resource *res); 879 + extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, 880 + struct drm_file *file_priv); 881 + extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, 882 + struct drm_file *file_priv); 883 + extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, 884 + struct drm_file *file_priv); 885 + extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, 886 + struct drm_file *file_priv); 887 + extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, 888 + struct drm_file *file_priv); 889 + extern int vmw_surface_check(struct vmw_private *dev_priv, 890 + struct ttm_object_file *tfile, 891 + uint32_t handle, int *id); 892 + extern int vmw_surface_validate(struct vmw_private *dev_priv, 893 + struct vmw_surface *srf); 894 + 895 + /* 896 + * Shader management - vmwgfx_shader.c 897 + */ 898 + 899 + extern const struct vmw_user_resource_conv *user_shader_converter; 900 + 901 + extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data, 902 + struct drm_file *file_priv); 903 + extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, 904 + struct drm_file *file_priv); 924 905 925 906 /** 926 907 * Inline helper functions
+796 -76
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 54 54 * @res: Ref-counted pointer to the resource. 55 55 * @switch_backup: Boolean whether to switch backup buffer on unreserve. 56 56 * @new_backup: Refcounted pointer to the new backup buffer. 57 + * @staged_bindings: If @res is a context, tracks bindings set up during 58 + * the command batch. Otherwise NULL. 57 59 * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll. 58 60 * @first_usage: Set to true the first time the resource is referenced in 59 61 * the command stream. ··· 67 65 struct drm_hash_item hash; 68 66 struct vmw_resource *res; 69 67 struct vmw_dma_buffer *new_backup; 68 + struct vmw_ctx_binding_state *staged_bindings; 70 69 unsigned long new_backup_offset; 71 70 bool first_usage; 72 71 bool no_buffer_needed; 73 72 }; 73 + 74 + /** 75 + * struct vmw_cmd_entry - Describe a command for the verifier 76 + * 77 + * @user_allow: Whether allowed from the execbuf ioctl. 78 + * @gb_disable: Whether disabled if guest-backed objects are available. 79 + * @gb_enable: Whether enabled iff guest-backed objects are available. 80 + */ 81 + struct vmw_cmd_entry { 82 + int (*func) (struct vmw_private *, struct vmw_sw_context *, 83 + SVGA3dCmdHeader *); 84 + bool user_allow; 85 + bool gb_disable; 86 + bool gb_enable; 87 + }; 88 + 89 + #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \ 90 + [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\ 91 + (_gb_disable), (_gb_enable)} 74 92 75 93 /** 76 94 * vmw_resource_unreserve - unreserve resources previously reserved for ··· 109 87 struct vmw_dma_buffer *new_backup = 110 88 backoff ? NULL : val->new_backup; 111 89 90 + /* 91 + * Transfer staged context bindings to the 92 + * persistent context binding tracker. 
93 + */ 94 + if (unlikely(val->staged_bindings)) { 95 + vmw_context_binding_state_transfer 96 + (val->res, val->staged_bindings); 97 + kfree(val->staged_bindings); 98 + val->staged_bindings = NULL; 99 + } 112 100 vmw_resource_unreserve(res, new_backup, 113 101 val->new_backup_offset); 114 102 vmw_dmabuf_unreference(&val->new_backup); ··· 256 224 * 257 225 * @sw_context: The software context used for this command submission batch. 258 226 * @bo: The buffer object to add. 227 + * @validate_as_mob: Validate this buffer as a MOB. 259 228 * @p_val_node: If non-NULL Will be updated with the validate node number 260 229 * on return. 261 230 * ··· 265 232 */ 266 233 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, 267 234 struct ttm_buffer_object *bo, 235 + bool validate_as_mob, 268 236 uint32_t *p_val_node) 269 237 { 270 238 uint32_t val_node; ··· 278 244 &hash) == 0)) { 279 245 vval_buf = container_of(hash, struct vmw_validate_buffer, 280 246 hash); 247 + if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) { 248 + DRM_ERROR("Inconsistent buffer usage.\n"); 249 + return -EINVAL; 250 + } 281 251 val_buf = &vval_buf->base; 282 252 val_node = vval_buf - sw_context->val_bufs; 283 253 } else { ··· 304 266 val_buf->bo = ttm_bo_reference(bo); 305 267 val_buf->reserved = false; 306 268 list_add_tail(&val_buf->head, &sw_context->validate_nodes); 269 + vval_buf->validate_as_mob = validate_as_mob; 307 270 } 308 271 309 272 sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC; ··· 341 302 struct ttm_buffer_object *bo = &res->backup->base; 342 303 343 304 ret = vmw_bo_to_validate_list 344 - (sw_context, bo, NULL); 305 + (sw_context, bo, 306 + vmw_resource_needs_backup(res), NULL); 345 307 346 308 if (unlikely(ret != 0)) 347 309 return ret; ··· 402 362 struct vmw_resource_val_node *node; 403 363 int ret; 404 364 405 - if (*id == SVGA3D_INVALID_ID) 365 + if (*id == SVGA3D_INVALID_ID) { 366 + if (p_val) 367 + *p_val = NULL; 368 + if (res_type == 
vmw_res_context) { 369 + DRM_ERROR("Illegal context invalid id.\n"); 370 + return -EINVAL; 371 + } 406 372 return 0; 373 + } 407 374 408 375 /* 409 376 * Fastpath in case of repeated commands referencing the same ··· 458 411 rcache->node = node; 459 412 if (p_val) 460 413 *p_val = node; 414 + 415 + if (node->first_usage && res_type == vmw_res_context) { 416 + node->staged_bindings = 417 + kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); 418 + if (node->staged_bindings == NULL) { 419 + DRM_ERROR("Failed to allocate context binding " 420 + "information.\n"); 421 + goto out_no_reloc; 422 + } 423 + INIT_LIST_HEAD(&node->staged_bindings->list); 424 + } 425 + 461 426 vmw_resource_unreference(&res); 462 427 return 0; 463 428 ··· 512 453 SVGA3dCmdHeader header; 513 454 SVGA3dCmdSetRenderTarget body; 514 455 } *cmd; 456 + struct vmw_resource_val_node *ctx_node; 457 + struct vmw_resource_val_node *res_node; 515 458 int ret; 516 459 517 - ret = vmw_cmd_cid_check(dev_priv, sw_context, header); 460 + cmd = container_of(header, struct vmw_sid_cmd, header); 461 + 462 + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 463 + user_context_converter, &cmd->body.cid, 464 + &ctx_node); 518 465 if (unlikely(ret != 0)) 519 466 return ret; 520 467 521 - cmd = container_of(header, struct vmw_sid_cmd, header); 522 468 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 523 469 user_surface_converter, 524 - &cmd->body.target.sid, NULL); 525 - return ret; 470 + &cmd->body.target.sid, &res_node); 471 + if (unlikely(ret != 0)) 472 + return ret; 473 + 474 + if (dev_priv->has_mob) { 475 + struct vmw_ctx_bindinfo bi; 476 + 477 + bi.ctx = ctx_node->res; 478 + bi.res = res_node ? 
res_node->res : NULL; 479 + bi.bt = vmw_ctx_binding_rt; 480 + bi.i1.rt_type = cmd->body.type; 481 + return vmw_context_binding_add(ctx_node->staged_bindings, &bi); 482 + } 483 + 484 + return 0; 526 485 } 527 486 528 487 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, ··· 596 519 597 520 cmd = container_of(header, struct vmw_sid_cmd, header); 598 521 599 - if (unlikely(!sw_context->kernel)) { 600 - DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id); 601 - return -EPERM; 602 - } 603 - 604 522 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 605 523 user_surface_converter, 606 524 &cmd->body.srcImage.sid, NULL); ··· 612 540 613 541 614 542 cmd = container_of(header, struct vmw_sid_cmd, header); 615 - 616 - if (unlikely(!sw_context->kernel)) { 617 - DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id); 618 - return -EPERM; 619 - } 620 543 621 544 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 622 545 user_surface_converter, &cmd->body.sid, ··· 653 586 sw_context->needs_post_query_barrier = true; 654 587 ret = vmw_bo_to_validate_list(sw_context, 655 588 sw_context->cur_query_bo, 656 - NULL); 589 + dev_priv->has_mob, NULL); 657 590 if (unlikely(ret != 0)) 658 591 return ret; 659 592 } ··· 661 594 662 595 ret = vmw_bo_to_validate_list(sw_context, 663 596 dev_priv->dummy_query_bo, 664 - NULL); 597 + dev_priv->has_mob, NULL); 665 598 if (unlikely(ret != 0)) 666 599 return ret; 667 600 ··· 739 672 } 740 673 741 674 /** 675 + * vmw_translate_mob_pointer - Prepare to translate a user-space buffer 676 + * handle to a MOB id. 677 + * 678 + * @dev_priv: Pointer to a device private structure. 679 + * @sw_context: The software context used for this command batch validation. 680 + * @id: Pointer to the user-space handle to be translated. 
681 + * @vmw_bo_p: Points to a location that, on successful return will carry 682 + * a reference-counted pointer to the DMA buffer identified by the 683 + * user-space handle in @id. 684 + * 685 + * This function saves information needed to translate a user-space buffer 686 + * handle to a MOB id. The translation does not take place immediately, but 687 + * during a call to vmw_apply_relocations(). This function builds a relocation 688 + * list and a list of buffers to validate. The former needs to be freed using 689 + * either vmw_apply_relocations() or vmw_free_relocations(). The latter 690 + * needs to be freed using vmw_clear_validations. 691 + */ 692 + static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, 693 + struct vmw_sw_context *sw_context, 694 + SVGAMobId *id, 695 + struct vmw_dma_buffer **vmw_bo_p) 696 + { 697 + struct vmw_dma_buffer *vmw_bo = NULL; 698 + struct ttm_buffer_object *bo; 699 + uint32_t handle = *id; 700 + struct vmw_relocation *reloc; 701 + int ret; 702 + 703 + ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); 704 + if (unlikely(ret != 0)) { 705 + DRM_ERROR("Could not find or use MOB buffer.\n"); 706 + return -EINVAL; 707 + } 708 + bo = &vmw_bo->base; 709 + 710 + if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { 711 + DRM_ERROR("Max number relocations per submission" 712 + " exceeded\n"); 713 + ret = -EINVAL; 714 + goto out_no_reloc; 715 + } 716 + 717 + reloc = &sw_context->relocs[sw_context->cur_reloc++]; 718 + reloc->mob_loc = id; 719 + reloc->location = NULL; 720 + 721 + ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index); 722 + if (unlikely(ret != 0)) 723 + goto out_no_reloc; 724 + 725 + *vmw_bo_p = vmw_bo; 726 + return 0; 727 + 728 + out_no_reloc: 729 + vmw_dmabuf_unreference(&vmw_bo); 730 + vmw_bo_p = NULL; 731 + return ret; 732 + } 733 + 734 + /** 742 735 * vmw_translate_guest_pointer - Prepare to translate a user-space buffer 743 736 * handle to a valid SVGAGuestPtr 744 737 * 
··· 845 718 reloc = &sw_context->relocs[sw_context->cur_reloc++]; 846 719 reloc->location = ptr; 847 720 848 - ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index); 721 + ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index); 849 722 if (unlikely(ret != 0)) 850 723 goto out_no_reloc; 851 724 ··· 856 729 vmw_dmabuf_unreference(&vmw_bo); 857 730 vmw_bo_p = NULL; 858 731 return ret; 732 + } 733 + 734 + /** 735 + * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command. 736 + * 737 + * @dev_priv: Pointer to a device private struct. 738 + * @sw_context: The software context used for this command submission. 739 + * @header: Pointer to the command header in the command stream. 740 + */ 741 + static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv, 742 + struct vmw_sw_context *sw_context, 743 + SVGA3dCmdHeader *header) 744 + { 745 + struct vmw_begin_gb_query_cmd { 746 + SVGA3dCmdHeader header; 747 + SVGA3dCmdBeginGBQuery q; 748 + } *cmd; 749 + 750 + cmd = container_of(header, struct vmw_begin_gb_query_cmd, 751 + header); 752 + 753 + return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 754 + user_context_converter, &cmd->q.cid, 755 + NULL); 859 756 } 860 757 861 758 /** ··· 901 750 cmd = container_of(header, struct vmw_begin_query_cmd, 902 751 header); 903 752 753 + if (unlikely(dev_priv->has_mob)) { 754 + struct { 755 + SVGA3dCmdHeader header; 756 + SVGA3dCmdBeginGBQuery q; 757 + } gb_cmd; 758 + 759 + BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); 760 + 761 + gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY; 762 + gb_cmd.header.size = cmd->header.size; 763 + gb_cmd.q.cid = cmd->q.cid; 764 + gb_cmd.q.type = cmd->q.type; 765 + 766 + memcpy(cmd, &gb_cmd, sizeof(*cmd)); 767 + return vmw_cmd_begin_gb_query(dev_priv, sw_context, header); 768 + } 769 + 904 770 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 905 771 user_context_converter, &cmd->q.cid, 906 772 NULL); 773 + } 774 + 775 + /** 776 + * vmw_cmd_end_gb_query 
- validate a SVGA_3D_CMD_END_GB_QUERY command. 777 + * 778 + * @dev_priv: Pointer to a device private struct. 779 + * @sw_context: The software context used for this command submission. 780 + * @header: Pointer to the command header in the command stream. 781 + */ 782 + static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv, 783 + struct vmw_sw_context *sw_context, 784 + SVGA3dCmdHeader *header) 785 + { 786 + struct vmw_dma_buffer *vmw_bo; 787 + struct vmw_query_cmd { 788 + SVGA3dCmdHeader header; 789 + SVGA3dCmdEndGBQuery q; 790 + } *cmd; 791 + int ret; 792 + 793 + cmd = container_of(header, struct vmw_query_cmd, header); 794 + ret = vmw_cmd_cid_check(dev_priv, sw_context, header); 795 + if (unlikely(ret != 0)) 796 + return ret; 797 + 798 + ret = vmw_translate_mob_ptr(dev_priv, sw_context, 799 + &cmd->q.mobid, 800 + &vmw_bo); 801 + if (unlikely(ret != 0)) 802 + return ret; 803 + 804 + ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context); 805 + 806 + vmw_dmabuf_unreference(&vmw_bo); 807 + return ret; 907 808 } 908 809 909 810 /** ··· 977 774 int ret; 978 775 979 776 cmd = container_of(header, struct vmw_query_cmd, header); 777 + if (dev_priv->has_mob) { 778 + struct { 779 + SVGA3dCmdHeader header; 780 + SVGA3dCmdEndGBQuery q; 781 + } gb_cmd; 782 + 783 + BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); 784 + 785 + gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY; 786 + gb_cmd.header.size = cmd->header.size; 787 + gb_cmd.q.cid = cmd->q.cid; 788 + gb_cmd.q.type = cmd->q.type; 789 + gb_cmd.q.mobid = cmd->q.guestResult.gmrId; 790 + gb_cmd.q.offset = cmd->q.guestResult.offset; 791 + 792 + memcpy(cmd, &gb_cmd, sizeof(*cmd)); 793 + return vmw_cmd_end_gb_query(dev_priv, sw_context, header); 794 + } 795 + 980 796 ret = vmw_cmd_cid_check(dev_priv, sw_context, header); 981 797 if (unlikely(ret != 0)) 982 798 return ret; ··· 1012 790 return ret; 1013 791 } 1014 792 1015 - /* 793 + /** 794 + * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command. 
795 + * 796 + * @dev_priv: Pointer to a device private struct. 797 + * @sw_context: The software context used for this command submission. 798 + * @header: Pointer to the command header in the command stream. 799 + */ 800 + static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv, 801 + struct vmw_sw_context *sw_context, 802 + SVGA3dCmdHeader *header) 803 + { 804 + struct vmw_dma_buffer *vmw_bo; 805 + struct vmw_query_cmd { 806 + SVGA3dCmdHeader header; 807 + SVGA3dCmdWaitForGBQuery q; 808 + } *cmd; 809 + int ret; 810 + 811 + cmd = container_of(header, struct vmw_query_cmd, header); 812 + ret = vmw_cmd_cid_check(dev_priv, sw_context, header); 813 + if (unlikely(ret != 0)) 814 + return ret; 815 + 816 + ret = vmw_translate_mob_ptr(dev_priv, sw_context, 817 + &cmd->q.mobid, 818 + &vmw_bo); 819 + if (unlikely(ret != 0)) 820 + return ret; 821 + 822 + vmw_dmabuf_unreference(&vmw_bo); 823 + return 0; 824 + } 825 + 826 + /** 1016 827 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command. 1017 828 * 1018 829 * @dev_priv: Pointer to a device private struct. 
··· 1064 809 int ret; 1065 810 1066 811 cmd = container_of(header, struct vmw_query_cmd, header); 812 + if (dev_priv->has_mob) { 813 + struct { 814 + SVGA3dCmdHeader header; 815 + SVGA3dCmdWaitForGBQuery q; 816 + } gb_cmd; 817 + 818 + BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); 819 + 820 + gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY; 821 + gb_cmd.header.size = cmd->header.size; 822 + gb_cmd.q.cid = cmd->q.cid; 823 + gb_cmd.q.type = cmd->q.type; 824 + gb_cmd.q.mobid = cmd->q.guestResult.gmrId; 825 + gb_cmd.q.offset = cmd->q.guestResult.offset; 826 + 827 + memcpy(cmd, &gb_cmd, sizeof(*cmd)); 828 + return vmw_cmd_wait_gb_query(dev_priv, sw_context, header); 829 + } 830 + 1067 831 ret = vmw_cmd_cid_check(dev_priv, sw_context, header); 1068 832 if (unlikely(ret != 0)) 1069 833 return ret; ··· 1195 921 struct vmw_tex_state_cmd { 1196 922 SVGA3dCmdHeader header; 1197 923 SVGA3dCmdSetTextureState state; 1198 - }; 924 + } *cmd; 1199 925 1200 926 SVGA3dTextureState *last_state = (SVGA3dTextureState *) 1201 927 ((unsigned long) header + header->size + sizeof(header)); 1202 928 SVGA3dTextureState *cur_state = (SVGA3dTextureState *) 1203 929 ((unsigned long) header + sizeof(struct vmw_tex_state_cmd)); 930 + struct vmw_resource_val_node *ctx_node; 931 + struct vmw_resource_val_node *res_node; 1204 932 int ret; 1205 933 1206 - ret = vmw_cmd_cid_check(dev_priv, sw_context, header); 934 + cmd = container_of(header, struct vmw_tex_state_cmd, 935 + header); 936 + 937 + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 938 + user_context_converter, &cmd->state.cid, 939 + &ctx_node); 1207 940 if (unlikely(ret != 0)) 1208 941 return ret; 1209 942 ··· 1220 939 1221 940 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1222 941 user_surface_converter, 1223 - &cur_state->value, NULL); 942 + &cur_state->value, &res_node); 1224 943 if (unlikely(ret != 0)) 1225 944 return ret; 945 + 946 + if (dev_priv->has_mob) { 947 + struct vmw_ctx_bindinfo bi; 948 + 949 + bi.ctx 
= ctx_node->res; 950 + bi.res = res_node ? res_node->res : NULL; 951 + bi.bt = vmw_ctx_binding_tex; 952 + bi.i1.texture_stage = cur_state->stage; 953 + vmw_context_binding_add(ctx_node->staged_bindings, 954 + &bi); 955 + } 1226 956 } 1227 957 1228 958 return 0; ··· 1263 971 } 1264 972 1265 973 /** 974 + * vmw_cmd_switch_backup - Utility function to handle backup buffer switching 975 + * 976 + * @dev_priv: Pointer to a device private struct. 977 + * @sw_context: The software context being used for this batch. 978 + * @res_type: The resource type. 979 + * @converter: Information about user-space binding for this resource type. 980 + * @res_id: Pointer to the user-space resource handle in the command stream. 981 + * @buf_id: Pointer to the user-space backup buffer handle in the command 982 + * stream. 983 + * @backup_offset: Offset of backup into MOB. 984 + * 985 + * This function prepares for registering a switch of backup buffers 986 + * in the resource metadata just prior to unreserving. 
987 + */ 988 + static int vmw_cmd_switch_backup(struct vmw_private *dev_priv, 989 + struct vmw_sw_context *sw_context, 990 + enum vmw_res_type res_type, 991 + const struct vmw_user_resource_conv 992 + *converter, 993 + uint32_t *res_id, 994 + uint32_t *buf_id, 995 + unsigned long backup_offset) 996 + { 997 + int ret; 998 + struct vmw_dma_buffer *dma_buf; 999 + struct vmw_resource_val_node *val_node; 1000 + 1001 + ret = vmw_cmd_res_check(dev_priv, sw_context, res_type, 1002 + converter, res_id, &val_node); 1003 + if (unlikely(ret != 0)) 1004 + return ret; 1005 + 1006 + ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf); 1007 + if (unlikely(ret != 0)) 1008 + return ret; 1009 + 1010 + if (val_node->first_usage) 1011 + val_node->no_buffer_needed = true; 1012 + 1013 + vmw_dmabuf_unreference(&val_node->new_backup); 1014 + val_node->new_backup = dma_buf; 1015 + val_node->new_backup_offset = backup_offset; 1016 + 1017 + return 0; 1018 + } 1019 + 1020 + /** 1021 + * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE 1022 + * command 1023 + * 1024 + * @dev_priv: Pointer to a device private struct. 1025 + * @sw_context: The software context being used for this batch. 1026 + * @header: Pointer to the command header in the command stream. 1027 + */ 1028 + static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv, 1029 + struct vmw_sw_context *sw_context, 1030 + SVGA3dCmdHeader *header) 1031 + { 1032 + struct vmw_bind_gb_surface_cmd { 1033 + SVGA3dCmdHeader header; 1034 + SVGA3dCmdBindGBSurface body; 1035 + } *cmd; 1036 + 1037 + cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header); 1038 + 1039 + return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface, 1040 + user_surface_converter, 1041 + &cmd->body.sid, &cmd->body.mobid, 1042 + 0); 1043 + } 1044 + 1045 + /** 1046 + * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE 1047 + * command 1048 + * 1049 + * @dev_priv: Pointer to a device private struct. 
1050 + * @sw_context: The software context being used for this batch. 1051 + * @header: Pointer to the command header in the command stream. 1052 + */ 1053 + static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv, 1054 + struct vmw_sw_context *sw_context, 1055 + SVGA3dCmdHeader *header) 1056 + { 1057 + struct vmw_gb_surface_cmd { 1058 + SVGA3dCmdHeader header; 1059 + SVGA3dCmdUpdateGBImage body; 1060 + } *cmd; 1061 + 1062 + cmd = container_of(header, struct vmw_gb_surface_cmd, header); 1063 + 1064 + return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1065 + user_surface_converter, 1066 + &cmd->body.image.sid, NULL); 1067 + } 1068 + 1069 + /** 1070 + * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE 1071 + * command 1072 + * 1073 + * @dev_priv: Pointer to a device private struct. 1074 + * @sw_context: The software context being used for this batch. 1075 + * @header: Pointer to the command header in the command stream. 1076 + */ 1077 + static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv, 1078 + struct vmw_sw_context *sw_context, 1079 + SVGA3dCmdHeader *header) 1080 + { 1081 + struct vmw_gb_surface_cmd { 1082 + SVGA3dCmdHeader header; 1083 + SVGA3dCmdUpdateGBSurface body; 1084 + } *cmd; 1085 + 1086 + cmd = container_of(header, struct vmw_gb_surface_cmd, header); 1087 + 1088 + return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1089 + user_surface_converter, 1090 + &cmd->body.sid, NULL); 1091 + } 1092 + 1093 + /** 1094 + * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE 1095 + * command 1096 + * 1097 + * @dev_priv: Pointer to a device private struct. 1098 + * @sw_context: The software context being used for this batch. 1099 + * @header: Pointer to the command header in the command stream. 
1100 + */ 1101 + static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv, 1102 + struct vmw_sw_context *sw_context, 1103 + SVGA3dCmdHeader *header) 1104 + { 1105 + struct vmw_gb_surface_cmd { 1106 + SVGA3dCmdHeader header; 1107 + SVGA3dCmdReadbackGBImage body; 1108 + } *cmd; 1109 + 1110 + cmd = container_of(header, struct vmw_gb_surface_cmd, header); 1111 + 1112 + return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1113 + user_surface_converter, 1114 + &cmd->body.image.sid, NULL); 1115 + } 1116 + 1117 + /** 1118 + * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE 1119 + * command 1120 + * 1121 + * @dev_priv: Pointer to a device private struct. 1122 + * @sw_context: The software context being used for this batch. 1123 + * @header: Pointer to the command header in the command stream. 1124 + */ 1125 + static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv, 1126 + struct vmw_sw_context *sw_context, 1127 + SVGA3dCmdHeader *header) 1128 + { 1129 + struct vmw_gb_surface_cmd { 1130 + SVGA3dCmdHeader header; 1131 + SVGA3dCmdReadbackGBSurface body; 1132 + } *cmd; 1133 + 1134 + cmd = container_of(header, struct vmw_gb_surface_cmd, header); 1135 + 1136 + return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1137 + user_surface_converter, 1138 + &cmd->body.sid, NULL); 1139 + } 1140 + 1141 + /** 1142 + * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE 1143 + * command 1144 + * 1145 + * @dev_priv: Pointer to a device private struct. 1146 + * @sw_context: The software context being used for this batch. 1147 + * @header: Pointer to the command header in the command stream. 
1148 + */ 1149 + static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv, 1150 + struct vmw_sw_context *sw_context, 1151 + SVGA3dCmdHeader *header) 1152 + { 1153 + struct vmw_gb_surface_cmd { 1154 + SVGA3dCmdHeader header; 1155 + SVGA3dCmdInvalidateGBImage body; 1156 + } *cmd; 1157 + 1158 + cmd = container_of(header, struct vmw_gb_surface_cmd, header); 1159 + 1160 + return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1161 + user_surface_converter, 1162 + &cmd->body.image.sid, NULL); 1163 + } 1164 + 1165 + /** 1166 + * vmw_cmd_invalidate_gb_surface - Validate an 1167 + * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command 1168 + * 1169 + * @dev_priv: Pointer to a device private struct. 1170 + * @sw_context: The software context being used for this batch. 1171 + * @header: Pointer to the command header in the command stream. 1172 + */ 1173 + static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv, 1174 + struct vmw_sw_context *sw_context, 1175 + SVGA3dCmdHeader *header) 1176 + { 1177 + struct vmw_gb_surface_cmd { 1178 + SVGA3dCmdHeader header; 1179 + SVGA3dCmdInvalidateGBSurface body; 1180 + } *cmd; 1181 + 1182 + cmd = container_of(header, struct vmw_gb_surface_cmd, header); 1183 + 1184 + return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1185 + user_surface_converter, 1186 + &cmd->body.sid, NULL); 1187 + } 1188 + 1189 + /** 1266 1190 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER 1267 1191 * command 1268 1192 * ··· 1494 986 SVGA3dCmdHeader header; 1495 987 SVGA3dCmdSetShader body; 1496 988 } *cmd; 989 + struct vmw_resource_val_node *ctx_node; 1497 990 int ret; 1498 991 1499 992 cmd = container_of(header, struct vmw_set_shader_cmd, 1500 993 header); 1501 994 1502 - ret = vmw_cmd_cid_check(dev_priv, sw_context, header); 995 + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 996 + user_context_converter, &cmd->body.cid, 997 + &ctx_node); 1503 998 if (unlikely(ret != 0)) 1504 999 return ret; 1505 1000 
1001 + if (dev_priv->has_mob) { 1002 + struct vmw_ctx_bindinfo bi; 1003 + struct vmw_resource_val_node *res_node; 1004 + 1005 + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader, 1006 + user_shader_converter, 1007 + &cmd->body.shid, &res_node); 1008 + if (unlikely(ret != 0)) 1009 + return ret; 1010 + 1011 + bi.ctx = ctx_node->res; 1012 + bi.res = res_node ? res_node->res : NULL; 1013 + bi.bt = vmw_ctx_binding_shader; 1014 + bi.i1.shader_type = cmd->body.type; 1015 + return vmw_context_binding_add(ctx_node->staged_bindings, &bi); 1016 + } 1017 + 1506 1018 return 0; 1019 + } 1020 + 1021 + /** 1022 + * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER 1023 + * command 1024 + * 1025 + * @dev_priv: Pointer to a device private struct. 1026 + * @sw_context: The software context being used for this batch. 1027 + * @header: Pointer to the command header in the command stream. 1028 + */ 1029 + static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv, 1030 + struct vmw_sw_context *sw_context, 1031 + SVGA3dCmdHeader *header) 1032 + { 1033 + struct vmw_bind_gb_shader_cmd { 1034 + SVGA3dCmdHeader header; 1035 + SVGA3dCmdBindGBShader body; 1036 + } *cmd; 1037 + 1038 + cmd = container_of(header, struct vmw_bind_gb_shader_cmd, 1039 + header); 1040 + 1041 + return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader, 1042 + user_shader_converter, 1043 + &cmd->body.shid, &cmd->body.mobid, 1044 + cmd->body.offsetInBytes); 1507 1045 } 1508 1046 1509 1047 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, ··· 1595 1041 return 0; 1596 1042 } 1597 1043 1598 - typedef int (*vmw_cmd_func) (struct vmw_private *, 1599 - struct vmw_sw_context *, 1600 - SVGA3dCmdHeader *); 1601 - 1602 - #define VMW_CMD_DEF(cmd, func) \ 1603 - [cmd - SVGA_3D_CMD_BASE] = func 1604 - 1605 - static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = { 1606 - VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid), 1607 - VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, 
&vmw_cmd_invalid), 1608 - VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check), 1609 - VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check), 1610 - VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma), 1611 - VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid), 1612 - VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid), 1613 - VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check), 1614 - VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check), 1615 - VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check), 1044 + static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = { 1045 + VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid, 1046 + false, false, false), 1047 + VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid, 1048 + false, false, false), 1049 + VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check, 1050 + true, false, false), 1051 + VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check, 1052 + true, false, false), 1053 + VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma, 1054 + true, false, false), 1055 + VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid, 1056 + false, false, false), 1057 + VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid, 1058 + false, false, false), 1059 + VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check, 1060 + true, false, false), 1061 + VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check, 1062 + true, false, false), 1063 + VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check, 1064 + true, false, false), 1616 1065 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET, 1617 - &vmw_cmd_set_render_target_check), 1618 - VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state), 1619 - VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check), 1620 - VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check), 1621 - VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check), 1622 - 
VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check), 1623 - VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check), 1624 - VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check), 1625 - VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check), 1626 - VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check), 1627 - VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check), 1628 - VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader), 1629 - VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check), 1630 - VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), 1631 - VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), 1632 - VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query), 1633 - VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query), 1634 - VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query), 1635 - VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), 1066 + &vmw_cmd_set_render_target_check, true, false, false), 1067 + VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state, 1068 + true, false, false), 1069 + VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check, 1070 + true, false, false), 1071 + VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check, 1072 + true, false, false), 1073 + VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check, 1074 + true, false, false), 1075 + VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check, 1076 + true, false, false), 1077 + VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check, 1078 + true, false, false), 1079 + VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check, 1080 + true, false, false), 1081 + VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check, 1082 + false, false, false), 1083 + VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check, 1084 + true, true, false), 1085 + VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check, 1086 + true, true, false), 1087 + VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader, 1088 + true, false, false), 1089 
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check, 1090 + true, true, false), 1091 + VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw, 1092 + true, false, false), 1093 + VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check, 1094 + true, false, false), 1095 + VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query, 1096 + true, false, false), 1097 + VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query, 1098 + true, false, false), 1099 + VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query, 1100 + true, false, false), 1101 + VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok, 1102 + true, false, false), 1636 1103 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, 1637 - &vmw_cmd_blt_surf_screen_check), 1638 - VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid), 1639 - VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid), 1640 - VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid), 1641 - VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid), 1104 + &vmw_cmd_blt_surf_screen_check, false, false, false), 1105 + VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid, 1106 + false, false, false), 1107 + VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid, 1108 + false, false, false), 1109 + VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid, 1110 + false, false, false), 1111 + VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid, 1112 + false, false, false), 1113 + VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid, 1114 + false, false, false), 1115 + VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid, 1116 + false, false, false), 1117 + VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid, 1118 + false, false, false), 1119 + VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid, 1120 + false, false, false), 1121 + VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid, 1122 + false, false, false), 1123 + 
VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid, 1124 + false, false, false), 1125 + VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid, 1126 + false, false, false), 1127 + VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid, 1128 + false, false, false), 1129 + VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid, 1130 + false, false, false), 1131 + VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid, 1132 + false, false, true), 1133 + VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid, 1134 + false, false, true), 1135 + VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid, 1136 + false, false, true), 1137 + VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid, 1138 + false, false, true), 1139 + VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid, 1140 + false, false, true), 1141 + VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid, 1142 + false, false, true), 1143 + VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid, 1144 + false, false, true), 1145 + VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid, 1146 + false, false, true), 1147 + VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface, 1148 + true, false, true), 1149 + VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid, 1150 + false, false, true), 1151 + VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image, 1152 + true, false, true), 1153 + VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE, 1154 + &vmw_cmd_update_gb_surface, true, false, true), 1155 + VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE, 1156 + &vmw_cmd_readback_gb_image, true, false, true), 1157 + VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE, 1158 + &vmw_cmd_readback_gb_surface, true, false, true), 1159 + VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE, 1160 + &vmw_cmd_invalidate_gb_image, true, false, true), 1161 + VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE, 1162 + &vmw_cmd_invalidate_gb_surface, true, false, true), 1163 
+ VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid, 1164 + false, false, true), 1165 + VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid, 1166 + false, false, true), 1167 + VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid, 1168 + false, false, true), 1169 + VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid, 1170 + false, false, true), 1171 + VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid, 1172 + false, false, true), 1173 + VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid, 1174 + false, false, true), 1175 + VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader, 1176 + true, false, true), 1177 + VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid, 1178 + false, false, true), 1179 + VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid, 1180 + false, false, false), 1181 + VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query, 1182 + true, false, true), 1183 + VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query, 1184 + true, false, true), 1185 + VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query, 1186 + true, false, true), 1187 + VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok, 1188 + true, false, true), 1189 + VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid, 1190 + false, false, true), 1191 + VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid, 1192 + false, false, true), 1193 + VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid, 1194 + false, false, true), 1195 + VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid, 1196 + false, false, true), 1197 + VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid, 1198 + false, false, true), 1199 + VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid, 1200 + false, false, true), 1201 + VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid, 1202 + false, false, true), 1203 + VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid, 1204 + false, 
false, true), 1205 + VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, 1206 + false, false, true), 1207 + VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, 1208 + false, false, true), 1209 + VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check, 1210 + true, false, true) 1642 1211 }; 1643 1212 1644 1213 static int vmw_cmd_check(struct vmw_private *dev_priv, ··· 1772 1095 uint32_t size_remaining = *size; 1773 1096 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; 1774 1097 int ret; 1098 + const struct vmw_cmd_entry *entry; 1099 + bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS; 1775 1100 1776 1101 cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); 1777 1102 /* Handle any none 3D commands */ ··· 1786 1107 1787 1108 cmd_id -= SVGA_3D_CMD_BASE; 1788 1109 if (unlikely(*size > size_remaining)) 1789 - goto out_err; 1110 + goto out_invalid; 1790 1111 1791 1112 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)) 1792 - goto out_err; 1113 + goto out_invalid; 1793 1114 1794 - ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header); 1115 + entry = &vmw_cmd_entries[cmd_id]; 1116 + if (unlikely(!entry->user_allow && !sw_context->kernel)) 1117 + goto out_privileged; 1118 + 1119 + if (unlikely(entry->gb_disable && gb)) 1120 + goto out_old; 1121 + 1122 + if (unlikely(entry->gb_enable && !gb)) 1123 + goto out_new; 1124 + 1125 + ret = entry->func(dev_priv, sw_context, header); 1795 1126 if (unlikely(ret != 0)) 1796 - goto out_err; 1127 + goto out_invalid; 1797 1128 1798 1129 return 0; 1799 - out_err: 1800 - DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n", 1130 + out_invalid: 1131 + DRM_ERROR("Invalid SVGA3D command: %d\n", 1132 + cmd_id + SVGA_3D_CMD_BASE); 1133 + return -EINVAL; 1134 + out_privileged: 1135 + DRM_ERROR("Privileged SVGA3D command: %d\n", 1136 + cmd_id + SVGA_3D_CMD_BASE); 1137 + return -EPERM; 1138 + out_old: 1139 + DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n", 1140 + cmd_id + 
SVGA_3D_CMD_BASE); 1141 + return -EINVAL; 1142 + out_new: 1143 + DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n", 1801 1144 cmd_id + SVGA_3D_CMD_BASE); 1802 1145 return -EINVAL; 1803 1146 } ··· 1875 1174 case VMW_PL_GMR: 1876 1175 reloc->location->gmrId = bo->mem.start; 1877 1176 break; 1177 + case VMW_PL_MOB: 1178 + *reloc->mob_loc = bo->mem.start; 1179 + break; 1878 1180 default: 1879 1181 BUG(); 1880 1182 } ··· 1902 1198 list_for_each_entry_safe(val, val_next, list, head) { 1903 1199 list_del_init(&val->head); 1904 1200 vmw_resource_unreference(&val->res); 1201 + if (unlikely(val->staged_bindings)) 1202 + kfree(val->staged_bindings); 1905 1203 kfree(val); 1906 1204 } 1907 1205 } ··· 1930 1224 } 1931 1225 1932 1226 static int vmw_validate_single_buffer(struct vmw_private *dev_priv, 1933 - struct ttm_buffer_object *bo) 1227 + struct ttm_buffer_object *bo, 1228 + bool validate_as_mob) 1934 1229 { 1935 1230 int ret; 1936 1231 ··· 1944 1237 (bo == dev_priv->dummy_query_bo && 1945 1238 dev_priv->dummy_query_bo_pinned)) 1946 1239 return 0; 1240 + 1241 + if (validate_as_mob) 1242 + return ttm_bo_validate(bo, &vmw_mob_placement, true, false); 1947 1243 1948 1244 /** 1949 1245 * Put BO in VRAM if there is space, otherwise as a GMR. 
··· 1969 1259 return ret; 1970 1260 } 1971 1261 1972 - 1973 1262 static int vmw_validate_buffers(struct vmw_private *dev_priv, 1974 1263 struct vmw_sw_context *sw_context) 1975 1264 { ··· 1976 1267 int ret; 1977 1268 1978 1269 list_for_each_entry(entry, &sw_context->validate_nodes, base.head) { 1979 - ret = vmw_validate_single_buffer(dev_priv, entry->base.bo); 1270 + ret = vmw_validate_single_buffer(dev_priv, entry->base.bo, 1271 + entry->validate_as_mob); 1980 1272 if (unlikely(ret != 0)) 1981 1273 return ret; 1982 1274 } ··· 2219 1509 goto out_err; 2220 1510 } 2221 1511 1512 + ret = mutex_lock_interruptible(&dev_priv->binding_mutex); 1513 + if (unlikely(ret != 0)) { 1514 + ret = -ERESTARTSYS; 1515 + goto out_err; 1516 + } 1517 + 2222 1518 cmd = vmw_fifo_reserve(dev_priv, command_size); 2223 1519 if (unlikely(cmd == NULL)) { 2224 1520 DRM_ERROR("Failed reserving fifo space for commands.\n"); 2225 1521 ret = -ENOMEM; 2226 - goto out_err; 1522 + goto out_unlock_binding; 2227 1523 } 2228 1524 2229 1525 vmw_apply_relocations(sw_context); ··· 2254 1538 DRM_ERROR("Fence submission error. Syncing.\n"); 2255 1539 2256 1540 vmw_resource_list_unreserve(&sw_context->resource_list, false); 1541 + mutex_unlock(&dev_priv->binding_mutex); 1542 + 2257 1543 ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, 2258 1544 (void *) fence); 2259 1545 ··· 2286 1568 2287 1569 return 0; 2288 1570 1571 + out_unlock_binding: 1572 + mutex_unlock(&dev_priv->binding_mutex); 2289 1573 out_err: 2290 1574 vmw_resource_relocations_free(&sw_context->res_relocations); 2291 1575 vmw_free_relocations(sw_context);
+94 -13
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
··· 35 35 uint32_t fifo_min, hwversion; 36 36 const struct vmw_fifo_state *fifo = &dev_priv->fifo; 37 37 38 + if (!(dev_priv->capabilities & SVGA_CAP_3D)) 39 + return false; 40 + 41 + if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { 42 + uint32_t result; 43 + 44 + if (!dev_priv->has_mob) 45 + return false; 46 + 47 + mutex_lock(&dev_priv->hw_mutex); 48 + vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D); 49 + result = vmw_read(dev_priv, SVGA_REG_DEV_CAP); 50 + mutex_unlock(&dev_priv->hw_mutex); 51 + 52 + return (result != 0); 53 + } 54 + 38 55 if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) 39 56 return false; 40 57 ··· 528 511 } 529 512 530 513 /** 531 - * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo. 514 + * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using 515 + * legacy query commands. 532 516 * 533 517 * @dev_priv: The device private structure. 534 518 * @cid: The hardware context id used for the query. 535 519 * 536 - * This function is used to emit a dummy occlusion query with 537 - * no primitives rendered between query begin and query end. 538 - * It's used to provide a query barrier, in order to know that when 539 - * this query is finished, all preceding queries are also finished. 540 - * 541 - * A Query results structure should have been initialized at the start 542 - * of the dev_priv->dummy_query_bo buffer object. And that buffer object 543 - * must also be either reserved or pinned when this function is called. 544 - * 545 - * Returns -ENOMEM on failure to reserve fifo space. 520 + * See the vmw_fifo_emit_dummy_query documentation. 
546 521 */ 547 - int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, 548 - uint32_t cid) 522 + static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv, 523 + uint32_t cid) 549 524 { 550 525 /* 551 526 * A query wait without a preceding query end will ··· 574 565 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 575 566 576 567 return 0; 568 + } 569 + 570 + /** 571 + * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using 572 + * guest-backed resource query commands. 573 + * 574 + * @dev_priv: The device private structure. 575 + * @cid: The hardware context id used for the query. 576 + * 577 + * See the vmw_fifo_emit_dummy_query documentation. 578 + */ 579 + static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv, 580 + uint32_t cid) 581 + { 582 + /* 583 + * A query wait without a preceding query end will 584 + * actually finish all queries for this cid 585 + * without writing to the query result structure. 586 + */ 587 + 588 + struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; 589 + struct { 590 + SVGA3dCmdHeader header; 591 + SVGA3dCmdWaitForGBQuery body; 592 + } *cmd; 593 + 594 + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 595 + 596 + if (unlikely(cmd == NULL)) { 597 + DRM_ERROR("Out of fifo space for dummy query.\n"); 598 + return -ENOMEM; 599 + } 600 + 601 + cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY; 602 + cmd->header.size = sizeof(cmd->body); 603 + cmd->body.cid = cid; 604 + cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION; 605 + BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 606 + cmd->body.mobid = bo->mem.start; 607 + cmd->body.offset = 0; 608 + 609 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 610 + 611 + return 0; 612 + } 613 + 614 + 615 + /** 616 + * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using 617 + * appropriate resource query commands. 618 + * 619 + * @dev_priv: The device private structure. 620 + * @cid: The hardware context id used for the query. 
621 + * 622 + * This function is used to emit a dummy occlusion query with 623 + * no primitives rendered between query begin and query end. 624 + * It's used to provide a query barrier, in order to know that when 625 + * this query is finished, all preceding queries are also finished. 626 + * 627 + * A Query results structure should have been initialized at the start 628 + * of the dev_priv->dummy_query_bo buffer object. And that buffer object 629 + * must also be either reserved or pinned when this function is called. 630 + * 631 + * Returns -ENOMEM on failure to reserve fifo space. 632 + */ 633 + int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, 634 + uint32_t cid) 635 + { 636 + if (dev_priv->has_mob) 637 + return vmw_fifo_emit_dummy_gb_query(dev_priv, cid); 638 + 639 + return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid); 577 640 }
+3 -157
drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
··· 125 125 } 126 126 127 127 128 - static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma, 129 - struct list_head *desc_pages) 130 - { 131 - struct page *page, *next; 132 - struct svga_guest_mem_descriptor *page_virtual; 133 - unsigned int desc_per_page = PAGE_SIZE / 134 - sizeof(struct svga_guest_mem_descriptor) - 1; 135 - 136 - if (list_empty(desc_pages)) 137 - return; 138 - 139 - list_for_each_entry_safe(page, next, desc_pages, lru) { 140 - list_del_init(&page->lru); 141 - 142 - if (likely(desc_dma != DMA_ADDR_INVALID)) { 143 - dma_unmap_page(dev, desc_dma, PAGE_SIZE, 144 - DMA_TO_DEVICE); 145 - } 146 - 147 - page_virtual = kmap_atomic(page); 148 - desc_dma = (dma_addr_t) 149 - le32_to_cpu(page_virtual[desc_per_page].ppn) << 150 - PAGE_SHIFT; 151 - kunmap_atomic(page_virtual); 152 - 153 - __free_page(page); 154 - } 155 - } 156 - 157 - /** 158 - * FIXME: Adjust to the ttm lowmem / highmem storage to minimize 159 - * the number of used descriptors. 160 - * 161 - */ 162 - 163 - static int vmw_gmr_build_descriptors(struct device *dev, 164 - struct list_head *desc_pages, 165 - struct vmw_piter *iter, 166 - unsigned long num_pages, 167 - dma_addr_t *first_dma) 168 - { 169 - struct page *page; 170 - struct svga_guest_mem_descriptor *page_virtual = NULL; 171 - struct svga_guest_mem_descriptor *desc_virtual = NULL; 172 - unsigned int desc_per_page; 173 - unsigned long prev_pfn; 174 - unsigned long pfn; 175 - int ret; 176 - dma_addr_t desc_dma; 177 - 178 - desc_per_page = PAGE_SIZE / 179 - sizeof(struct svga_guest_mem_descriptor) - 1; 180 - 181 - while (likely(num_pages != 0)) { 182 - page = alloc_page(__GFP_HIGHMEM); 183 - if (unlikely(page == NULL)) { 184 - ret = -ENOMEM; 185 - goto out_err; 186 - } 187 - 188 - list_add_tail(&page->lru, desc_pages); 189 - page_virtual = kmap_atomic(page); 190 - desc_virtual = page_virtual - 1; 191 - prev_pfn = ~(0UL); 192 - 193 - while (likely(num_pages != 0)) { 194 - pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT; 
195 - 196 - if (pfn != prev_pfn + 1) { 197 - 198 - if (desc_virtual - page_virtual == 199 - desc_per_page - 1) 200 - break; 201 - 202 - (++desc_virtual)->ppn = cpu_to_le32(pfn); 203 - desc_virtual->num_pages = cpu_to_le32(1); 204 - } else { 205 - uint32_t tmp = 206 - le32_to_cpu(desc_virtual->num_pages); 207 - desc_virtual->num_pages = cpu_to_le32(tmp + 1); 208 - } 209 - prev_pfn = pfn; 210 - --num_pages; 211 - vmw_piter_next(iter); 212 - } 213 - 214 - (++desc_virtual)->ppn = DMA_PAGE_INVALID; 215 - desc_virtual->num_pages = cpu_to_le32(0); 216 - kunmap_atomic(page_virtual); 217 - } 218 - 219 - desc_dma = 0; 220 - list_for_each_entry_reverse(page, desc_pages, lru) { 221 - page_virtual = kmap_atomic(page); 222 - page_virtual[desc_per_page].ppn = cpu_to_le32 223 - (desc_dma >> PAGE_SHIFT); 224 - kunmap_atomic(page_virtual); 225 - desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE, 226 - DMA_TO_DEVICE); 227 - 228 - if (unlikely(dma_mapping_error(dev, desc_dma))) 229 - goto out_err; 230 - } 231 - *first_dma = desc_dma; 232 - 233 - return 0; 234 - out_err: 235 - vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages); 236 - return ret; 237 - } 238 - 239 - static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv, 240 - int gmr_id, dma_addr_t desc_dma) 241 - { 242 - mutex_lock(&dev_priv->hw_mutex); 243 - 244 - vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id); 245 - wmb(); 246 - vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT); 247 - mb(); 248 - 249 - mutex_unlock(&dev_priv->hw_mutex); 250 - 251 - } 252 - 253 128 int vmw_gmr_bind(struct vmw_private *dev_priv, 254 129 const struct vmw_sg_table *vsgt, 255 130 unsigned long num_pages, 256 131 int gmr_id) 257 132 { 258 - struct list_head desc_pages; 259 - dma_addr_t desc_dma = 0; 260 - struct device *dev = dev_priv->dev->dev; 261 133 struct vmw_piter data_iter; 262 - int ret; 263 134 264 135 vmw_piter_start(&data_iter, vsgt, 0); 265 136 266 137 if (unlikely(!vmw_piter_next(&data_iter))) 267 138 return 
0; 268 139 269 - if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) 270 - return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id); 271 - 272 - if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR))) 140 + if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR2))) 273 141 return -EINVAL; 274 142 275 - if (vsgt->num_regions > dev_priv->max_gmr_descriptors) 276 - return -EINVAL; 277 - 278 - INIT_LIST_HEAD(&desc_pages); 279 - 280 - ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter, 281 - num_pages, &desc_dma); 282 - if (unlikely(ret != 0)) 283 - return ret; 284 - 285 - vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma); 286 - vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages); 287 - 288 - return 0; 143 + return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id); 289 144 } 290 145 291 146 292 147 void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id) 293 148 { 294 - if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) { 149 + if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) 295 150 vmw_gmr2_unbind(dev_priv, gmr_id); 296 - return; 297 - } 298 - 299 - mutex_lock(&dev_priv->hw_mutex); 300 - vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id); 301 - wmb(); 302 - vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0); 303 - mb(); 304 - mutex_unlock(&dev_priv->hw_mutex); 305 151 }
+13 -2
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
··· 125 125 return -ENOMEM; 126 126 127 127 spin_lock_init(&gman->lock); 128 - gman->max_gmr_pages = dev_priv->max_gmr_pages; 129 128 gman->used_gmr_pages = 0; 130 129 ida_init(&gman->gmr_ida); 131 - gman->max_gmr_ids = p_size; 130 + 131 + switch (p_size) { 132 + case VMW_PL_GMR: 133 + gman->max_gmr_ids = dev_priv->max_gmr_ids; 134 + gman->max_gmr_pages = dev_priv->max_gmr_pages; 135 + break; 136 + case VMW_PL_MOB: 137 + gman->max_gmr_ids = VMWGFX_NUM_MOB; 138 + gman->max_gmr_pages = dev_priv->max_mob_pages; 139 + break; 140 + default: 141 + BUG(); 142 + } 132 143 man->priv = (void *) gman; 133 144 return 0; 134 145 }
+38 -4
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
··· 53 53 param->value = dev_priv->fifo.capabilities; 54 54 break; 55 55 case DRM_VMW_PARAM_MAX_FB_SIZE: 56 - param->value = dev_priv->vram_size; 56 + param->value = dev_priv->prim_bb_mem; 57 57 break; 58 58 case DRM_VMW_PARAM_FIFO_HW_VERSION: 59 59 { ··· 68 68 SVGA_FIFO_3D_HWVERSION)); 69 69 break; 70 70 } 71 + case DRM_VMW_PARAM_MAX_SURF_MEMORY: 72 + param->value = dev_priv->memory_size; 73 + break; 74 + case DRM_VMW_PARAM_3D_CAPS_SIZE: 75 + if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) 76 + param->value = SVGA3D_DEVCAP_MAX; 77 + else 78 + param->value = (SVGA_FIFO_3D_CAPS_LAST - 79 + SVGA_FIFO_3D_CAPS + 1); 80 + param->value *= sizeof(uint32_t); 81 + break; 82 + case DRM_VMW_PARAM_MAX_MOB_MEMORY: 83 + param->value = dev_priv->max_mob_pages * PAGE_SIZE; 84 + break; 71 85 default: 72 86 DRM_ERROR("Illegal vmwgfx get param request: %d\n", 73 87 param->param); ··· 103 89 void __user *buffer = (void __user *)((unsigned long)(arg->buffer)); 104 90 void *bounce; 105 91 int ret; 92 + bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS); 106 93 107 94 if (unlikely(arg->pad64 != 0)) { 108 95 DRM_ERROR("Illegal GET_3D_CAP argument.\n"); 109 96 return -EINVAL; 110 97 } 111 98 112 - size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) << 2; 99 + if (gb_objects) 100 + size = SVGA3D_DEVCAP_MAX; 101 + else 102 + size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1); 103 + 104 + size *= sizeof(uint32_t); 113 105 114 106 if (arg->max_size < size) 115 107 size = arg->max_size; ··· 126 106 return -ENOMEM; 127 107 } 128 108 129 - fifo_mem = dev_priv->mmio_virt; 130 - memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); 109 + if (gb_objects) { 110 + int i; 111 + uint32_t *bounce32 = (uint32_t *) bounce; 112 + 113 + mutex_lock(&dev_priv->hw_mutex); 114 + for (i = 0; i < SVGA3D_DEVCAP_MAX; ++i) { 115 + vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); 116 + *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); 117 + } 118 + mutex_unlock(&dev_priv->hw_mutex); 119 + 120 
+ } else { 121 + 122 + fifo_mem = dev_priv->mmio_virt; 123 + memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); 124 + } 131 125 132 126 ret = copy_to_user(buffer, bounce, size); 133 127 if (ret)
+4 -4
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 672 672 673 673 if (unlikely(surface->mip_levels[0] != 1 || 674 674 surface->num_sizes != 1 || 675 - surface->sizes[0].width < mode_cmd->width || 676 - surface->sizes[0].height < mode_cmd->height || 677 - surface->sizes[0].depth != 1)) { 675 + surface->base_size.width < mode_cmd->width || 676 + surface->base_size.height < mode_cmd->height || 677 + surface->base_size.depth != 1)) { 678 678 DRM_ERROR("Incompatible surface dimensions " 679 679 "for requested mode.\n"); 680 680 return -EINVAL; ··· 1645 1645 uint32_t pitch, 1646 1646 uint32_t height) 1647 1647 { 1648 - return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size; 1648 + return ((u64) pitch * (u64) height) < (u64) dev_priv->prim_bb_mem; 1649 1649 } 1650 1650 1651 1651
+659
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
··· 1 + /************************************************************************** 2 + * 3 + * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA 4 + * All Rights Reserved. 5 + * 6 + * Permission is hereby granted, free of charge, to any person obtaining a 7 + * copy of this software and associated documentation files (the 8 + * "Software"), to deal in the Software without restriction, including 9 + * without limitation the rights to use, copy, modify, merge, publish, 10 + * distribute, sub license, and/or sell copies of the Software, and to 11 + * permit persons to whom the Software is furnished to do so, subject to 12 + * the following conditions: 13 + * 14 + * The above copyright notice and this permission notice (including the 15 + * next paragraph) shall be included in all copies or substantial portions 16 + * of the Software. 17 + * 18 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 + * USE OR OTHER DEALINGS IN THE SOFTWARE. 25 + * 26 + **************************************************************************/ 27 + 28 + #include "vmwgfx_drv.h" 29 + 30 + /* 31 + * If we set up the screen target otable, screen objects stop working. 32 + */ 33 + 34 + #define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 
0 : 1) 35 + 36 + #ifdef CONFIG_64BIT 37 + #define VMW_PPN_SIZE 8 38 + #define vmw_cmd_set_otable_base SVGA3dCmdSetOTableBase64 39 + #define VMW_ID_SET_OTABLE_BASE SVGA_3D_CMD_SET_OTABLE_BASE64 40 + #define vmw_cmd_define_gb_mob SVGA3dCmdDefineGBMob64 41 + #define VMW_ID_DEFINE_GB_MOB SVGA_3D_CMD_DEFINE_GB_MOB64 42 + #define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0 43 + #define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1 44 + #define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2 45 + #else 46 + #define VMW_PPN_SIZE 4 47 + #define vmw_cmd_set_otable_base SVGA3dCmdSetOTableBase 48 + #define VMW_ID_SET_OTABLE_BASE SVGA_3D_CMD_SET_OTABLE_BASE 49 + #define vmw_cmd_define_gb_mob SVGA3dCmdDefineGBMob 50 + #define VMW_ID_DEFINE_GB_MOB SVGA_3D_CMD_DEFINE_GB_MOB 51 + #define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0 52 + #define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1 53 + #define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2 54 + #endif 55 + 56 + /* 57 + * struct vmw_mob - Structure containing page table and metadata for a 58 + * Guest Memory OBject. 59 + * 60 + * @num_pages Number of pages that make up the page table. 61 + * @pt_level The indirection level of the page table. 0-2. 62 + * @pt_root_page DMA address of the level 0 page of the page table. 63 + */ 64 + struct vmw_mob { 65 + struct ttm_buffer_object *pt_bo; 66 + unsigned long num_pages; 67 + unsigned pt_level; 68 + dma_addr_t pt_root_page; 69 + uint32_t id; 70 + }; 71 + 72 + /* 73 + * struct vmw_otable - Guest Memory OBject table metadata 74 + * 75 + * @size: Size of the table (page-aligned). 76 + * @page_table: Pointer to a struct vmw_mob holding the page table. 
77 + */ 78 + struct vmw_otable { 79 + unsigned long size; 80 + struct vmw_mob *page_table; 81 + }; 82 + 83 + static int vmw_mob_pt_populate(struct vmw_private *dev_priv, 84 + struct vmw_mob *mob); 85 + static void vmw_mob_pt_setup(struct vmw_mob *mob, 86 + struct vmw_piter data_iter, 87 + unsigned long num_data_pages); 88 + 89 + /* 90 + * vmw_setup_otable_base - Issue an object table base setup command to 91 + * the device 92 + * 93 + * @dev_priv: Pointer to a device private structure 94 + * @type: Type of object table base 95 + * @offset Start of table offset into dev_priv::otable_bo 96 + * @otable Pointer to otable metadata; 97 + * 98 + * This function returns -ENOMEM if it fails to reserve fifo space, 99 + * and may block waiting for fifo space. 100 + */ 101 + static int vmw_setup_otable_base(struct vmw_private *dev_priv, 102 + SVGAOTableType type, 103 + unsigned long offset, 104 + struct vmw_otable *otable) 105 + { 106 + struct { 107 + SVGA3dCmdHeader header; 108 + vmw_cmd_set_otable_base body; 109 + } *cmd; 110 + struct vmw_mob *mob; 111 + const struct vmw_sg_table *vsgt; 112 + struct vmw_piter iter; 113 + int ret; 114 + 115 + BUG_ON(otable->page_table != NULL); 116 + 117 + vsgt = vmw_bo_sg_table(dev_priv->otable_bo); 118 + vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT); 119 + WARN_ON(!vmw_piter_next(&iter)); 120 + 121 + mob = vmw_mob_create(otable->size >> PAGE_SHIFT); 122 + if (unlikely(mob == NULL)) { 123 + DRM_ERROR("Failed creating OTable page table.\n"); 124 + return -ENOMEM; 125 + } 126 + 127 + if (otable->size <= PAGE_SIZE) { 128 + mob->pt_level = VMW_MOBFMT_PTDEPTH_0; 129 + mob->pt_root_page = vmw_piter_dma_addr(&iter); 130 + } else if (vsgt->num_regions == 1) { 131 + mob->pt_level = SVGA3D_MOBFMT_RANGE; 132 + mob->pt_root_page = vmw_piter_dma_addr(&iter); 133 + } else { 134 + ret = vmw_mob_pt_populate(dev_priv, mob); 135 + if (unlikely(ret != 0)) 136 + goto out_no_populate; 137 + 138 + vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT); 139 
+ mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1; 140 + } 141 + 142 + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 143 + if (unlikely(cmd == NULL)) { 144 + DRM_ERROR("Failed reserving FIFO space for OTable setup.\n"); 145 + goto out_no_fifo; 146 + } 147 + 148 + memset(cmd, 0, sizeof(*cmd)); 149 + cmd->header.id = VMW_ID_SET_OTABLE_BASE; 150 + cmd->header.size = sizeof(cmd->body); 151 + cmd->body.type = type; 152 + cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT; 153 + cmd->body.sizeInBytes = otable->size; 154 + cmd->body.validSizeInBytes = 0; 155 + cmd->body.ptDepth = mob->pt_level; 156 + 157 + /* 158 + * The device doesn't support this, But the otable size is 159 + * determined at compile-time, so this BUG shouldn't trigger 160 + * randomly. 161 + */ 162 + BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2); 163 + 164 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 165 + otable->page_table = mob; 166 + 167 + return 0; 168 + 169 + out_no_fifo: 170 + out_no_populate: 171 + vmw_mob_destroy(mob); 172 + return ret; 173 + } 174 + 175 + /* 176 + * vmw_takedown_otable_base - Issue an object table base takedown command 177 + * to the device 178 + * 179 + * @dev_priv: Pointer to a device private structure 180 + * @type: Type of object table base 181 + * 182 + */ 183 + static void vmw_takedown_otable_base(struct vmw_private *dev_priv, 184 + SVGAOTableType type, 185 + struct vmw_otable *otable) 186 + { 187 + struct { 188 + SVGA3dCmdHeader header; 189 + SVGA3dCmdSetOTableBase body; 190 + } *cmd; 191 + struct ttm_buffer_object *bo = otable->page_table->pt_bo; 192 + 193 + if (otable->page_table == NULL) 194 + return; 195 + 196 + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 197 + if (unlikely(cmd == NULL)) 198 + DRM_ERROR("Failed reserving FIFO space for OTable setup.\n"); 199 + 200 + memset(cmd, 0, sizeof(*cmd)); 201 + cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE; 202 + cmd->header.size = sizeof(cmd->body); 203 + cmd->body.type = type; 204 + 
cmd->body.baseAddress = 0; 205 + cmd->body.sizeInBytes = 0; 206 + cmd->body.validSizeInBytes = 0; 207 + cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID; 208 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 209 + 210 + if (bo) { 211 + int ret; 212 + 213 + ret = ttm_bo_reserve(bo, false, true, false, false); 214 + BUG_ON(ret != 0); 215 + 216 + vmw_fence_single_bo(bo, NULL); 217 + ttm_bo_unreserve(bo); 218 + } 219 + 220 + vmw_mob_destroy(otable->page_table); 221 + otable->page_table = NULL; 222 + } 223 + 224 + /* 225 + * vmw_otables_setup - Set up guest backed memory object tables 226 + * 227 + * @dev_priv: Pointer to a device private structure 228 + * 229 + * Takes care of the device guest backed surface 230 + * initialization, by setting up the guest backed memory object tables. 231 + * Returns 0 on success and various error codes on failure. A succesful return 232 + * means the object tables can be taken down using the vmw_otables_takedown 233 + * function. 234 + */ 235 + int vmw_otables_setup(struct vmw_private *dev_priv) 236 + { 237 + unsigned long offset; 238 + unsigned long bo_size; 239 + struct vmw_otable *otables; 240 + SVGAOTableType i; 241 + int ret; 242 + 243 + otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables), 244 + GFP_KERNEL); 245 + if (unlikely(otables == NULL)) { 246 + DRM_ERROR("Failed to allocate space for otable " 247 + "metadata.\n"); 248 + return -ENOMEM; 249 + } 250 + 251 + otables[SVGA_OTABLE_MOB].size = 252 + VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE; 253 + otables[SVGA_OTABLE_SURFACE].size = 254 + VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE; 255 + otables[SVGA_OTABLE_CONTEXT].size = 256 + VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE; 257 + otables[SVGA_OTABLE_SHADER].size = 258 + VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE; 259 + otables[SVGA_OTABLE_SCREEN_TARGET].size = 260 + VMWGFX_NUM_GB_SCREEN_TARGET * 261 + SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE; 262 + 263 + bo_size = 0; 264 + for (i = 0; i < 
SVGA_OTABLE_DX9_MAX; ++i) { 265 + otables[i].size = 266 + (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK; 267 + bo_size += otables[i].size; 268 + } 269 + 270 + ret = ttm_bo_create(&dev_priv->bdev, bo_size, 271 + ttm_bo_type_device, 272 + &vmw_sys_ne_placement, 273 + 0, false, NULL, 274 + &dev_priv->otable_bo); 275 + 276 + if (unlikely(ret != 0)) 277 + goto out_no_bo; 278 + 279 + ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, false); 280 + BUG_ON(ret != 0); 281 + ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm); 282 + if (unlikely(ret != 0)) 283 + goto out_unreserve; 284 + ret = vmw_bo_map_dma(dev_priv->otable_bo); 285 + if (unlikely(ret != 0)) 286 + goto out_unreserve; 287 + 288 + ttm_bo_unreserve(dev_priv->otable_bo); 289 + 290 + offset = 0; 291 + for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) { 292 + ret = vmw_setup_otable_base(dev_priv, i, offset, 293 + &otables[i]); 294 + if (unlikely(ret != 0)) 295 + goto out_no_setup; 296 + offset += otables[i].size; 297 + } 298 + 299 + dev_priv->otables = otables; 300 + return 0; 301 + 302 + out_unreserve: 303 + ttm_bo_unreserve(dev_priv->otable_bo); 304 + out_no_setup: 305 + for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) 306 + vmw_takedown_otable_base(dev_priv, i, &otables[i]); 307 + 308 + ttm_bo_unref(&dev_priv->otable_bo); 309 + out_no_bo: 310 + kfree(otables); 311 + return ret; 312 + } 313 + 314 + 315 + /* 316 + * vmw_otables_takedown - Take down guest backed memory object tables 317 + * 318 + * @dev_priv: Pointer to a device private structure 319 + * 320 + * Take down the Guest Memory Object tables. 
321 + */ 322 + void vmw_otables_takedown(struct vmw_private *dev_priv) 323 + { 324 + SVGAOTableType i; 325 + struct ttm_buffer_object *bo = dev_priv->otable_bo; 326 + int ret; 327 + 328 + for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) 329 + vmw_takedown_otable_base(dev_priv, i, 330 + &dev_priv->otables[i]); 331 + 332 + ret = ttm_bo_reserve(bo, false, true, false, false); 333 + BUG_ON(ret != 0); 334 + 335 + vmw_fence_single_bo(bo, NULL); 336 + ttm_bo_unreserve(bo); 337 + 338 + ttm_bo_unref(&dev_priv->otable_bo); 339 + kfree(dev_priv->otables); 340 + dev_priv->otables = NULL; 341 + } 342 + 343 + 344 + /* 345 + * vmw_mob_calculate_pt_pages - Calculate the number of page table pages 346 + * needed for a guest backed memory object. 347 + * 348 + * @data_pages: Number of data pages in the memory object buffer. 349 + */ 350 + static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages) 351 + { 352 + unsigned long data_size = data_pages * PAGE_SIZE; 353 + unsigned long tot_size = 0; 354 + 355 + while (likely(data_size > PAGE_SIZE)) { 356 + data_size = DIV_ROUND_UP(data_size, PAGE_SIZE); 357 + data_size *= VMW_PPN_SIZE; 358 + tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK; 359 + } 360 + 361 + return tot_size >> PAGE_SHIFT; 362 + } 363 + 364 + /* 365 + * vmw_mob_create - Create a mob, but don't populate it. 366 + * 367 + * @data_pages: Number of data pages of the underlying buffer object. 368 + */ 369 + struct vmw_mob *vmw_mob_create(unsigned long data_pages) 370 + { 371 + struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL); 372 + 373 + if (unlikely(mob == NULL)) 374 + return NULL; 375 + 376 + mob->num_pages = vmw_mob_calculate_pt_pages(data_pages); 377 + 378 + return mob; 379 + } 380 + 381 + /* 382 + * vmw_mob_pt_populate - Populate the mob pagetable 383 + * 384 + * @mob: Pointer to the mob the pagetable of which we want to 385 + * populate. 
386 + * 387 + * This function allocates memory to be used for the pagetable, and 388 + * adjusts TTM memory accounting accordingly. Returns ENOMEM if 389 + * memory resources aren't sufficient and may cause TTM buffer objects 390 + * to be swapped out by using the TTM memory accounting function. 391 + */ 392 + static int vmw_mob_pt_populate(struct vmw_private *dev_priv, 393 + struct vmw_mob *mob) 394 + { 395 + int ret; 396 + BUG_ON(mob->pt_bo != NULL); 397 + 398 + ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE, 399 + ttm_bo_type_device, 400 + &vmw_sys_ne_placement, 401 + 0, false, NULL, &mob->pt_bo); 402 + if (unlikely(ret != 0)) 403 + return ret; 404 + 405 + ret = ttm_bo_reserve(mob->pt_bo, false, true, false, false); 406 + 407 + BUG_ON(ret != 0); 408 + ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm); 409 + if (unlikely(ret != 0)) 410 + goto out_unreserve; 411 + ret = vmw_bo_map_dma(mob->pt_bo); 412 + if (unlikely(ret != 0)) 413 + goto out_unreserve; 414 + 415 + ttm_bo_unreserve(mob->pt_bo); 416 + 417 + return 0; 418 + 419 + out_unreserve: 420 + ttm_bo_unreserve(mob->pt_bo); 421 + ttm_bo_unref(&mob->pt_bo); 422 + 423 + return ret; 424 + } 425 + 426 + /** 427 + * vmw_mob_assign_ppn - Assign a value to a page table entry 428 + * 429 + * @addr: Pointer to pointer to page table entry. 430 + * @val: The page table entry 431 + * 432 + * Assigns a value to a page table entry pointed to by *@addr and increments 433 + * *@addr according to the page table entry size. 
434 + */ 435 + #if (VMW_PPN_SIZE == 8) 436 + static void vmw_mob_assign_ppn(uint32_t **addr, dma_addr_t val) 437 + { 438 + *((uint64_t *) *addr) = val >> PAGE_SHIFT; 439 + *addr += 2; 440 + } 441 + #else 442 + static void vmw_mob_assign_ppn(uint32_t **addr, dma_addr_t val) 443 + { 444 + *(*addr)++ = val >> PAGE_SHIFT; 445 + } 446 + #endif 447 + 448 + /* 449 + * vmw_mob_build_pt - Build a pagetable 450 + * 451 + * @data_addr: Array of DMA addresses to the underlying buffer 452 + * object's data pages. 453 + * @num_data_pages: Number of buffer object data pages. 454 + * @pt_pages: Array of page pointers to the page table pages. 455 + * 456 + * Returns the number of page table pages actually used. 457 + * Uses atomic kmaps of highmem pages to avoid TLB thrashing. 458 + */ 459 + static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter, 460 + unsigned long num_data_pages, 461 + struct vmw_piter *pt_iter) 462 + { 463 + unsigned long pt_size = num_data_pages * VMW_PPN_SIZE; 464 + unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE); 465 + unsigned long pt_page; 466 + uint32_t *addr, *save_addr; 467 + unsigned long i; 468 + struct page *page; 469 + 470 + for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) { 471 + page = vmw_piter_page(pt_iter); 472 + 473 + save_addr = addr = kmap_atomic(page); 474 + 475 + for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) { 476 + vmw_mob_assign_ppn(&addr, 477 + vmw_piter_dma_addr(data_iter)); 478 + if (unlikely(--num_data_pages == 0)) 479 + break; 480 + WARN_ON(!vmw_piter_next(data_iter)); 481 + } 482 + kunmap_atomic(save_addr); 483 + vmw_piter_next(pt_iter); 484 + } 485 + 486 + return num_pt_pages; 487 + } 488 + 489 + /* 490 + * vmw_mob_build_pt - Set up a multilevel mob pagetable 491 + * 492 + * @mob: Pointer to a mob whose page table needs setting up. 493 + * @data_addr Array of DMA addresses to the buffer object's data 494 + * pages. 495 + * @num_data_pages: Number of buffer object data pages. 
496 + * 497 + * Uses tail recursion to set up a multilevel mob page table. 498 + */ 499 + static void vmw_mob_pt_setup(struct vmw_mob *mob, 500 + struct vmw_piter data_iter, 501 + unsigned long num_data_pages) 502 + { 503 + unsigned long num_pt_pages = 0; 504 + struct ttm_buffer_object *bo = mob->pt_bo; 505 + struct vmw_piter save_pt_iter; 506 + struct vmw_piter pt_iter; 507 + const struct vmw_sg_table *vsgt; 508 + int ret; 509 + 510 + ret = ttm_bo_reserve(bo, false, true, false, 0); 511 + BUG_ON(ret != 0); 512 + 513 + vsgt = vmw_bo_sg_table(bo); 514 + vmw_piter_start(&pt_iter, vsgt, 0); 515 + BUG_ON(!vmw_piter_next(&pt_iter)); 516 + mob->pt_level = 0; 517 + while (likely(num_data_pages > 1)) { 518 + ++mob->pt_level; 519 + BUG_ON(mob->pt_level > 2); 520 + save_pt_iter = pt_iter; 521 + num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages, 522 + &pt_iter); 523 + data_iter = save_pt_iter; 524 + num_data_pages = num_pt_pages; 525 + } 526 + 527 + mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter); 528 + ttm_bo_unreserve(bo); 529 + } 530 + 531 + /* 532 + * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary. 533 + * 534 + * @mob: Pointer to a mob to destroy. 535 + */ 536 + void vmw_mob_destroy(struct vmw_mob *mob) 537 + { 538 + if (mob->pt_bo) 539 + ttm_bo_unref(&mob->pt_bo); 540 + kfree(mob); 541 + } 542 + 543 + /* 544 + * vmw_mob_unbind - Hide a mob from the device. 545 + * 546 + * @dev_priv: Pointer to a device private. 547 + * @mob_id: Device id of the mob to unbind. 548 + */ 549 + void vmw_mob_unbind(struct vmw_private *dev_priv, 550 + struct vmw_mob *mob) 551 + { 552 + struct { 553 + SVGA3dCmdHeader header; 554 + SVGA3dCmdDestroyGBMob body; 555 + } *cmd; 556 + int ret; 557 + struct ttm_buffer_object *bo = mob->pt_bo; 558 + 559 + if (bo) { 560 + ret = ttm_bo_reserve(bo, false, true, false, 0); 561 + /* 562 + * Noone else should be using this buffer. 
563 + */ 564 + BUG_ON(ret != 0); 565 + } 566 + 567 + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 568 + if (unlikely(cmd == NULL)) { 569 + DRM_ERROR("Failed reserving FIFO space for Memory " 570 + "Object unbinding.\n"); 571 + } 572 + cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB; 573 + cmd->header.size = sizeof(cmd->body); 574 + cmd->body.mobid = mob->id; 575 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 576 + if (bo) { 577 + vmw_fence_single_bo(bo, NULL); 578 + ttm_bo_unreserve(bo); 579 + } 580 + vmw_3d_resource_dec(dev_priv, false); 581 + } 582 + 583 + /* 584 + * vmw_mob_bind - Make a mob visible to the device after first 585 + * populating it if necessary. 586 + * 587 + * @dev_priv: Pointer to a device private. 588 + * @mob: Pointer to the mob we're making visible. 589 + * @data_addr: Array of DMA addresses to the data pages of the underlying 590 + * buffer object. 591 + * @num_data_pages: Number of data pages of the underlying buffer 592 + * object. 593 + * @mob_id: Device id of the mob to bind 594 + * 595 + * This function is intended to be interfaced with the ttm_tt backend 596 + * code. 
597 + */ 598 + int vmw_mob_bind(struct vmw_private *dev_priv, 599 + struct vmw_mob *mob, 600 + const struct vmw_sg_table *vsgt, 601 + unsigned long num_data_pages, 602 + int32_t mob_id) 603 + { 604 + int ret; 605 + bool pt_set_up = false; 606 + struct vmw_piter data_iter; 607 + struct { 608 + SVGA3dCmdHeader header; 609 + vmw_cmd_define_gb_mob body; 610 + } *cmd; 611 + 612 + mob->id = mob_id; 613 + vmw_piter_start(&data_iter, vsgt, 0); 614 + if (unlikely(!vmw_piter_next(&data_iter))) 615 + return 0; 616 + 617 + if (likely(num_data_pages == 1)) { 618 + mob->pt_level = VMW_MOBFMT_PTDEPTH_0; 619 + mob->pt_root_page = vmw_piter_dma_addr(&data_iter); 620 + } else if (vsgt->num_regions == 1) { 621 + mob->pt_level = SVGA3D_MOBFMT_RANGE; 622 + mob->pt_root_page = vmw_piter_dma_addr(&data_iter); 623 + } else if (unlikely(mob->pt_bo == NULL)) { 624 + ret = vmw_mob_pt_populate(dev_priv, mob); 625 + if (unlikely(ret != 0)) 626 + return ret; 627 + 628 + vmw_mob_pt_setup(mob, data_iter, num_data_pages); 629 + pt_set_up = true; 630 + mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1; 631 + } 632 + 633 + (void) vmw_3d_resource_inc(dev_priv, false); 634 + 635 + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 636 + if (unlikely(cmd == NULL)) { 637 + DRM_ERROR("Failed reserving FIFO space for Memory " 638 + "Object binding.\n"); 639 + goto out_no_cmd_space; 640 + } 641 + 642 + cmd->header.id = VMW_ID_DEFINE_GB_MOB; 643 + cmd->header.size = sizeof(cmd->body); 644 + cmd->body.mobid = mob_id; 645 + cmd->body.ptDepth = mob->pt_level; 646 + cmd->body.base = mob->pt_root_page >> PAGE_SHIFT; 647 + cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE; 648 + 649 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 650 + 651 + return 0; 652 + 653 + out_no_cmd_space: 654 + vmw_3d_resource_dec(dev_priv, false); 655 + if (pt_set_up) 656 + ttm_bo_unref(&mob->pt_bo); 657 + 658 + return -ENOMEM; 659 + }
+192 -3
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 215 215 res->func = func; 216 216 INIT_LIST_HEAD(&res->lru_head); 217 217 INIT_LIST_HEAD(&res->mob_head); 218 + INIT_LIST_HEAD(&res->binding_head); 218 219 res->id = -1; 219 220 res->backup = NULL; 220 221 res->backup_offset = 0; ··· 442 441 ttm_bo_unref(&bo); 443 442 } 444 443 444 + static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base, 445 + enum ttm_ref_type ref_type) 446 + { 447 + struct vmw_user_dma_buffer *user_bo; 448 + user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base); 449 + 450 + switch (ref_type) { 451 + case TTM_REF_SYNCCPU_WRITE: 452 + ttm_bo_synccpu_write_release(&user_bo->dma.base); 453 + break; 454 + default: 455 + BUG(); 456 + } 457 + } 458 + 445 459 /** 446 460 * vmw_user_dmabuf_alloc - Allocate a user dma buffer 447 461 * ··· 487 471 } 488 472 489 473 ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size, 474 + (dev_priv->has_mob) ? 475 + &vmw_sys_placement : 490 476 &vmw_vram_sys_placement, true, 491 477 &vmw_user_dmabuf_destroy); 492 478 if (unlikely(ret != 0)) ··· 500 482 &user_bo->prime, 501 483 shareable, 502 484 ttm_buffer_type, 503 - &vmw_user_dmabuf_release, NULL); 485 + &vmw_user_dmabuf_release, 486 + &vmw_user_dmabuf_ref_obj_release); 504 487 if (unlikely(ret != 0)) { 505 488 ttm_bo_unref(&tmp); 506 489 goto out_no_base_object; ··· 532 513 vmw_user_bo = vmw_user_dma_buffer(bo); 533 514 return (vmw_user_bo->prime.base.tfile == tfile || 534 515 vmw_user_bo->prime.base.shareable) ? 0 : -EPERM; 516 + } 517 + 518 + /** 519 + * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu 520 + * access, idling previous GPU operations on the buffer and optionally 521 + * blocking it for further command submissions. 522 + * 523 + * @user_bo: Pointer to the buffer object being grabbed for CPU access 524 + * @tfile: Identifying the caller. 525 + * @flags: Flags indicating how the grab should be performed. 526 + * 527 + * A blocking grab will be automatically released when @tfile is closed. 
528 + */ 529 + static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo, 530 + struct ttm_object_file *tfile, 531 + uint32_t flags) 532 + { 533 + struct ttm_buffer_object *bo = &user_bo->dma.base; 534 + bool existed; 535 + int ret; 536 + 537 + if (flags & drm_vmw_synccpu_allow_cs) { 538 + struct ttm_bo_device *bdev = bo->bdev; 539 + 540 + spin_lock(&bdev->fence_lock); 541 + ret = ttm_bo_wait(bo, false, true, 542 + !!(flags & drm_vmw_synccpu_dontblock)); 543 + spin_unlock(&bdev->fence_lock); 544 + return ret; 545 + } 546 + 547 + ret = ttm_bo_synccpu_write_grab 548 + (bo, !!(flags & drm_vmw_synccpu_dontblock)); 549 + if (unlikely(ret != 0)) 550 + return ret; 551 + 552 + ret = ttm_ref_object_add(tfile, &user_bo->prime.base, 553 + TTM_REF_SYNCCPU_WRITE, &existed); 554 + if (ret != 0 || existed) 555 + ttm_bo_synccpu_write_release(&user_bo->dma.base); 556 + 557 + return ret; 558 + } 559 + 560 + /** 561 + * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access, 562 + * and unblock command submission on the buffer if blocked. 563 + * 564 + * @handle: Handle identifying the buffer object. 565 + * @tfile: Identifying the caller. 566 + * @flags: Flags indicating the type of release. 567 + */ 568 + static int vmw_user_dmabuf_synccpu_release(uint32_t handle, 569 + struct ttm_object_file *tfile, 570 + uint32_t flags) 571 + { 572 + if (!(flags & drm_vmw_synccpu_allow_cs)) 573 + return ttm_ref_object_base_unref(tfile, handle, 574 + TTM_REF_SYNCCPU_WRITE); 575 + 576 + return 0; 577 + } 578 + 579 + /** 580 + * vmw_user_dmabuf_synccpu_release - ioctl function implementing the synccpu 581 + * functionality. 582 + * 583 + * @dev: Identifies the drm device. 584 + * @data: Pointer to the ioctl argument. 585 + * @file_priv: Identifies the caller. 586 + * 587 + * This function checks the ioctl arguments for validity and calls the 588 + * relevant synccpu functions. 
589 + */ 590 + int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, 591 + struct drm_file *file_priv) 592 + { 593 + struct drm_vmw_synccpu_arg *arg = 594 + (struct drm_vmw_synccpu_arg *) data; 595 + struct vmw_dma_buffer *dma_buf; 596 + struct vmw_user_dma_buffer *user_bo; 597 + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 598 + int ret; 599 + 600 + if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0 601 + || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write | 602 + drm_vmw_synccpu_dontblock | 603 + drm_vmw_synccpu_allow_cs)) != 0) { 604 + DRM_ERROR("Illegal synccpu flags.\n"); 605 + return -EINVAL; 606 + } 607 + 608 + switch (arg->op) { 609 + case drm_vmw_synccpu_grab: 610 + ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf); 611 + if (unlikely(ret != 0)) 612 + return ret; 613 + 614 + user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, 615 + dma); 616 + ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags); 617 + vmw_dmabuf_unreference(&dma_buf); 618 + if (unlikely(ret != 0 && ret != -ERESTARTSYS && 619 + ret != -EBUSY)) { 620 + DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n", 621 + (unsigned int) arg->handle); 622 + return ret; 623 + } 624 + break; 625 + case drm_vmw_synccpu_release: 626 + ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile, 627 + arg->flags); 628 + if (unlikely(ret != 0)) { 629 + DRM_ERROR("Failed synccpu release on handle 0x%08x.\n", 630 + (unsigned int) arg->handle); 631 + return ret; 632 + } 633 + break; 634 + default: 635 + DRM_ERROR("Invalid synccpu operation.\n"); 636 + return -EINVAL; 637 + } 638 + 639 + return 0; 535 640 } 536 641 537 642 int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, ··· 734 591 } 735 592 736 593 int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, 737 - struct vmw_dma_buffer *dma_buf) 594 + struct vmw_dma_buffer *dma_buf, 595 + uint32_t *handle) 738 596 { 739 597 struct vmw_user_dma_buffer *user_bo; 
740 598 ··· 743 599 return -EINVAL; 744 600 745 601 user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma); 602 + 603 + *handle = user_bo->prime.base.hash.key; 746 604 return ttm_ref_object_add(tfile, &user_bo->prime.base, 747 605 TTM_REF_USAGE, NULL); 748 606 } ··· 1437 1291 * @mem: The truct ttm_mem_reg indicating to what memory 1438 1292 * region the move is taking place. 1439 1293 * 1440 - * For now does nothing. 1294 + * Evicts the Guest Backed hardware resource if the backup 1295 + * buffer is being moved out of MOB memory. 1296 + * Note that this function should not race with the resource 1297 + * validation code as long as it accesses only members of struct 1298 + * resource that remain static while bo::res is !NULL and 1299 + * while we have @bo reserved. struct resource::backup is *not* a 1300 + * static member. The resource validation code will take care 1301 + * to set @bo::res to NULL, while having @bo reserved when the 1302 + * buffer is no longer bound to the resource, so @bo:res can be 1303 + * used to determine whether there is a need to unbind and whether 1304 + * it is safe to unbind. 
1441 1305 */ 1442 1306 void vmw_resource_move_notify(struct ttm_buffer_object *bo, 1443 1307 struct ttm_mem_reg *mem) 1444 1308 { 1309 + struct vmw_dma_buffer *dma_buf; 1310 + 1311 + if (mem == NULL) 1312 + return; 1313 + 1314 + if (bo->destroy != vmw_dmabuf_bo_free && 1315 + bo->destroy != vmw_user_dmabuf_destroy) 1316 + return; 1317 + 1318 + dma_buf = container_of(bo, struct vmw_dma_buffer, base); 1319 + 1320 + if (mem->mem_type != VMW_PL_MOB) { 1321 + struct vmw_resource *res, *n; 1322 + struct ttm_bo_device *bdev = bo->bdev; 1323 + struct ttm_validate_buffer val_buf; 1324 + 1325 + val_buf.bo = bo; 1326 + 1327 + list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) { 1328 + 1329 + if (unlikely(res->func->unbind == NULL)) 1330 + continue; 1331 + 1332 + (void) res->func->unbind(res, true, &val_buf); 1333 + res->backup_dirty = true; 1334 + res->res_dirty = false; 1335 + list_del_init(&res->mob_head); 1336 + } 1337 + 1338 + spin_lock(&bdev->fence_lock); 1339 + (void) ttm_bo_wait(bo, false, false, false); 1340 + spin_unlock(&bdev->fence_lock); 1341 + } 1445 1342 } 1446 1343 1447 1344 /**
+440
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
··· 1 + /************************************************************************** 2 + * 3 + * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA 4 + * All Rights Reserved. 5 + * 6 + * Permission is hereby granted, free of charge, to any person obtaining a 7 + * copy of this software and associated documentation files (the 8 + * "Software"), to deal in the Software without restriction, including 9 + * without limitation the rights to use, copy, modify, merge, publish, 10 + * distribute, sub license, and/or sell copies of the Software, and to 11 + * permit persons to whom the Software is furnished to do so, subject to 12 + * the following conditions: 13 + * 14 + * The above copyright notice and this permission notice (including the 15 + * next paragraph) shall be included in all copies or substantial portions 16 + * of the Software. 17 + * 18 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
25 + * 26 + **************************************************************************/ 27 + 28 + #include "vmwgfx_drv.h" 29 + #include "vmwgfx_resource_priv.h" 30 + #include "ttm/ttm_placement.h" 31 + 32 + struct vmw_shader { 33 + struct vmw_resource res; 34 + SVGA3dShaderType type; 35 + uint32_t size; 36 + }; 37 + 38 + struct vmw_user_shader { 39 + struct ttm_base_object base; 40 + struct vmw_shader shader; 41 + }; 42 + 43 + static void vmw_user_shader_free(struct vmw_resource *res); 44 + static struct vmw_resource * 45 + vmw_user_shader_base_to_res(struct ttm_base_object *base); 46 + 47 + static int vmw_gb_shader_create(struct vmw_resource *res); 48 + static int vmw_gb_shader_bind(struct vmw_resource *res, 49 + struct ttm_validate_buffer *val_buf); 50 + static int vmw_gb_shader_unbind(struct vmw_resource *res, 51 + bool readback, 52 + struct ttm_validate_buffer *val_buf); 53 + static int vmw_gb_shader_destroy(struct vmw_resource *res); 54 + 55 + static uint64_t vmw_user_shader_size; 56 + 57 + static const struct vmw_user_resource_conv user_shader_conv = { 58 + .object_type = VMW_RES_SHADER, 59 + .base_obj_to_res = vmw_user_shader_base_to_res, 60 + .res_free = vmw_user_shader_free 61 + }; 62 + 63 + const struct vmw_user_resource_conv *user_shader_converter = 64 + &user_shader_conv; 65 + 66 + 67 + static const struct vmw_res_func vmw_gb_shader_func = { 68 + .res_type = vmw_res_shader, 69 + .needs_backup = true, 70 + .may_evict = true, 71 + .type_name = "guest backed shaders", 72 + .backup_placement = &vmw_mob_placement, 73 + .create = vmw_gb_shader_create, 74 + .destroy = vmw_gb_shader_destroy, 75 + .bind = vmw_gb_shader_bind, 76 + .unbind = vmw_gb_shader_unbind 77 + }; 78 + 79 + /** 80 + * Shader management: 81 + */ 82 + 83 + static inline struct vmw_shader * 84 + vmw_res_to_shader(struct vmw_resource *res) 85 + { 86 + return container_of(res, struct vmw_shader, res); 87 + } 88 + 89 + static void vmw_hw_shader_destroy(struct vmw_resource *res) 90 + { 91 + (void) 
vmw_gb_shader_destroy(res); 92 + } 93 + 94 + static int vmw_gb_shader_init(struct vmw_private *dev_priv, 95 + struct vmw_resource *res, 96 + uint32_t size, 97 + uint64_t offset, 98 + SVGA3dShaderType type, 99 + struct vmw_dma_buffer *byte_code, 100 + void (*res_free) (struct vmw_resource *res)) 101 + { 102 + struct vmw_shader *shader = vmw_res_to_shader(res); 103 + int ret; 104 + 105 + ret = vmw_resource_init(dev_priv, res, true, 106 + res_free, &vmw_gb_shader_func); 107 + 108 + 109 + if (unlikely(ret != 0)) { 110 + if (res_free) 111 + res_free(res); 112 + else 113 + kfree(res); 114 + return ret; 115 + } 116 + 117 + res->backup_size = size; 118 + if (byte_code) { 119 + res->backup = vmw_dmabuf_reference(byte_code); 120 + res->backup_offset = offset; 121 + } 122 + shader->size = size; 123 + shader->type = type; 124 + 125 + vmw_resource_activate(res, vmw_hw_shader_destroy); 126 + return 0; 127 + } 128 + 129 + static int vmw_gb_shader_create(struct vmw_resource *res) 130 + { 131 + struct vmw_private *dev_priv = res->dev_priv; 132 + struct vmw_shader *shader = vmw_res_to_shader(res); 133 + int ret; 134 + struct { 135 + SVGA3dCmdHeader header; 136 + SVGA3dCmdDefineGBShader body; 137 + } *cmd; 138 + 139 + if (likely(res->id != -1)) 140 + return 0; 141 + 142 + ret = vmw_resource_alloc_id(res); 143 + if (unlikely(ret != 0)) { 144 + DRM_ERROR("Failed to allocate a shader id.\n"); 145 + goto out_no_id; 146 + } 147 + 148 + if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) { 149 + ret = -EBUSY; 150 + goto out_no_fifo; 151 + } 152 + 153 + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 154 + if (unlikely(cmd == NULL)) { 155 + DRM_ERROR("Failed reserving FIFO space for shader " 156 + "creation.\n"); 157 + ret = -ENOMEM; 158 + goto out_no_fifo; 159 + } 160 + 161 + cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER; 162 + cmd->header.size = sizeof(cmd->body); 163 + cmd->body.shid = res->id; 164 + cmd->body.type = shader->type; 165 + cmd->body.sizeInBytes = shader->size; 166 + 
vmw_fifo_commit(dev_priv, sizeof(*cmd)); 167 + (void) vmw_3d_resource_inc(dev_priv, false); 168 + 169 + return 0; 170 + 171 + out_no_fifo: 172 + vmw_resource_release_id(res); 173 + out_no_id: 174 + return ret; 175 + } 176 + 177 + static int vmw_gb_shader_bind(struct vmw_resource *res, 178 + struct ttm_validate_buffer *val_buf) 179 + { 180 + struct vmw_private *dev_priv = res->dev_priv; 181 + struct { 182 + SVGA3dCmdHeader header; 183 + SVGA3dCmdBindGBShader body; 184 + } *cmd; 185 + struct ttm_buffer_object *bo = val_buf->bo; 186 + 187 + BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 188 + 189 + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 190 + if (unlikely(cmd == NULL)) { 191 + DRM_ERROR("Failed reserving FIFO space for shader " 192 + "binding.\n"); 193 + return -ENOMEM; 194 + } 195 + 196 + cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER; 197 + cmd->header.size = sizeof(cmd->body); 198 + cmd->body.shid = res->id; 199 + cmd->body.mobid = bo->mem.start; 200 + cmd->body.offsetInBytes = 0; 201 + res->backup_dirty = false; 202 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 203 + 204 + return 0; 205 + } 206 + 207 + static int vmw_gb_shader_unbind(struct vmw_resource *res, 208 + bool readback, 209 + struct ttm_validate_buffer *val_buf) 210 + { 211 + struct vmw_private *dev_priv = res->dev_priv; 212 + struct { 213 + SVGA3dCmdHeader header; 214 + SVGA3dCmdBindGBShader body; 215 + } *cmd; 216 + struct vmw_fence_obj *fence; 217 + 218 + BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB); 219 + 220 + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 221 + if (unlikely(cmd == NULL)) { 222 + DRM_ERROR("Failed reserving FIFO space for shader " 223 + "unbinding.\n"); 224 + return -ENOMEM; 225 + } 226 + 227 + cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER; 228 + cmd->header.size = sizeof(cmd->body); 229 + cmd->body.shid = res->id; 230 + cmd->body.mobid = SVGA3D_INVALID_ID; 231 + cmd->body.offsetInBytes = 0; 232 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 233 + 234 + /* 235 + * Create a fence 
object and fence the backup buffer. 236 + */ 237 + 238 + (void) vmw_execbuf_fence_commands(NULL, dev_priv, 239 + &fence, NULL); 240 + 241 + vmw_fence_single_bo(val_buf->bo, fence); 242 + 243 + if (likely(fence != NULL)) 244 + vmw_fence_obj_unreference(&fence); 245 + 246 + return 0; 247 + } 248 + 249 + static int vmw_gb_shader_destroy(struct vmw_resource *res) 250 + { 251 + struct vmw_private *dev_priv = res->dev_priv; 252 + struct { 253 + SVGA3dCmdHeader header; 254 + SVGA3dCmdDestroyGBShader body; 255 + } *cmd; 256 + 257 + if (likely(res->id == -1)) 258 + return 0; 259 + 260 + mutex_lock(&dev_priv->binding_mutex); 261 + vmw_context_binding_res_list_kill(&res->binding_head); 262 + 263 + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 264 + if (unlikely(cmd == NULL)) { 265 + DRM_ERROR("Failed reserving FIFO space for shader " 266 + "destruction.\n"); 267 + return -ENOMEM; 268 + } 269 + 270 + cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER; 271 + cmd->header.size = sizeof(cmd->body); 272 + cmd->body.shid = res->id; 273 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 274 + mutex_unlock(&dev_priv->binding_mutex); 275 + vmw_resource_release_id(res); 276 + vmw_3d_resource_dec(dev_priv, false); 277 + 278 + return 0; 279 + } 280 + 281 + /** 282 + * User-space shader management: 283 + */ 284 + 285 + static struct vmw_resource * 286 + vmw_user_shader_base_to_res(struct ttm_base_object *base) 287 + { 288 + return &(container_of(base, struct vmw_user_shader, base)-> 289 + shader.res); 290 + } 291 + 292 + static void vmw_user_shader_free(struct vmw_resource *res) 293 + { 294 + struct vmw_user_shader *ushader = 295 + container_of(res, struct vmw_user_shader, shader.res); 296 + struct vmw_private *dev_priv = res->dev_priv; 297 + 298 + ttm_base_object_kfree(ushader, base); 299 + ttm_mem_global_free(vmw_mem_glob(dev_priv), 300 + vmw_user_shader_size); 301 + } 302 + 303 + /** 304 + * This function is called when user space has no more references on the 305 + * base object. 
It releases the base-object's reference on the resource object. 306 + */ 307 + 308 + static void vmw_user_shader_base_release(struct ttm_base_object **p_base) 309 + { 310 + struct ttm_base_object *base = *p_base; 311 + struct vmw_resource *res = vmw_user_shader_base_to_res(base); 312 + 313 + *p_base = NULL; 314 + vmw_resource_unreference(&res); 315 + } 316 + 317 + int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, 318 + struct drm_file *file_priv) 319 + { 320 + struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data; 321 + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 322 + 323 + return ttm_ref_object_base_unref(tfile, arg->handle, 324 + TTM_REF_USAGE); 325 + } 326 + 327 + int vmw_shader_define_ioctl(struct drm_device *dev, void *data, 328 + struct drm_file *file_priv) 329 + { 330 + struct vmw_private *dev_priv = vmw_priv(dev); 331 + struct vmw_user_shader *ushader; 332 + struct vmw_resource *res; 333 + struct vmw_resource *tmp; 334 + struct drm_vmw_shader_create_arg *arg = 335 + (struct drm_vmw_shader_create_arg *)data; 336 + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 337 + struct vmw_master *vmaster = vmw_master(file_priv->master); 338 + struct vmw_dma_buffer *buffer = NULL; 339 + SVGA3dShaderType shader_type; 340 + int ret; 341 + 342 + if (arg->buffer_handle != SVGA3D_INVALID_ID) { 343 + ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle, 344 + &buffer); 345 + if (unlikely(ret != 0)) { 346 + DRM_ERROR("Could not find buffer for shader " 347 + "creation.\n"); 348 + return ret; 349 + } 350 + 351 + if ((u64)buffer->base.num_pages * PAGE_SIZE < 352 + (u64)arg->size + (u64)arg->offset) { 353 + DRM_ERROR("Illegal buffer- or shader size.\n"); 354 + ret = -EINVAL; 355 + goto out_bad_arg; 356 + } 357 + } 358 + 359 + switch (arg->shader_type) { 360 + case drm_vmw_shader_type_vs: 361 + shader_type = SVGA3D_SHADERTYPE_VS; 362 + break; 363 + case drm_vmw_shader_type_ps: 364 + shader_type = SVGA3D_SHADERTYPE_PS; 
365 + break; 366 + case drm_vmw_shader_type_gs: 367 + shader_type = SVGA3D_SHADERTYPE_GS; 368 + break; 369 + default: 370 + DRM_ERROR("Illegal shader type.\n"); 371 + ret = -EINVAL; 372 + goto out_bad_arg; 373 + } 374 + 375 + /* 376 + * Approximate idr memory usage with 128 bytes. It will be limited 377 + * by maximum number_of shaders anyway. 378 + */ 379 + 380 + if (unlikely(vmw_user_shader_size == 0)) 381 + vmw_user_shader_size = ttm_round_pot(sizeof(*ushader)) 382 + + 128; 383 + 384 + ret = ttm_read_lock(&vmaster->lock, true); 385 + if (unlikely(ret != 0)) 386 + return ret; 387 + 388 + ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), 389 + vmw_user_shader_size, 390 + false, true); 391 + if (unlikely(ret != 0)) { 392 + if (ret != -ERESTARTSYS) 393 + DRM_ERROR("Out of graphics memory for shader" 394 + " creation.\n"); 395 + goto out_unlock; 396 + } 397 + 398 + ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); 399 + if (unlikely(ushader == NULL)) { 400 + ttm_mem_global_free(vmw_mem_glob(dev_priv), 401 + vmw_user_shader_size); 402 + ret = -ENOMEM; 403 + goto out_unlock; 404 + } 405 + 406 + res = &ushader->shader.res; 407 + ushader->base.shareable = false; 408 + ushader->base.tfile = NULL; 409 + 410 + /* 411 + * From here on, the destructor takes over resource freeing. 
412 + */ 413 + 414 + ret = vmw_gb_shader_init(dev_priv, res, arg->size, 415 + arg->offset, shader_type, buffer, 416 + vmw_user_shader_free); 417 + if (unlikely(ret != 0)) 418 + goto out_unlock; 419 + 420 + tmp = vmw_resource_reference(res); 421 + ret = ttm_base_object_init(tfile, &ushader->base, false, 422 + VMW_RES_SHADER, 423 + &vmw_user_shader_base_release, NULL); 424 + 425 + if (unlikely(ret != 0)) { 426 + vmw_resource_unreference(&tmp); 427 + goto out_err; 428 + } 429 + 430 + arg->shader_handle = ushader->base.hash.key; 431 + out_err: 432 + vmw_resource_unreference(&res); 433 + out_unlock: 434 + ttm_read_unlock(&vmaster->lock); 435 + out_bad_arg: 436 + vmw_dmabuf_unreference(&buffer); 437 + 438 + return ret; 439 + 440 + }
+463 -4
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
··· 41 41 struct ttm_prime_object prime; 42 42 struct vmw_surface srf; 43 43 uint32_t size; 44 - uint32_t backup_handle; 45 44 }; 46 45 47 46 /** ··· 67 68 struct ttm_validate_buffer *val_buf); 68 69 static int vmw_legacy_srf_create(struct vmw_resource *res); 69 70 static int vmw_legacy_srf_destroy(struct vmw_resource *res); 71 + static int vmw_gb_surface_create(struct vmw_resource *res); 72 + static int vmw_gb_surface_bind(struct vmw_resource *res, 73 + struct ttm_validate_buffer *val_buf); 74 + static int vmw_gb_surface_unbind(struct vmw_resource *res, 75 + bool readback, 76 + struct ttm_validate_buffer *val_buf); 77 + static int vmw_gb_surface_destroy(struct vmw_resource *res); 78 + 70 79 71 80 static const struct vmw_user_resource_conv user_surface_conv = { 72 81 .object_type = VMW_RES_SURFACE, ··· 98 91 .destroy = &vmw_legacy_srf_destroy, 99 92 .bind = &vmw_legacy_srf_bind, 100 93 .unbind = &vmw_legacy_srf_unbind 94 + }; 95 + 96 + static const struct vmw_res_func vmw_gb_surface_func = { 97 + .res_type = vmw_res_surface, 98 + .needs_backup = true, 99 + .may_evict = true, 100 + .type_name = "guest backed surfaces", 101 + .backup_placement = &vmw_mob_placement, 102 + .create = vmw_gb_surface_create, 103 + .destroy = vmw_gb_surface_destroy, 104 + .bind = vmw_gb_surface_bind, 105 + .unbind = vmw_gb_surface_unbind 101 106 }; 102 107 103 108 /** ··· 309 290 struct vmw_private *dev_priv = res->dev_priv; 310 291 struct vmw_surface *srf; 311 292 void *cmd; 293 + 294 + if (res->func->destroy == vmw_gb_surface_destroy) { 295 + (void) vmw_gb_surface_destroy(res); 296 + return; 297 + } 312 298 313 299 if (res->id != -1) { 314 300 ··· 573 549 struct vmw_resource *res = &srf->res; 574 550 575 551 BUG_ON(res_free == NULL); 576 - (void) vmw_3d_resource_inc(dev_priv, false); 552 + if (!dev_priv->has_mob) 553 + (void) vmw_3d_resource_inc(dev_priv, false); 577 554 ret = vmw_resource_init(dev_priv, res, true, res_free, 555 + (dev_priv->has_mob) ? 
&vmw_gb_surface_func : 578 556 &vmw_legacy_surface_func); 579 557 580 558 if (unlikely(ret != 0)) { 581 - vmw_3d_resource_dec(dev_priv, false); 559 + if (!dev_priv->has_mob) 560 + vmw_3d_resource_dec(dev_priv, false); 582 561 res_free(res); 583 562 return ret; 584 563 } ··· 777 750 778 751 srf->base_size = *srf->sizes; 779 752 srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; 780 - srf->multisample_count = 1; 753 + srf->multisample_count = 0; 781 754 782 755 cur_bo_offset = 0; 783 756 cur_offset = srf->offsets; ··· 917 890 } 918 891 out_bad_resource: 919 892 out_no_reference: 893 + ttm_base_object_unref(&base); 894 + 895 + return ret; 896 + } 897 + 898 + /** 899 + * vmw_surface_define_encode - Encode a surface_define command. 900 + * 901 + * @srf: Pointer to a struct vmw_surface object. 902 + * @cmd_space: Pointer to memory area in which the commands should be encoded. 903 + */ 904 + static int vmw_gb_surface_create(struct vmw_resource *res) 905 + { 906 + struct vmw_private *dev_priv = res->dev_priv; 907 + struct vmw_surface *srf = vmw_res_to_srf(res); 908 + uint32_t cmd_len, submit_len; 909 + int ret; 910 + struct { 911 + SVGA3dCmdHeader header; 912 + SVGA3dCmdDefineGBSurface body; 913 + } *cmd; 914 + 915 + if (likely(res->id != -1)) 916 + return 0; 917 + 918 + (void) vmw_3d_resource_inc(dev_priv, false); 919 + ret = vmw_resource_alloc_id(res); 920 + if (unlikely(ret != 0)) { 921 + DRM_ERROR("Failed to allocate a surface id.\n"); 922 + goto out_no_id; 923 + } 924 + 925 + if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) { 926 + ret = -EBUSY; 927 + goto out_no_fifo; 928 + } 929 + 930 + cmd_len = sizeof(cmd->body); 931 + submit_len = sizeof(*cmd); 932 + cmd = vmw_fifo_reserve(dev_priv, submit_len); 933 + if (unlikely(cmd == NULL)) { 934 + DRM_ERROR("Failed reserving FIFO space for surface " 935 + "creation.\n"); 936 + ret = -ENOMEM; 937 + goto out_no_fifo; 938 + } 939 + 940 + cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE; 941 + cmd->header.size = cmd_len; 942 + 
cmd->body.sid = srf->res.id; 943 + cmd->body.surfaceFlags = srf->flags; 944 + cmd->body.format = cpu_to_le32(srf->format); 945 + cmd->body.numMipLevels = srf->mip_levels[0]; 946 + cmd->body.multisampleCount = srf->multisample_count; 947 + cmd->body.autogenFilter = srf->autogen_filter; 948 + cmd->body.size.width = srf->base_size.width; 949 + cmd->body.size.height = srf->base_size.height; 950 + cmd->body.size.depth = srf->base_size.depth; 951 + vmw_fifo_commit(dev_priv, submit_len); 952 + 953 + return 0; 954 + 955 + out_no_fifo: 956 + vmw_resource_release_id(res); 957 + out_no_id: 958 + vmw_3d_resource_dec(dev_priv, false); 959 + return ret; 960 + } 961 + 962 + 963 + static int vmw_gb_surface_bind(struct vmw_resource *res, 964 + struct ttm_validate_buffer *val_buf) 965 + { 966 + struct vmw_private *dev_priv = res->dev_priv; 967 + struct { 968 + SVGA3dCmdHeader header; 969 + SVGA3dCmdBindGBSurface body; 970 + } *cmd1; 971 + struct { 972 + SVGA3dCmdHeader header; 973 + SVGA3dCmdUpdateGBSurface body; 974 + } *cmd2; 975 + uint32_t submit_size; 976 + struct ttm_buffer_object *bo = val_buf->bo; 977 + 978 + BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 979 + 980 + submit_size = sizeof(*cmd1) + (res->backup_dirty ? 
sizeof(*cmd2) : 0); 981 + 982 + cmd1 = vmw_fifo_reserve(dev_priv, submit_size); 983 + if (unlikely(cmd1 == NULL)) { 984 + DRM_ERROR("Failed reserving FIFO space for surface " 985 + "binding.\n"); 986 + return -ENOMEM; 987 + } 988 + 989 + cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE; 990 + cmd1->header.size = sizeof(cmd1->body); 991 + cmd1->body.sid = res->id; 992 + cmd1->body.mobid = bo->mem.start; 993 + if (res->backup_dirty) { 994 + cmd2 = (void *) &cmd1[1]; 995 + cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE; 996 + cmd2->header.size = sizeof(cmd2->body); 997 + cmd2->body.sid = res->id; 998 + res->backup_dirty = false; 999 + } 1000 + vmw_fifo_commit(dev_priv, submit_size); 1001 + 1002 + return 0; 1003 + } 1004 + 1005 + static int vmw_gb_surface_unbind(struct vmw_resource *res, 1006 + bool readback, 1007 + struct ttm_validate_buffer *val_buf) 1008 + { 1009 + struct vmw_private *dev_priv = res->dev_priv; 1010 + struct ttm_buffer_object *bo = val_buf->bo; 1011 + struct vmw_fence_obj *fence; 1012 + 1013 + struct { 1014 + SVGA3dCmdHeader header; 1015 + SVGA3dCmdReadbackGBSurface body; 1016 + } *cmd1; 1017 + struct { 1018 + SVGA3dCmdHeader header; 1019 + SVGA3dCmdInvalidateGBSurface body; 1020 + } *cmd2; 1021 + struct { 1022 + SVGA3dCmdHeader header; 1023 + SVGA3dCmdBindGBSurface body; 1024 + } *cmd3; 1025 + uint32_t submit_size; 1026 + uint8_t *cmd; 1027 + 1028 + 1029 + BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 1030 + 1031 + submit_size = sizeof(*cmd3) + (readback ? 
sizeof(*cmd1) : sizeof(*cmd2)); 1032 + cmd = vmw_fifo_reserve(dev_priv, submit_size); 1033 + if (unlikely(cmd == NULL)) { 1034 + DRM_ERROR("Failed reserving FIFO space for surface " 1035 + "unbinding.\n"); 1036 + return -ENOMEM; 1037 + } 1038 + 1039 + if (readback) { 1040 + cmd1 = (void *) cmd; 1041 + cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE; 1042 + cmd1->header.size = sizeof(cmd1->body); 1043 + cmd1->body.sid = res->id; 1044 + cmd3 = (void *) &cmd1[1]; 1045 + } else { 1046 + cmd2 = (void *) cmd; 1047 + cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE; 1048 + cmd2->header.size = sizeof(cmd2->body); 1049 + cmd2->body.sid = res->id; 1050 + cmd3 = (void *) &cmd2[1]; 1051 + } 1052 + 1053 + cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE; 1054 + cmd3->header.size = sizeof(cmd3->body); 1055 + cmd3->body.sid = res->id; 1056 + cmd3->body.mobid = SVGA3D_INVALID_ID; 1057 + 1058 + vmw_fifo_commit(dev_priv, submit_size); 1059 + 1060 + /* 1061 + * Create a fence object and fence the backup buffer. 
1062 + */ 1063 + 1064 + (void) vmw_execbuf_fence_commands(NULL, dev_priv, 1065 + &fence, NULL); 1066 + 1067 + vmw_fence_single_bo(val_buf->bo, fence); 1068 + 1069 + if (likely(fence != NULL)) 1070 + vmw_fence_obj_unreference(&fence); 1071 + 1072 + return 0; 1073 + } 1074 + 1075 + static int vmw_gb_surface_destroy(struct vmw_resource *res) 1076 + { 1077 + struct vmw_private *dev_priv = res->dev_priv; 1078 + struct { 1079 + SVGA3dCmdHeader header; 1080 + SVGA3dCmdDestroyGBSurface body; 1081 + } *cmd; 1082 + 1083 + if (likely(res->id == -1)) 1084 + return 0; 1085 + 1086 + mutex_lock(&dev_priv->binding_mutex); 1087 + vmw_context_binding_res_list_kill(&res->binding_head); 1088 + 1089 + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 1090 + if (unlikely(cmd == NULL)) { 1091 + DRM_ERROR("Failed reserving FIFO space for surface " 1092 + "destruction.\n"); 1093 + return -ENOMEM; 1094 + } 1095 + 1096 + cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE; 1097 + cmd->header.size = sizeof(cmd->body); 1098 + cmd->body.sid = res->id; 1099 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 1100 + mutex_unlock(&dev_priv->binding_mutex); 1101 + vmw_resource_release_id(res); 1102 + vmw_3d_resource_dec(dev_priv, false); 1103 + 1104 + return 0; 1105 + } 1106 + 1107 + /** 1108 + * vmw_gb_surface_define_ioctl - Ioctl function implementing 1109 + * the user surface define functionality. 1110 + * 1111 + * @dev: Pointer to a struct drm_device. 1112 + * @data: Pointer to data copied from / to user-space. 1113 + * @file_priv: Pointer to a drm file private structure. 
1114 + */ 1115 + int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, 1116 + struct drm_file *file_priv) 1117 + { 1118 + struct vmw_private *dev_priv = vmw_priv(dev); 1119 + struct vmw_user_surface *user_srf; 1120 + struct vmw_surface *srf; 1121 + struct vmw_resource *res; 1122 + struct vmw_resource *tmp; 1123 + union drm_vmw_gb_surface_create_arg *arg = 1124 + (union drm_vmw_gb_surface_create_arg *)data; 1125 + struct drm_vmw_gb_surface_create_req *req = &arg->req; 1126 + struct drm_vmw_gb_surface_create_rep *rep = &arg->rep; 1127 + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 1128 + int ret; 1129 + uint32_t size; 1130 + struct vmw_master *vmaster = vmw_master(file_priv->master); 1131 + const struct svga3d_surface_desc *desc; 1132 + uint32_t backup_handle; 1133 + 1134 + if (unlikely(vmw_user_surface_size == 0)) 1135 + vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + 1136 + 128; 1137 + 1138 + size = vmw_user_surface_size + 128; 1139 + 1140 + desc = svga3dsurface_get_desc(req->format); 1141 + if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) { 1142 + DRM_ERROR("Invalid surface format for surface creation.\n"); 1143 + return -EINVAL; 1144 + } 1145 + 1146 + ret = ttm_read_lock(&vmaster->lock, true); 1147 + if (unlikely(ret != 0)) 1148 + return ret; 1149 + 1150 + ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), 1151 + size, false, true); 1152 + if (unlikely(ret != 0)) { 1153 + if (ret != -ERESTARTSYS) 1154 + DRM_ERROR("Out of graphics memory for surface" 1155 + " creation.\n"); 1156 + goto out_unlock; 1157 + } 1158 + 1159 + user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); 1160 + if (unlikely(user_srf == NULL)) { 1161 + ret = -ENOMEM; 1162 + goto out_no_user_srf; 1163 + } 1164 + 1165 + srf = &user_srf->srf; 1166 + res = &srf->res; 1167 + 1168 + srf->flags = req->svga3d_flags; 1169 + srf->format = req->format; 1170 + srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout; 1171 + srf->mip_levels[0] = 
req->mip_levels; 1172 + srf->num_sizes = 1; 1173 + srf->sizes = NULL; 1174 + srf->offsets = NULL; 1175 + user_srf->size = size; 1176 + srf->base_size = req->base_size; 1177 + srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; 1178 + srf->multisample_count = req->multisample_count; 1179 + res->backup_size = svga3dsurface_get_serialized_size 1180 + (srf->format, srf->base_size, srf->mip_levels[0], 1181 + srf->flags & SVGA3D_SURFACE_CUBEMAP); 1182 + 1183 + user_srf->prime.base.shareable = false; 1184 + user_srf->prime.base.tfile = NULL; 1185 + 1186 + /** 1187 + * From this point, the generic resource management functions 1188 + * destroy the object on failure. 1189 + */ 1190 + 1191 + ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); 1192 + if (unlikely(ret != 0)) 1193 + goto out_unlock; 1194 + 1195 + if (req->buffer_handle != SVGA3D_INVALID_ID) { 1196 + ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, 1197 + &res->backup); 1198 + } else if (req->drm_surface_flags & 1199 + drm_vmw_surface_flag_create_buffer) 1200 + ret = vmw_user_dmabuf_alloc(dev_priv, tfile, 1201 + res->backup_size, 1202 + req->drm_surface_flags & 1203 + drm_vmw_surface_flag_shareable, 1204 + &backup_handle, 1205 + &res->backup); 1206 + 1207 + if (unlikely(ret != 0)) { 1208 + vmw_resource_unreference(&res); 1209 + goto out_unlock; 1210 + } 1211 + 1212 + tmp = vmw_resource_reference(&srf->res); 1213 + ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, 1214 + req->drm_surface_flags & 1215 + drm_vmw_surface_flag_shareable, 1216 + VMW_RES_SURFACE, 1217 + &vmw_user_surface_base_release, NULL); 1218 + 1219 + if (unlikely(ret != 0)) { 1220 + vmw_resource_unreference(&tmp); 1221 + vmw_resource_unreference(&res); 1222 + goto out_unlock; 1223 + } 1224 + 1225 + rep->handle = user_srf->prime.base.hash.key; 1226 + rep->backup_size = res->backup_size; 1227 + if (res->backup) { 1228 + rep->buffer_map_handle = 1229 + drm_vma_node_offset_addr(&res->backup->base.vma_node); 1230 + 
rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE; 1231 + rep->buffer_handle = backup_handle; 1232 + } else { 1233 + rep->buffer_map_handle = 0; 1234 + rep->buffer_size = 0; 1235 + rep->buffer_handle = SVGA3D_INVALID_ID; 1236 + } 1237 + 1238 + vmw_resource_unreference(&res); 1239 + 1240 + ttm_read_unlock(&vmaster->lock); 1241 + return 0; 1242 + out_no_user_srf: 1243 + ttm_mem_global_free(vmw_mem_glob(dev_priv), size); 1244 + out_unlock: 1245 + ttm_read_unlock(&vmaster->lock); 1246 + return ret; 1247 + } 1248 + 1249 + /** 1250 + * vmw_gb_surface_reference_ioctl - Ioctl function implementing 1251 + * the user surface reference functionality. 1252 + * 1253 + * @dev: Pointer to a struct drm_device. 1254 + * @data: Pointer to data copied from / to user-space. 1255 + * @file_priv: Pointer to a drm file private structure. 1256 + */ 1257 + int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, 1258 + struct drm_file *file_priv) 1259 + { 1260 + struct vmw_private *dev_priv = vmw_priv(dev); 1261 + union drm_vmw_gb_surface_reference_arg *arg = 1262 + (union drm_vmw_gb_surface_reference_arg *)data; 1263 + struct drm_vmw_surface_arg *req = &arg->req; 1264 + struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep; 1265 + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 1266 + struct vmw_surface *srf; 1267 + struct vmw_user_surface *user_srf; 1268 + struct ttm_base_object *base; 1269 + uint32_t backup_handle; 1270 + int ret = -EINVAL; 1271 + 1272 + base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid); 1273 + if (unlikely(base == NULL)) { 1274 + DRM_ERROR("Could not find surface to reference.\n"); 1275 + return -EINVAL; 1276 + } 1277 + 1278 + if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) 1279 + goto out_bad_resource; 1280 + 1281 + user_srf = container_of(base, struct vmw_user_surface, prime.base); 1282 + srf = &user_srf->srf; 1283 + if (srf->res.backup == NULL) { 1284 + DRM_ERROR("Shared GB surface is missing a backup 
buffer.\n"); 1285 + goto out_bad_resource; 1286 + } 1287 + 1288 + ret = ttm_ref_object_add(tfile, &user_srf->prime.base, 1289 + TTM_REF_USAGE, NULL); 1290 + if (unlikely(ret != 0)) { 1291 + DRM_ERROR("Could not add a reference to a GB surface.\n"); 1292 + goto out_bad_resource; 1293 + } 1294 + 1295 + mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */ 1296 + ret = vmw_user_dmabuf_reference(tfile, srf->res.backup, 1297 + &backup_handle); 1298 + mutex_unlock(&dev_priv->cmdbuf_mutex); 1299 + 1300 + if (unlikely(ret != 0)) { 1301 + DRM_ERROR("Could not add a reference to a GB surface " 1302 + "backup buffer.\n"); 1303 + (void) ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, 1304 + req->sid, 1305 + TTM_REF_USAGE); 1306 + goto out_bad_resource; 1307 + } 1308 + 1309 + rep->creq.svga3d_flags = srf->flags; 1310 + rep->creq.format = srf->format; 1311 + rep->creq.mip_levels = srf->mip_levels[0]; 1312 + rep->creq.drm_surface_flags = 0; 1313 + rep->creq.multisample_count = srf->multisample_count; 1314 + rep->creq.autogen_filter = srf->autogen_filter; 1315 + rep->creq.buffer_handle = backup_handle; 1316 + rep->creq.base_size = srf->base_size; 1317 + rep->crep.handle = user_srf->prime.base.hash.key; 1318 + rep->crep.backup_size = srf->res.backup_size; 1319 + rep->crep.buffer_handle = backup_handle; 1320 + rep->crep.buffer_map_handle = 1321 + drm_vma_node_offset_addr(&srf->res.backup->base.vma_node); 1322 + rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE; 1323 + 1324 + out_bad_resource: 920 1325 ttm_base_object_unref(&base); 921 1326 922 1327 return ret;
+261
include/uapi/drm/vmwgfx_drm.h
#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

/* Pull in the core DRM types when built for user-space; the kernel
 * build gets them via its own include paths. */
#ifndef __KERNEL__
#include <drm.h>
#endif

#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24

/* ... (unchanged lines elided in this diff view) ... */

#define DRM_VMW_PRESENT 18
#define DRM_VMW_PRESENT_READBACK 19
#define DRM_VMW_UPDATE_LAYOUT 20
/* New ioctls for guest-backed (MOB-backed) objects. */
#define DRM_VMW_CREATE_SHADER 21
#define DRM_VMW_UNREF_SHADER 22
#define DRM_VMW_GB_SURFACE_CREATE 23
#define DRM_VMW_GB_SURFACE_REF 24
#define DRM_VMW_SYNCCPU 25

/* ... (unchanged lines elided in this diff view) ... */

#define DRM_VMW_PARAM_FIFO_CAPS 4
#define DRM_VMW_PARAM_MAX_FB_SIZE 5
#define DRM_VMW_PARAM_FIFO_HW_VERSION 6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9

/* ... (unchanged lines elided in this diff view) ... */

	/* NOTE(review): tail of a struct whose head is outside this view. */
	uint32_t num_outputs;
	uint32_t pad64;
	uint64_t rects;
};


/*************************************************************************/
/**
 * DRM_VMW_CREATE_SHADER - Create shader
 *
 * Creates a shader and optionally binds it to a dma buffer containing
 * the shader byte-code.
 */

/**
 * enum drm_vmw_shader_type - Shader types
 */
enum drm_vmw_shader_type {
	drm_vmw_shader_type_vs = 0,
	drm_vmw_shader_type_ps,
	drm_vmw_shader_type_gs
};


/**
 * struct drm_vmw_shader_create_arg
 *
 * @shader_type: Shader type of the shader to create.
 * @size: Size of the byte-code in bytes.
 * @buffer_handle: Buffer handle identifying the buffer containing the
 * shader byte-code
 * @shader_handle: On successful completion contains a handle that
 * can be used to subsequently identify the shader.
 * @offset: Offset in bytes into the buffer given by @buffer_handle,
 * where the shader byte-code starts
 *
 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
 */
struct drm_vmw_shader_create_arg {
	/* NOTE(review): enum size in a UAPI struct is compiler-dependent;
	 * presumably both sides compile it as 32 bits — verify. */
	enum drm_vmw_shader_type shader_type;
	uint32_t size;
	uint32_t buffer_handle;
	uint32_t shader_handle;
	uint64_t offset;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SHADER - Unreferences a shader
 *
 * Destroys a user-space reference to a shader, optionally destroying
 * it.
 */

/**
 * struct drm_vmw_shader_arg
 *
 * @handle: Handle identifying the shader to destroy.
 * @pad64: Padding to 64-bit size.
 *
 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
 */
struct drm_vmw_shader_arg {
	uint32_t handle;
	uint32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 */

/**
 * enum drm_vmw_surface_flags
 *
 * @drm_vmw_surface_flag_shareable: Whether the surface is shareable
 * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
 * surface.
 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
 * given.
 */
enum drm_vmw_surface_flags {
	drm_vmw_surface_flag_shareable = (1 << 0),
	drm_vmw_surface_flag_scanout = (1 << 1),
	drm_vmw_surface_flag_create_buffer = (1 << 2)
};

/**
 * struct drm_vmw_gb_surface_create_req
 *
 * @svga3d_flags: SVGA3d surface flags for the device.
 * @format: SVGA3d format.
 * @mip_levels: Number of mip levels for all faces.
 * @drm_surface_flags: Flags as described above.
 * @multisample_count: Future use. Set to 0.
 * @autogen_filter: Future use. Set to 0.
 * @buffer_handle: Buffer handle of backup buffer. SVGA3D_INVALID_ID
 * if none.
 * @pad64: Padding so @base_size starts on a 64-bit boundary.
 * @base_size: Size of the base mip level for all faces.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
struct drm_vmw_gb_surface_create_req {
	uint32_t svga3d_flags;
	uint32_t format;
	uint32_t mip_levels;
	enum drm_vmw_surface_flags drm_surface_flags;
	uint32_t multisample_count;
	uint32_t autogen_filter;
	uint32_t buffer_handle;
	uint32_t pad64;
	struct drm_vmw_size base_size;
};

/**
 * struct drm_vmw_gb_surface_create_rep
 *
 * @handle: Surface handle.
 * @backup_size: Size of backup buffers for this surface.
 * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none.
 * @buffer_size: Actual size of the buffer identified by
 * @buffer_handle
 * @buffer_map_handle: Offset into device address space for the buffer
 * identified by @buffer_handle.
 *
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
struct drm_vmw_gb_surface_create_rep {
	uint32_t handle;
	uint32_t backup_size;
	uint32_t buffer_handle;
	uint32_t buffer_size;
	uint64_t buffer_map_handle;
};

/**
 * union drm_vmw_gb_surface_create_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
union drm_vmw_gb_surface_create_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_req req;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl.
 */
struct drm_vmw_gb_surface_ref_rep {
	struct drm_vmw_gb_surface_create_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
union drm_vmw_gb_surface_reference_arg {
	struct drm_vmw_gb_surface_ref_rep rep;
	struct drm_vmw_surface_arg req;
};


/*************************************************************************/
/**
 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
 *
 * Idles any previously submitted GPU operations on the buffer and
 * by default blocks command submissions that reference the buffer.
 * If the file descriptor used to grab a blocking CPU sync is closed, the
 * cpu sync is released.
 * The flags argument indicates how the grab / release operation should be
 * performed:
 */

/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that references the buffer
 * for read-only.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Dont wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
	drm_vmw_synccpu_read = (1 << 0),
	drm_vmw_synccpu_write = (1 << 1),
	drm_vmw_synccpu_dontblock = (1 << 2),
	drm_vmw_synccpu_allow_cs = (1 << 3)
};

/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
	drm_vmw_synccpu_grab,
	drm_vmw_synccpu_release
};

/**
 * struct drm_vmw_synccpu_arg
 *
 * @op: The synccpu operation as described above.
 * @flags: Flags as described above.
 * @handle: Handle identifying the buffer object.
 * @pad64: Padding to 64-bit size.
 */
struct drm_vmw_synccpu_arg {
	enum drm_vmw_synccpu_op op;
	enum drm_vmw_synccpu_flags flags;
	uint32_t handle;
	uint32_t pad64;
};

#endif